Compare commits

..

56 Commits

Author SHA1 Message Date
Vikhyath Mondreti
78f818f7cd remove duplicate code 2026-01-20 18:32:17 -08:00
Vikhyath Mondreti
cd1c5315d6 fix tag dropdown merge conflict change 2026-01-20 18:19:55 -08:00
Vikhyath Mondreti
601f58cec9 use helper for internal route check 2026-01-20 18:11:38 -08:00
Vikhyath Mondreti
9fc6378f17 Merge remote-tracking branch 'origin/staging' into feat/tools 2026-01-20 18:00:41 -08:00
waleed
63d109de3a added description for inputs to workflow 2026-01-20 17:52:22 -08:00
Vikhyath Mondreti
1f1f015031 improvement(files): update execution for passing base64 strings (#2906)
* progress

* improvement(execution): update execution for passing base64 strings

* fix types

* cleanup comments

* path security vuln

* reject promise correctly

* fix redirect case

* remove proxy routes

* fix tests

* use ipaddr
2026-01-20 17:49:00 -08:00
waleed
c9239b55ef cleanup 2026-01-20 16:34:21 -08:00
waleed
233a3ee0b4 updated extension finder 2026-01-20 16:02:54 -08:00
waleed
c3634c2e38 updated tag dropdown to parse non-operation fields as well 2026-01-20 15:59:40 -08:00
waleed
51ed4f506d updated the rest of the old file patterns, updated mistral outputs for v2 2026-01-20 15:41:06 -08:00
Waleed
4afb245fa2 improvement(executor): upgraded abort controller to handle aborts for loops and parallels (#2880)
* improvement(executor): upgraded abort controller to handle aborts for loops and parallels

* comments
2026-01-20 15:40:37 -08:00
waleed
59578dd140 added mistral v2, files v2, and finalized textract 2026-01-20 15:02:27 -08:00
waleed
dcaae1df7c fix additional fields dropdown in editor, update parser to leave validation to be done on the server 2026-01-20 11:51:22 -08:00
waleed
c5d3405c7a removed upload for textract async version 2026-01-20 11:44:24 -08:00
waleed
0ac6fec0a5 reorder 2026-01-20 11:31:57 -08:00
waleed
75450afb11 ack pr comments 2026-01-20 11:24:03 -08:00
waleed
dbee20e9e5 cleanup 2026-01-20 11:13:29 -08:00
Vikhyath Mondreti
8344d68ca8 improvement(browseruse): add profile id param (#2903)
* improvement(browseruse): add profile id param

* make request a stub since we have directExec
2026-01-20 11:08:47 -08:00
waleed
ecf39c5a54 feat(tools): added textract 2026-01-20 11:06:38 -08:00
Waleed
a26a1a9737 fix(rss): add top-level title, link, pubDate fields to RSS trigger output (#2902)
* fix(rss): add top-level title, link, pubDate fields to RSS trigger output

* fix(imap): add top-level fields to IMAP trigger output
2026-01-20 10:06:13 -08:00
Vikhyath Mondreti
689037a300 fix(canonical): copilot path + update parent (#2901) 2026-01-20 09:43:41 -08:00
Waleed
07f0c01dc4 fix(google): wrap primitive tool responses for Gemini API compatibility (#2900) 2026-01-20 09:27:45 -08:00
Waleed
e4ad31bb6b fix(kb): align bulk chunk operation with API response (#2899)
* fix(kb): align bulk chunk operation with API response

* fix(kb): skip local state update for failed chunks

* fix(kb): correct errors type and refresh on partial failure
2026-01-20 00:24:50 -08:00
Waleed
84691fc873 improvement(modal): fixed popover issue in custom tools modal, removed the ability to update if no changes made (#2897)
* improvement(modal): fixed popover issue in custom tools modal, removed the ability to update if no changes made

* improvement(modal): fixed popover issue in custom tools modal, removed the ability to update if no changes made

* popover fixes, color picker keyboard nav, code simplification

* color standardization

* fix color picker

* set discard alert state when closing modal
2026-01-19 23:52:07 -08:00
Emir Karabeg
2daf34386e fix(copilot): ui/ux (#2891)
* feat(claude): added rules

* fix(copilot): chat loading; refactor(copilot): components, utils, hooks

* fix(copilot): options selection strikethrough

* fix(copilot): options render inside thinking

* fix(copilot): checkpoints, user-input; improvement(code): colors

* fix(copilot): scrolling, tool-call truncation, thinking ui

* fix(copilot): tool call spacing and shimmer/actions on previous messages

* improvement(copilot): queue

* addressed comments
2026-01-19 23:23:21 -08:00
Waleed
ac991d4b54 fix(sso): removed provider specific OIDC logic from SSO registration & deregistration scripts (#2896)
* fix(sso): updated registration & deregistration script for explicit support for Entra ID

* cleanup

* ack PR comment

* ack PR comment

* tested edge cases, ack'd PR comments

* remove trailing slash
2026-01-19 19:23:50 -08:00
Waleed
69614d2d93 improvement(kb): migrate manual fetches in kb module to use reactquery (#2894)
* improvement(kb): migrate manual fetches in kb module to use reactquery

* converted remaining manual kb fetches

* unwrap kb tags before API call, added more query invalidation for chunks

* added resetMutation calls after modal closes
2026-01-19 17:25:17 -08:00
Waleed
6cbadd7110 feat(api): added workflows api route for dynamic discovery (#2892)
* feat(api): added workflows api route for dynamic discovery

* added ability to edit parameter and workflow descriptions

* added new rate limit category, ack PR comments

* fix hasChanges logic

* added whitespace trimming before hasChanges check
2026-01-19 17:21:51 -08:00
Vikhyath Mondreti
9efd3d5b4c improvement(stats): should track mcp and a2a executions like other trigger types (#2895)
* improvement(stats): should track mcp and a2a executions like other trigger types

* update types
2026-01-19 16:29:37 -08:00
Siddharth Ganesan
e575ba2965 feat(settings): add debug mode for superusers (#2893)
* Superuser debug

* Fix

* update templates routes to use helper

---------

Co-authored-by: Vikhyath Mondreti <vikhyath@simstudio.ai>
2026-01-19 16:28:43 -08:00
Siddharth Ganesan
5f45db4343 improvement(copilot): variables, conditions, router (#2887)
* Temp

* Condition and router copilot syntax updates

* Plan respond plan
2026-01-19 15:24:50 -08:00
Waleed
81cbfe7af4 feat(browseruse): upgraded browseruse endpoints to v2 (#2890) 2026-01-19 14:47:19 -08:00
Waleed
739341b08e improvement(router): add resizable textareas for router conditions (#2888) 2026-01-19 13:59:13 -08:00
Waleed
3c43779ba3 feat(search): added operations to search modal in main app, updated retrieval in docs to use RRF (#2889) 2026-01-19 13:57:56 -08:00
Waleed
1861f77283 feat(terminal): add fix in copilot for errors (#2885) 2026-01-19 13:42:34 -08:00
Vikhyath Mondreti
72c2ba7443 fix(linear): team selector in tool input (#2886) 2026-01-19 12:40:45 -08:00
Waleed
037dad6975 fix(undo-redo): preserve subblock values during undo/redo cycles (#2884)
* fix(undo-redo): preserve subblock values during undo/redo cycles

* added tests
2026-01-19 12:19:51 -08:00
Waleed
408597e12b feat(notifs): added block name to error notifications (#2883) 2026-01-19 09:54:19 -08:00
Waleed
932f8fd654 feat(mcp): updated mcp subblocks for mcp tools to match subblocks (#2882)
* feat(mcp): updated mcp subblocks for mcp tools to match subblocks

* updated trigger descriptions
2026-01-19 09:50:03 -08:00
Waleed
b4c2294e67 improvement(emails): update unsub page, standardize unsub process (#2881) 2026-01-18 20:42:04 -08:00
Vikhyath Mondreti
1dbf92db3f fix(api): tool input parsing into table from agent output (#2879)
* fix(api): transformTable to map agent output to table subblock format

* fix api

* add test
2026-01-18 14:43:02 -08:00
Waleed
3a923648cb feat(ux): more explicit verbiage on some dialog menus, google drive updates, advanced to additional fields, remove general settings store sync in favor of tanstack (#2875)
* fix(verbiage): more explicit verbiage on some dialog menus, google drive updates, advanced to additional fields, remove general settings store sync in favor of tanstack

* updated docs

* nested tag dropdown, more well-defined nested outputs, keyboard nav for context menus, etc

* cleanup

* allow canonical toggle even if depends on not satisfied

* remove smooth scroll in tag drop

* fix selection

* fix

---------

Co-authored-by: Vikhyath Mondreti <vikhyath@simstudio.ai>
2026-01-18 13:40:59 -08:00
Vikhyath Mondreti
5e2468cfd3 improvement(slides): add missing properties definitions (#2877) 2026-01-18 12:35:58 -08:00
Vikhyath Mondreti
7c0f43305b fix(resolver): tool configs must take precedence (#2876) 2026-01-18 10:11:57 -08:00
Waleed
ee7572185a improvement(tools): added visibility for tools that were missing it, added new google and github tools (#2874)
* improvement(tools): added visibility for tools that were missing it, added new google tools

* fixed the name for google forms

* revert schema enrichers change

* fixed block ordering
2026-01-17 20:51:15 -08:00
Waleed
19a8daedf7 improvement(performance): used react scan to identify rerendering issues and react issues (#2873) 2026-01-17 19:20:52 -08:00
Vikhyath Mondreti
0fcd52683a improvement(tool-input): general abstraction to enrich agent context, reuse visibility helpers (#2872)
* add abstraction for schema enrichment, improve agent KB block experience for tags, fix visibility of subblocks

* cleanup code

* consolidate

* fix workflow tool react query

* fix deployed context propagation

* fix tests
2026-01-17 19:13:27 -08:00
Waleed
b8b20576d3 improvement(ui): modal style standardization, select drop improvement, duplication selection fixes (#2871)
* improvement(ui): modal style standardization, select drop improvement

* consolidation, fixed canvas issues

* more
2026-01-17 13:31:46 -08:00
Waleed
4b8534ebd0 feat(oauth): upgraded all generic oauth plugin providers to use unique account ids (#2870) 2026-01-17 13:09:54 -08:00
Waleed
f6960a4bd4 fix(wand): improved flickering for invalid JSON icon while streaming (#2868) 2026-01-17 12:43:22 -08:00
Vikhyath Mondreti
8740566f6a fix(block-resolver): path lookup check (#2869)
* fix(block-resolver): path lookup check

* remove comments
2026-01-17 12:17:55 -08:00
Waleed
5de7228dd9 improvement(avatar): use selection-update as the source of truth for presence, ignore other socket ops (#2866)
* improvement(avatar): use selection-update as the source of truth for presence, ignore other socket ops

* added logs
2026-01-16 20:17:07 -08:00
Vikhyath Mondreti
75898c69ed fix(start): seed initial subblock values on batch add (#2864) 2026-01-16 20:07:20 -08:00
Vikhyath Mondreti
b14672887b fix(sockets): webhooks logic removal from copilot ops (#2862)
* fix(sockets): dying on deployed webhooks

* fix edit workflow
2026-01-16 19:53:14 -08:00
Waleed
d024c1e489 fix(shift): fix shift select blue ring fading (#2863) 2026-01-16 19:52:51 -08:00
Waleed
d75ea37b3c chore(readme): updated readme (#2861) 2026-01-16 18:18:40 -08:00
648 changed files with 50030 additions and 11309 deletions


@@ -0,0 +1,35 @@
---
paths:
- "apps/sim/components/emcn/**"
---
# EMCN Components
Import from `@/components/emcn`, never from subpaths (except CSS files).
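For instance (the `Button` name and subpath are illustrative):
```tsx
// ✓ Good: import from the package root
import { Button } from '@/components/emcn'
// ✗ Bad: reaching into a subpath
import { Button } from '@/components/emcn/button'
```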
## CVA vs Direct Styles
**Use CVA when:** 2+ variants (primary/secondary, sm/md/lg)
```tsx
const buttonVariants = cva('base-classes', {
variants: { variant: { default: '...', primary: '...' } }
})
export { Button, buttonVariants }
```
**Use direct className when:** Single consistent style, no variations
```tsx
function Label({ className, ...props }) {
return <Primitive className={cn('style-classes', className)} {...props} />
}
```
## Rules
- Use Radix UI primitives for accessibility
- Export component and variants (if using CVA)
- TSDoc with usage examples
- Consistent tokens: `font-medium`, `text-[12px]`, `rounded-[4px]`
- `transition-colors` for hover states
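Putting these rules together, a compliant single-style component might look like this sketch (the component name and token classes are illustrative):
```tsx
import type * as React from 'react'
import * as LabelPrimitive from '@radix-ui/react-label'
import { cn } from '@/lib/utils'

/**
 * Form label built on the Radix primitive for accessibility.
 *
 * @example
 * <Label htmlFor="name">Name</Label>
 */
function Label({ className, ...props }: React.ComponentProps<typeof LabelPrimitive.Root>) {
  return (
    <LabelPrimitive.Root
      className={cn('font-medium text-[12px] transition-colors', className)}
      {...props}
    />
  )
}

export { Label }
```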

.claude/rules/global.md

@@ -0,0 +1,13 @@
# Global Standards
## Logging
Import `createLogger` from `@sim/logger`. Use `logger.info`, `logger.warn`, `logger.error` instead of `console.log`.
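A sketch (the module-name argument to `createLogger` and the metadata object are assumptions):
```typescript
import { createLogger } from '@sim/logger'

// One logger per module, tagged with the module name (assumed signature)
const logger = createLogger('WorkflowExecutor')

logger.info('Execution started', { workflowId: 'wf_123' })
```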
## Comments
Use TSDoc for documentation. No `====` separators. No non-TSDoc comments.
## Styling
Never update global styles. Keep all styling local to components.
## Package Manager
Use `bun` and `bunx`, not `npm` and `npx`.


@@ -0,0 +1,56 @@
---
paths:
- "apps/sim/**"
---
# Sim App Architecture
## Core Principles
1. **Single Responsibility**: Each component, hook, and store has one clear purpose
2. **Composition Over Complexity**: Break down complex logic into smaller pieces
3. **Type Safety First**: TypeScript interfaces for all props, state, return types
4. **Predictable State**: Zustand for global state, useState for UI-only concerns
## Root-Level Structure
```
apps/sim/
├── app/ # Next.js app router (pages, API routes)
├── blocks/ # Block definitions and registry
├── components/ # Shared UI (emcn/, ui/)
├── executor/ # Workflow execution engine
├── hooks/ # Shared hooks (queries/, selectors/)
├── lib/ # App-wide utilities
├── providers/ # LLM provider integrations
├── stores/ # Zustand stores
├── tools/ # Tool definitions
└── triggers/ # Trigger definitions
```
## Feature Organization
Features live under `app/workspace/[workspaceId]/`:
```
feature/
├── components/ # Feature components
├── hooks/ # Feature-scoped hooks
├── utils/ # Feature-scoped utilities (2+ consumers)
├── feature.tsx # Main component
└── page.tsx # Next.js page entry
```
## Naming Conventions
- **Components**: PascalCase (`WorkflowList`)
- **Hooks**: `use` prefix (`useWorkflowOperations`)
- **Files**: kebab-case (`workflow-list.tsx`)
- **Stores**: `stores/feature/store.ts`
- **Constants**: SCREAMING_SNAKE_CASE
- **Interfaces**: PascalCase with suffix (`WorkflowListProps`)
## Utils Rules
- **Never create `utils.ts` for single consumer** - inline it
- **Create `utils.ts` when** 2+ files need the same helper
- **Check existing sources** before duplicating (`lib/` has many utilities)
- **Location**: `lib/` (app-wide) → `feature/utils/` (feature-scoped) → inline (single-use)


@@ -0,0 +1,48 @@
---
paths:
- "apps/sim/**/*.tsx"
---
# Component Patterns
## Structure Order
```typescript
'use client' // Only if using hooks
// Imports (external → internal)
// Constants at module level
const CONFIG = { SPACING: 8 } as const
// Props interface
interface ComponentProps {
requiredProp: string
optionalProp?: boolean
}
export function Component({ requiredProp, optionalProp = false }: ComponentProps) {
// a. Refs
// b. External hooks (useParams, useRouter)
// c. Store hooks
// d. Custom hooks
// e. Local state
// f. useMemo
// g. useCallback
// h. useEffect
// i. Return JSX
}
```
## Rules
1. `'use client'` only when using React hooks
2. Always define props interface
3. Extract constants with `as const`
4. Semantic HTML (`aside`, `nav`, `article`)
5. Optional chain callbacks: `onAction?.(id)`
## Component Extraction
**Extract when:** 50+ lines, used in 2+ files, or has own state/logic
**Keep inline when:** < 10 lines, single use, purely presentational


@@ -0,0 +1,55 @@
---
paths:
- "apps/sim/**/use-*.ts"
- "apps/sim/**/hooks/**/*.ts"
---
# Hook Patterns
## Structure
```typescript
interface UseFeatureProps {
id: string
onSuccess?: (result: Result) => void
}
export function useFeature({ id, onSuccess }: UseFeatureProps) {
// 1. Refs for stable dependencies
const idRef = useRef(id)
const onSuccessRef = useRef(onSuccess)
// 2. State
const [data, setData] = useState<Data | null>(null)
const [isLoading, setIsLoading] = useState(false)
// 3. Sync refs
useEffect(() => {
idRef.current = id
onSuccessRef.current = onSuccess
}, [id, onSuccess])
// 4. Operations (useCallback with empty deps when using refs)
const fetchData = useCallback(async () => {
setIsLoading(true)
try {
const result = await fetch(`/api/${idRef.current}`).then(r => r.json())
setData(result)
onSuccessRef.current?.(result)
} finally {
setIsLoading(false)
}
}, [])
return { data, isLoading, fetchData }
}
```
## Rules
1. Single responsibility per hook
2. Props interface required
3. Refs for stable callback dependencies
4. Wrap returned functions in useCallback
5. Always try/catch async operations
6. Track loading/error states
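Applying rules 5 and 6 to the fetch in the structure above would look roughly like this (the error-state shape is illustrative):
```typescript
const [error, setError] = useState<Error | null>(null)

const fetchData = useCallback(async () => {
  setIsLoading(true)
  setError(null)
  try {
    const result = await fetch(`/api/${idRef.current}`).then((r) => r.json())
    setData(result)
    onSuccessRef.current?.(result)
  } catch (err) {
    // Rule 5: try/catch async work; rule 6: surface the failure as state
    setError(err instanceof Error ? err : new Error(String(err)))
  } finally {
    setIsLoading(false)
  }
}, [])
```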


@@ -0,0 +1,62 @@
---
paths:
- "apps/sim/**/*.ts"
- "apps/sim/**/*.tsx"
---
# Import Patterns
## Absolute Imports
**Always use absolute imports.** Never use relative imports.
```typescript
// ✓ Good
import { useWorkflowStore } from '@/stores/workflows/store'
import { Button } from '@/components/ui/button'
// ✗ Bad
import { useWorkflowStore } from '../../../stores/workflows/store'
```
## Barrel Exports
Use barrel exports (`index.ts`) when a folder has 3+ exports. Import from barrel, not individual files.
```typescript
// ✓ Good
import { Dashboard, Sidebar } from '@/app/workspace/[workspaceId]/logs/components'
// ✗ Bad
import { Dashboard } from '@/app/workspace/[workspaceId]/logs/components/dashboard/dashboard'
```
## No Re-exports
Do not re-export from non-barrel files. Import directly from the source.
```typescript
// ✓ Good - import from where it's declared
import { CORE_TRIGGER_TYPES } from '@/stores/logs/filters/types'
// ✗ Bad - re-exporting in utils.ts then importing from there
import { CORE_TRIGGER_TYPES } from '@/app/workspace/.../utils'
```
## Import Order
1. React/core libraries
2. External libraries
3. UI components (`@/components/emcn`, `@/components/ui`)
4. Utilities (`@/lib/...`)
5. Stores (`@/stores/...`)
6. Feature imports
7. CSS imports
## Type Imports
Use `type` keyword for type-only imports:
```typescript
import type { WorkflowLog } from '@/stores/logs/types'
```


@@ -0,0 +1,209 @@
---
paths:
- "apps/sim/tools/**"
- "apps/sim/blocks/**"
- "apps/sim/triggers/**"
---
# Adding Integrations
## Overview
Adding a new integration typically requires:
1. **Tools** - API operations (`tools/{service}/`)
2. **Block** - UI component (`blocks/blocks/{service}.ts`)
3. **Icon** - SVG icon (`components/icons.tsx`)
4. **Trigger** (optional) - Webhooks/polling (`triggers/{service}/`)
Always look up the service's API docs first.
## 1. Tools (`tools/{service}/`)
```
tools/{service}/
├── index.ts # Export all tools
├── types.ts # Params/response types
├── {action}.ts # Individual tool (e.g., send_message.ts)
└── ...
```
**Tool file structure:**
```typescript
// tools/{service}/{action}.ts
import type { {Service}Params, {Service}Response } from '@/tools/{service}/types'
import type { ToolConfig } from '@/tools/types'
export const {service}{Action}Tool: ToolConfig<{Service}Params, {Service}Response> = {
id: '{service}_{action}',
name: '{Service} {Action}',
description: 'What this tool does',
version: '1.0.0',
oauth: { required: true, provider: '{service}' }, // if OAuth
params: { /* param definitions */ },
request: {
url: '/api/tools/{service}/{action}',
method: 'POST',
headers: () => ({ 'Content-Type': 'application/json' }),
body: (params) => ({ ...params }),
},
transformResponse: async (response) => {
const data = await response.json()
if (!data.success) throw new Error(data.error)
return { success: true, output: data.output }
},
outputs: { /* output definitions */ },
}
```
**Register in `tools/registry.ts`:**
```typescript
import { {service}{Action}Tool } from '@/tools/{service}'
// Add to registry object
{service}_{action}: {service}{Action}Tool,
```
## 2. Block (`blocks/blocks/{service}.ts`)
```typescript
import { {Service}Icon } from '@/components/icons'
import type { BlockConfig } from '@/blocks/types'
import type { {Service}Response } from '@/tools/{service}/types'
export const {Service}Block: BlockConfig<{Service}Response> = {
type: '{service}',
name: '{Service}',
description: 'Short description',
longDescription: 'Detailed description',
category: 'tools',
bgColor: '#hexcolor',
icon: {Service}Icon,
subBlocks: [ /* see SubBlock Properties below */ ],
tools: {
access: ['{service}_{action}', ...],
config: {
tool: (params) => `{service}_${params.operation}`,
params: (params) => ({ ...params }),
},
},
inputs: { /* input definitions */ },
outputs: { /* output definitions */ },
}
```
### SubBlock Properties
```typescript
{
id: 'fieldName', // Unique identifier
title: 'Field Label', // UI label
type: 'short-input', // See SubBlock Types below
placeholder: 'Hint text',
required: true, // See Required below
condition: { ... }, // See Condition below
dependsOn: ['otherField'], // See DependsOn below
mode: 'basic', // 'basic' | 'advanced' | 'both' | 'trigger'
}
```
**SubBlock Types:** `short-input`, `long-input`, `dropdown`, `code`, `switch`, `slider`, `oauth-input`, `channel-selector`, `user-selector`, `file-upload`, etc.
### `condition` - Show/hide based on another field
```typescript
// Show when operation === 'send'
condition: { field: 'operation', value: 'send' }
// Show when operation is 'send' OR 'read'
condition: { field: 'operation', value: ['send', 'read'] }
// Show when operation !== 'send'
condition: { field: 'operation', value: 'send', not: true }
// Complex: NOT in list AND another condition
condition: {
field: 'operation',
value: ['list_channels', 'list_users'],
not: true,
and: { field: 'destinationType', value: 'dm', not: true }
}
```
### `required` - Field validation
```typescript
// Always required
required: true
// Conditionally required (same syntax as condition)
required: { field: 'operation', value: 'send' }
```
### `dependsOn` - Clear field when dependencies change
```typescript
// Clear when credential changes
dependsOn: ['credential']
// Clear when authMethod changes AND (credential OR botToken) changes
dependsOn: { all: ['authMethod'], any: ['credential', 'botToken'] }
```
### `mode` - When to show field
- `'basic'` - Only in basic mode (default UI)
- `'advanced'` - Only in advanced mode (manual input)
- `'both'` - Show in both modes (default)
- `'trigger'` - Only when block is used as trigger
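Combined, a subBlock using several of these might look like (field names illustrative):
```typescript
{
  id: 'message',
  title: 'Message',
  type: 'long-input',
  placeholder: 'Text to send',
  // Visible and required only for the send operation
  condition: { field: 'operation', value: 'send' },
  required: { field: 'operation', value: 'send' },
  // Reset whenever the selected credential changes
  dependsOn: ['credential'],
  mode: 'both',
}
```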
**Register in `blocks/registry.ts`:**
```typescript
import { {Service}Block } from '@/blocks/blocks/{service}'
// Add to registry object (alphabetically)
{service}: {Service}Block,
```
## 3. Icon (`components/icons.tsx`)
```typescript
export function {Service}Icon(props: SVGProps<SVGSVGElement>) {
return (
<svg {...props} viewBox="0 0 24 24" fill="none" xmlns="http://www.w3.org/2000/svg">
{/* SVG path from service's brand assets */}
</svg>
)
}
```
## 4. Trigger (`triggers/{service}/`) - Optional
```
triggers/{service}/
├── index.ts # Export all triggers
├── webhook.ts # Webhook handler
├── utils.ts # Shared utilities
└── {event}.ts # Specific event handlers
```
**Register in `triggers/registry.ts`:**
```typescript
import { {service}WebhookTrigger } from '@/triggers/{service}'
// Add to TRIGGER_REGISTRY
{service}_webhook: {service}WebhookTrigger,
```
## Checklist
- [ ] Look up API docs for the service
- [ ] Create `tools/{service}/types.ts` with proper types
- [ ] Create tool files for each operation
- [ ] Create `tools/{service}/index.ts` barrel export
- [ ] Register tools in `tools/registry.ts`
- [ ] Add icon to `components/icons.tsx`
- [ ] Create block in `blocks/blocks/{service}.ts`
- [ ] Register block in `blocks/registry.ts`
- [ ] (Optional) Create triggers in `triggers/{service}/`
- [ ] (Optional) Register triggers in `triggers/registry.ts`


@@ -0,0 +1,66 @@
---
paths:
- "apps/sim/hooks/queries/**/*.ts"
---
# React Query Patterns
All React Query hooks live in `hooks/queries/`.
## Query Key Factory
Every query file defines a keys factory:
```typescript
export const entityKeys = {
all: ['entity'] as const,
list: (workspaceId?: string) => [...entityKeys.all, 'list', workspaceId ?? ''] as const,
detail: (id?: string) => [...entityKeys.all, 'detail', id ?? ''] as const,
}
```
## File Structure
```typescript
// 1. Query keys factory
// 2. Types (if needed)
// 3. Private fetch functions
// 4. Exported hooks
```
## Query Hook
```typescript
export function useEntityList(workspaceId?: string, options?: { enabled?: boolean }) {
return useQuery({
queryKey: entityKeys.list(workspaceId),
queryFn: () => fetchEntities(workspaceId as string),
enabled: Boolean(workspaceId) && (options?.enabled ?? true),
staleTime: 60 * 1000,
placeholderData: keepPreviousData,
})
}
```
## Mutation Hook
```typescript
export function useCreateEntity() {
const queryClient = useQueryClient()
return useMutation({
mutationFn: async (variables) => { /* fetch POST */ },
onSuccess: () => queryClient.invalidateQueries({ queryKey: entityKeys.all }),
})
}
```
## Optimistic Updates
For optimistic mutations syncing with Zustand, use `createOptimisticMutationHandlers` from `@/hooks/queries/utils/optimistic-mutation`.
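The helper presumably encapsulates the standard React Query optimistic flow; as a rough sketch of that underlying shape (the endpoint and entity shape are illustrative, and the helper's real signature lives in the file above):
```typescript
import { useMutation, useQueryClient } from '@tanstack/react-query'

export function useUpdateEntity() {
  const queryClient = useQueryClient()
  return useMutation({
    mutationFn: async (next: { id: string; name: string }) => {
      const res = await fetch(`/api/entities/${next.id}`, {
        method: 'PATCH',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify(next),
      })
      return res.json()
    },
    onMutate: async (next) => {
      // Prevent in-flight refetches from clobbering the optimistic value
      await queryClient.cancelQueries({ queryKey: entityKeys.detail(next.id) })
      const previous = queryClient.getQueryData(entityKeys.detail(next.id))
      queryClient.setQueryData(entityKeys.detail(next.id), next)
      return { previous }
    },
    onError: (_err, next, context) => {
      // Roll back to the snapshot captured in onMutate
      queryClient.setQueryData(entityKeys.detail(next.id), context?.previous)
    },
    onSettled: () => queryClient.invalidateQueries({ queryKey: entityKeys.all }),
  })
}
```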
## Naming
- **Keys**: `entityKeys`
- **Query hooks**: `useEntity`, `useEntityList`
- **Mutation hooks**: `useCreateEntity`, `useUpdateEntity`
- **Fetch functions**: `fetchEntity` (private)


@@ -0,0 +1,71 @@
---
paths:
- "apps/sim/**/store.ts"
- "apps/sim/**/stores/**/*.ts"
---
# Zustand Store Patterns
Stores live in `stores/`. Complex stores split into `store.ts` + `types.ts`.
## Basic Store
```typescript
import { create } from 'zustand'
import { devtools } from 'zustand/middleware'
import type { FeatureState } from '@/stores/feature/types'
const initialState = { items: [] as Item[], activeId: null as string | null }
export const useFeatureStore = create<FeatureState>()(
devtools(
(set, get) => ({
...initialState,
setItems: (items) => set({ items }),
addItem: (item) => set((state) => ({ items: [...state.items, item] })),
reset: () => set(initialState),
}),
{ name: 'feature-store' }
)
)
```
## Persisted Store
```typescript
import { create } from 'zustand'
import { persist } from 'zustand/middleware'
export const useFeatureStore = create<FeatureState>()(
persist(
(set) => ({
width: 300,
setWidth: (width) => set({ width }),
_hasHydrated: false,
setHasHydrated: (v) => set({ _hasHydrated: v }),
}),
{
name: 'feature-state',
partialize: (state) => ({ width: state.width }),
onRehydrateStorage: () => (state) => state?.setHasHydrated(true),
}
)
)
```
## Rules
1. Use `devtools` middleware (named stores)
2. Use `persist` only when data should survive reload
3. `partialize` to persist only necessary state
4. `_hasHydrated` pattern for persisted stores needing hydration tracking
5. Immutable updates only
6. `set((state) => ...)` when depending on previous state
7. Provide `reset()` action
## Outside React
```typescript
const items = useFeatureStore.getState().items
useFeatureStore.setState({ items: newItems })
```


@@ -0,0 +1,41 @@
---
paths:
- "apps/sim/**/*.tsx"
- "apps/sim/**/*.css"
---
# Styling Rules
## Tailwind
1. **No inline styles** - Use Tailwind classes
2. **No duplicate dark classes** - Skip `dark:` when value matches light mode
3. **Exact values** - `text-[14px]`, `h-[26px]`
4. **Transitions** - `transition-colors` for interactive states
## Conditional Classes
```typescript
import { cn } from '@/lib/utils'
<div className={cn(
'base-classes',
isActive && 'active-classes',
disabled ? 'opacity-60' : 'hover:bg-accent'
)} />
```
## CSS Variables
For dynamic values (widths, heights) synced with stores:
```typescript
// In store
setWidth: (width) => {
set({ width })
document.documentElement.style.setProperty('--sidebar-width', `${width}px`)
}
// In component
<aside style={{ width: 'var(--sidebar-width)' }} />
```


@@ -0,0 +1,58 @@
---
paths:
- "apps/sim/**/*.test.ts"
- "apps/sim/**/*.test.tsx"
---
# Testing Patterns
Use Vitest. Test files: `feature.ts` → `feature.test.ts`
## Structure
```typescript
/**
* @vitest-environment node
*/
import { databaseMock, loggerMock } from '@sim/testing'
import { beforeEach, describe, expect, it, vi } from 'vitest'
vi.mock('@sim/db', () => databaseMock)
vi.mock('@sim/logger', () => loggerMock)
import { myFunction } from '@/lib/feature'
describe('myFunction', () => {
beforeEach(() => vi.clearAllMocks())
it.concurrent('isolated tests run in parallel', () => { ... })
})
```
## @sim/testing Package
Always prefer over local mocks.
| Category | Utilities |
|----------|-----------|
| **Mocks** | `loggerMock`, `databaseMock`, `setupGlobalFetchMock()` |
| **Factories** | `createSession()`, `createWorkflowRecord()`, `createBlock()`, `createExecutorContext()` |
| **Builders** | `WorkflowBuilder`, `ExecutionContextBuilder` |
| **Assertions** | `expectWorkflowAccessGranted()`, `expectBlockExecuted()` |
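A sketch of how these compose (zero-argument factory calls mirror the table above; any override arguments are not shown and their shapes are assumptions):
```typescript
/**
 * @vitest-environment node
 */
import { createSession, createWorkflowRecord, setupGlobalFetchMock } from '@sim/testing'
import { describe, expect, it } from 'vitest'

// Stub global fetch before any code under test runs (assumed zero-arg default)
setupGlobalFetchMock()

describe('workflow factories', () => {
  it.concurrent('produces a session and a workflow record', () => {
    const session = createSession()
    const workflow = createWorkflowRecord()
    expect(session).toBeDefined()
    expect(workflow).toBeDefined()
  })
})
```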
## Rules
1. `@vitest-environment node` directive at file top
2. `vi.mock()` calls before importing mocked modules
3. `@sim/testing` utilities over local mocks
4. `it.concurrent` for isolated tests (no shared mutable state)
5. `beforeEach(() => vi.clearAllMocks())` to reset state
## Hoisted Mocks
For mutable mock references:
```typescript
const mockFn = vi.hoisted(() => vi.fn())
vi.mock('@/lib/module', () => ({ myFunction: mockFn }))
mockFn.mockResolvedValue({ data: 'test' })
```


@@ -0,0 +1,21 @@
---
paths:
- "apps/sim/**/*.ts"
- "apps/sim/**/*.tsx"
---
# TypeScript Rules
1. **No `any`** - Use proper types or `unknown` with type guards
2. **Props interface** - Always define for components
3. **Const assertions** - `as const` for constant objects/arrays
4. **Ref types** - Explicit: `useRef<HTMLDivElement>(null)`
5. **Type imports** - `import type { X }` for type-only imports
```typescript
// ✗ Bad
const handleClick = (e: any) => {}
// ✓ Good
const handleClick = (e: React.MouseEvent<HTMLButtonElement>) => {}
```


@@ -8,7 +8,7 @@ alwaysApply: true
 You are a professional software engineer. All code must follow best practices: accurate, readable, clean, and efficient.
 ## Logging
-Import `createLogger` from `sim/logger`. Use `logger.info`, `logger.warn`, `logger.error` instead of `console.log`.
+Import `createLogger` from `@sim/logger`. Use `logger.info`, `logger.warn`, `logger.error` instead of `console.log`.
 ## Comments
 Use TSDoc for documentation. No `====` separators. No non-TSDoc comments.


@@ -9,12 +9,12 @@
 <p align="center">
   <a href="https://sim.ai" target="_blank" rel="noopener noreferrer"><img src="https://img.shields.io/badge/sim.ai-6F3DFA" alt="Sim.ai"></a>
   <a href="https://discord.gg/Hr4UWYEcTT" target="_blank" rel="noopener noreferrer"><img src="https://img.shields.io/badge/Discord-Join%20Server-5865F2?logo=discord&logoColor=white" alt="Discord"></a>
-  <a href="https://x.com/simdotai" target="_blank" rel="noopener noreferrer"><img src="https://img.shields.io/twitter/follow/simstudioai?style=social" alt="Twitter"></a>
-  <a href="https://docs.sim.ai" target="_blank" rel="noopener noreferrer"><img src="https://img.shields.io/badge/Docs-6F3DFA.svg" alt="Documentation"></a>
+  <a href="https://x.com/simdotai" target="_blank" rel="noopener noreferrer"><img src="https://img.shields.io/twitter/follow/simdotai?style=social" alt="Twitter"></a>
+  <a href="https://deepwiki.com/simstudioai/sim" target="_blank" rel="noopener noreferrer"><img src="https://img.shields.io/badge/DeepWiki-1E90FF.svg" alt="DeepWiki"></a> <a href="https://docs.sim.ai" target="_blank" rel="noopener noreferrer"><img src="https://img.shields.io/badge/Docs-6F3DFA.svg" alt="Documentation"></a>
 </p>
 <p align="center">
-  <a href="https://cursor.com/link/prompt?text=Help%20me%20set%20up%20Sim%20Studio%20locally.%20Follow%20these%20steps%3A%0A%0A1.%20First%2C%20verify%20Docker%20is%20installed%20and%20running%3A%0A%20%20%20docker%20--version%0A%20%20%20docker%20info%0A%0A2.%20Clone%20the%20repository%3A%0A%20%20%20git%20clone%20https%3A%2F%2Fgithub.com%2Fsimstudioai%2Fsim.git%0A%20%20%20cd%20sim%0A%0A3.%20Start%20the%20services%20with%20Docker%20Compose%3A%0A%20%20%20docker%20compose%20-f%20docker-compose.prod.yml%20up%20-d%0A%0A4.%20Wait%20for%20all%20containers%20to%20be%20healthy%20(this%20may%20take%201-2%20minutes)%3A%0A%20%20%20docker%20compose%20-f%20docker-compose.prod.yml%20ps%0A%0A5.%20Verify%20the%20app%20is%20accessible%20at%20http%3A%2F%2Flocalhost%3A3000%0A%0AIf%20there%20are%20any%20errors%2C%20help%20me%20troubleshoot%20them.%20Common%20issues%3A%0A-%20Port%203000%2C%203002%2C%20or%205432%20already%20in%20use%0A-%20Docker%20not%20running%0A-%20Insufficient%20memory%20(needs%2012GB%2B%20RAM)%0A%0AFor%20local%20AI%20models%20with%20Ollama%2C%20use%20this%20instead%20of%20step%203%3A%0A%20%20%20docker%20compose%20-f%20docker-compose.ollama.yml%20--profile%20setup%20up%20-d"><img src="https://img.shields.io/badge/Set%20Up%20with-Cursor-000000?logo=cursor&logoColor=white" alt="Set Up with Cursor"></a> <a href="https://deepwiki.com/simstudioai/sim" target="_blank" rel="noopener noreferrer"><img src="https://deepwiki.com/badge.svg" alt="Ask DeepWiki"></a>
+  <a href="https://cursor.com/link/prompt?text=Help%20me%20set%20up%20Sim%20locally.%20Follow%20these%20steps%3A%0A%0A1.%20First%2C%20verify%20Docker%20is%20installed%20and%20running%3A%0A%20%20%20docker%20--version%0A%20%20%20docker%20info%0A%0A2.%20Clone%20the%20repository%3A%0A%20%20%20git%20clone%20https%3A%2F%2Fgithub.com%2Fsimstudioai%2Fsim.git%0A%20%20%20cd%20sim%0A%0A3.%20Start%20the%20services%20with%20Docker%20Compose%3A%0A%20%20%20docker%20compose%20-f%20docker-compose.prod.yml%20up%20-d%0A%0A4.%20Wait%20for%20all%20containers%20to%20be%20healthy%20(this%20may%20take%201-2%20minutes)%3A%0A%20%20%20docker%20compose%20-f%20docker-compose.prod.yml%20ps%0A%0A5.%20Verify%20the%20app%20is%20accessible%20at%20http%3A%2F%2Flocalhost%3A3000%0A%0AIf%20there%20are%20any%20errors%2C%20help%20me%20troubleshoot%20them.%20Common%20issues%3A%0A-%20Port%203000%2C%203002%2C%20or%205432%20already%20in%20use%0A-%20Docker%20not%20running%0A-%20Insufficient%20memory%20(needs%2012GB%2B%20RAM)%0A%0AFor%20local%20AI%20models%20with%20Ollama%2C%20use%20this%20instead%20of%20step%203%3A%0A%20%20%20docker%20compose%20-f%20docker-compose.ollama.yml%20--profile%20setup%20up%20-d"><img src="https://img.shields.io/badge/Set%20Up%20with-Cursor-000000?logo=cursor&logoColor=white" alt="Set Up with Cursor"></a>
 </p>
 ### Build Workflows with Ease


@@ -86,27 +86,112 @@ export async function GET(request: NextRequest) {
     )
     .limit(candidateLimit)
 
-  const seenIds = new Set<string>()
-  const mergedResults = []
-
-  for (let i = 0; i < Math.max(vectorResults.length, keywordResults.length); i++) {
-    if (i < vectorResults.length && !seenIds.has(vectorResults[i].chunkId)) {
-      mergedResults.push(vectorResults[i])
-      seenIds.add(vectorResults[i].chunkId)
-    }
-    if (i < keywordResults.length && !seenIds.has(keywordResults[i].chunkId)) {
-      mergedResults.push(keywordResults[i])
-      seenIds.add(keywordResults[i].chunkId)
-    }
-  }
-
-  const filteredResults = mergedResults.slice(0, limit)
-
-  const searchResults = filteredResults.map((result) => {
+  const knownLocales = ['en', 'es', 'fr', 'de', 'ja', 'zh']
+
+  const vectorRankMap = new Map<string, number>()
+  vectorResults.forEach((r, idx) => vectorRankMap.set(r.chunkId, idx + 1))
+
+  const keywordRankMap = new Map<string, number>()
+  keywordResults.forEach((r, idx) => keywordRankMap.set(r.chunkId, idx + 1))
+
+  const allChunkIds = new Set([
+    ...vectorResults.map((r) => r.chunkId),
+    ...keywordResults.map((r) => r.chunkId),
+  ])
+
+  const k = 60
+  type ResultWithRRF = (typeof vectorResults)[0] & { rrfScore: number }
+  const scoredResults: ResultWithRRF[] = []
+
+  for (const chunkId of allChunkIds) {
+    const vectorRank = vectorRankMap.get(chunkId) ?? Number.POSITIVE_INFINITY
+    const keywordRank = keywordRankMap.get(chunkId) ?? Number.POSITIVE_INFINITY
+    const rrfScore = 1 / (k + vectorRank) + 1 / (k + keywordRank)
+    const result =
+      vectorResults.find((r) => r.chunkId === chunkId) ||
+      keywordResults.find((r) => r.chunkId === chunkId)
+    if (result) {
+      scoredResults.push({ ...result, rrfScore })
+    }
+  }
+
+  scoredResults.sort((a, b) => b.rrfScore - a.rrfScore)
+
+  const localeFilteredResults = scoredResults.filter((result) => {
+    const firstPart = result.sourceDocument.split('/')[0]
+    if (knownLocales.includes(firstPart)) {
+      return firstPart === locale
+    }
+    return locale === 'en'
+  })
+
+  const queryLower = query.toLowerCase()
+  const getTitleBoost = (result: ResultWithRRF): number => {
+    const fileName = result.sourceDocument
+      .replace('.mdx', '')
+      .split('/')
+      .pop()
+      ?.toLowerCase()
+      ?.replace(/_/g, ' ')
+    if (fileName === queryLower) return 0.01
+    if (fileName?.includes(queryLower)) return 0.005
+    return 0
+  }
+
+  localeFilteredResults.sort((a, b) => {
+    return b.rrfScore + getTitleBoost(b) - (a.rrfScore + getTitleBoost(a))
+  })
+
+  const pageMap = new Map<string, ResultWithRRF>()
+  for (const result of localeFilteredResults) {
+    const pageKey = result.sourceDocument
+    const existing = pageMap.get(pageKey)
+    if (!existing || result.rrfScore > existing.rrfScore) {
+      pageMap.set(pageKey, result)
+    }
+  }
+
+  const deduplicatedResults = Array.from(pageMap.values())
+    .sort((a, b) => b.rrfScore + getTitleBoost(b) - (a.rrfScore + getTitleBoost(a)))
+    .slice(0, limit)
+
+  const searchResults = deduplicatedResults.map((result) => {
     const title = result.headerText || result.sourceDocument.replace('.mdx', '')
     const pathParts = result.sourceDocument
       .replace('.mdx', '')
       .split('/')
-      .map((part) => part.charAt(0).toUpperCase() + part.slice(1))
+      .filter((part) => part !== 'index' && !knownLocales.includes(part))
+      .map((part) => {
+        return part
+          .replace(/_/g, ' ')
+          .split(' ')
+          .map((word) => {
+            const acronyms = [
+              'api',
+              'mcp',
+              'sdk',
+              'url',
+              'http',
+              'json',
+              'xml',
+              'html',
+              'css',
+              'ai',
+            ]
+            if (acronyms.includes(word.toLowerCase())) {
+              return word.toUpperCase()
+            }
+            return word.charAt(0).toUpperCase() + word.slice(1)
+          })
+          .join(' ')
+      })
 
     return {
       id: result.chunkId,


@@ -1739,12 +1739,12 @@ export function BrowserUseIcon(props: SVGProps<SVGSVGElement>) {
       {...props}
       version='1.0'
       xmlns='http://www.w3.org/2000/svg'
-      width='150pt'
-      height='150pt'
+      width='28'
+      height='28'
       viewBox='0 0 150 150'
       preserveAspectRatio='xMidYMid meet'
     >
-      <g transform='translate(0,150) scale(0.05,-0.05)' fill='#000000' stroke='none'>
+      <g transform='translate(0,150) scale(0.05,-0.05)' fill='currentColor' stroke='none'>
         <path
           d='M786 2713 c-184 -61 -353 -217 -439 -405 -76 -165 -65 -539 19 -666
           l57 -85 -48 -124 c-203 -517 -79 -930 346 -1155 159 -85 441 -71 585 28 l111
@@ -4093,6 +4093,23 @@ export function SQSIcon(props: SVGProps<SVGSVGElement>) {
  )
}
export function TextractIcon(props: SVGProps<SVGSVGElement>) {
return (
<svg
{...props}
viewBox='10 14 60 52'
version='1.1'
xmlns='http://www.w3.org/2000/svg'
xmlnsXlink='http://www.w3.org/1999/xlink'
>
<path
d='M22.0624102,50 C24.3763895,53.603 28.4103535,56 33.0003125,56 C40.1672485,56 45.9991964,50.168 45.9991964,43 C45.9991964,35.832 40.1672485,30 33.0003125,30 C27.6033607,30 22.9664021,33.307 21.0024196,38 L23.2143999,38 C25.0393836,34.444 28.7363506,32 33.0003125,32 C39.0652583,32 43.9992143,36.935 43.9992143,43 C43.9992143,49.065 39.0652583,54 33.0003125,54 C29.5913429,54 26.5413702,52.441 24.5213882,50 L22.0624102,50 Z M37.0002768,45 L37.0002768,43 L41.9992321,43 C41.9992321,38.038 37.9622682,34 33.0003125,34 C28.0373568,34 23.9993929,38.038 23.9993929,43 L28.9993482,43 L28.9993482,45 L24.2313908,45 C25.1443826,49.002 28.7253507,52 33.0003125,52 C35.1362934,52 37.0992759,51.249 38.6442621,50 L34.0003036,50 L34.0003036,48 L40.4782457,48 C41.0812403,47.102 41.5202364,46.087 41.7682342,45 L37.0002768,45 Z M21.0024196,48 L23.2143999,48 C22.4434068,46.498 22.0004107,44.801 22.0004107,43 C22.0004107,41.959 22.1554093,40.955 22.4264069,40 L20.3634253,40 C20.1344274,40.965 19.9994286,41.966 19.9994286,43 C19.9994286,44.771 20.3584254,46.46 21.0024196,48 L21.0024196,48 Z M19.7434309,50 L17.0004554,50 L17.0004554,48 L18.8744386,48 C18.5344417,47.04 18.2894438,46.038 18.1494451,45 L15.4144695,45 L16.707458,46.293 L15.2924706,47.707 L12.2924974,44.707 C11.9025009,44.316 11.9025009,43.684 12.2924974,43.293 L15.2924706,40.293 L16.707458,41.707 L15.4144695,43 L18.0004464,43 C18.0004464,41.973 18.1044455,40.97 18.3024437,40 L17.0004554,40 L17.0004554,38 L18.8744386,38 C20.9404202,32.184 26.4833707,28 33.0003125,28 C37.427273,28 41.4002375,29.939 44.148213,33 L59.0000804,33 L59.0000804,35 L45.6661994,35 C47.1351863,37.318 47.9991786,40.058 47.9991786,43 L59.0000804,43 L59.0000804,45 L47.8501799,45 C46.8681887,52.327 40.5912447,58 33.0003125,58 C27.2563638,58 22.2624084,54.752 19.7434309,50 L19.7434309,50 Z M37.0002768,39 C37.0002768,38.448 36.5522808,38 36.0002857,38 L29.9993482,38 C29.4473442,38 28.9993482,38.448 28.9993482,39 L28.9993482,41 L31.0003304,41 L31.0003304,40 L32.0003214,40 L32.0003214,43 L31.0003304,43 L31.0003304,45 L35.0002946,45 L35.0002946,43 L34.0003036,43 L34.0003036,40 L35.0002946,40 L35.0002946,41 L37.0002768,41 L37.0002768,39 Z M49.0001696,40 L59.0000804,40 L59.0000804,38 L49.0001696,38 L49.0001696,40 Z M49.0001696,50 L59.0000804,50 L59.0000804,48 L49.0001696,48 L49.0001696,50 Z M57.0000982,27 L60.5850662,27 L57.0000982,23.414 L57.0000982,27 Z M63.7070383,27.293 C63.8940367,27.48 64.0000357,27.735 64.0000357,28 L64.0000357,63 C64.0000357,63.552 63.5520397,64 63.0000446,64 L32.0003304,64 C31.4473264,64 31.0003304,63.552 31.0003304,63 L31.0003304,59 L33.0003125,59 L33.0003125,62 L62.0000536,62 L62.0000536,29 L56.0001071,29 C55.4471121,29 55.0001161,28.552 55.0001161,28 L55.0001161,22 L33.0003125,22 L33.0003125,27 L31.0003304,27 L31.0003304,21 C31.0003304,20.448 31.4473264,20 32.0003304,20 L56.0001071,20 C56.2651048,20 56.5191025,20.105 56.7071008,20.293 L63.7070383,27.293 Z M68,24.166 L68,61 C68,61.552 67.552004,62 67.0000089,62 L65.0000268,62 L65.0000268,60 L66.0000179,60 L66.0000179,24.612 L58.6170838,18 L36.0002857,18 L36.0002857,19 L34.0003036,19 L34.0003036,17 C34.0003036,16.448 34.4472996,16 35.0003036,16 L59.0000804,16 C59.2460782,16 59.483076,16.091 59.6660744,16.255 L67.666003,23.42 C67.8780011,23.61 68,23.881 68,24.166 L68,24.166 Z'
fill='currentColor'
/>
</svg>
)
}
export function McpIcon(props: SVGProps<SVGSVGElement>) {
  return (
    <svg


@@ -110,6 +110,7 @@ import {
   SupabaseIcon,
   TavilyIcon,
   TelegramIcon,
+  TextractIcon,
   TinybirdIcon,
   TranslateIcon,
   TrelloIcon,
@@ -143,7 +144,7 @@ export const blockTypeToIconMap: Record<string, IconComponent> = {
   calendly: CalendlyIcon,
   circleback: CirclebackIcon,
   clay: ClayIcon,
-  confluence: ConfluenceIcon,
+  confluence_v2: ConfluenceIcon,
   cursor_v2: CursorIcon,
   datadog: DatadogIcon,
   discord: DiscordIcon,
@@ -153,7 +154,7 @@ export const blockTypeToIconMap: Record<string, IconComponent> = {
   elasticsearch: ElasticsearchIcon,
   elevenlabs: ElevenLabsIcon,
   exa: ExaAIIcon,
-  file: DocumentIcon,
+  file_v2: DocumentIcon,
   firecrawl: FirecrawlIcon,
   fireflies: FirefliesIcon,
   github_v2: GithubIcon,
@@ -195,7 +196,7 @@ export const blockTypeToIconMap: Record<string, IconComponent> = {
   microsoft_excel_v2: MicrosoftExcelIcon,
   microsoft_planner: MicrosoftPlannerIcon,
   microsoft_teams: MicrosoftTeamsIcon,
-  mistral_parse: MistralIcon,
+  mistral_parse_v2: MistralIcon,
   mongodb: MongoDBIcon,
   mysql: MySQLIcon,
   neo4j: Neo4jIcon,
@@ -237,6 +238,7 @@ export const blockTypeToIconMap: Record<string, IconComponent> = {
   supabase: SupabaseIcon,
   tavily: TavilyIcon,
   telegram: TelegramIcon,
+  textract: TextractIcon,
   tinybird: TinybirdIcon,
   translate: TranslateIcon,
   trello: TrelloIcon,
@@ -244,7 +246,7 @@ export const blockTypeToIconMap: Record<string, IconComponent> = {
   twilio_sms: TwilioIcon,
   twilio_voice: TwilioIcon,
   typeform: TypeformIcon,
-  video_generator: VideoIcon,
+  video_generator_v2: VideoIcon,
   vision: EyeIcon,
   wealthbox: WealthboxIcon,
   webflow: WebflowIcon,


@@ -7,7 +7,7 @@ import { BlockInfoCard } from "@/components/ui/block-info-card"
 <BlockInfoCard
   type="browser_use"
-  color="#E0E0E0"
+  color="#181C1E"
 />
 {/* MANUAL-CONTENT-START:intro */}


@@ -6,7 +6,7 @@ description: Interact with Confluence
 import { BlockInfoCard } from "@/components/ui/block-info-card"
 <BlockInfoCard
-  type="confluence"
+  type="confluence_v2"
   color="#E0E0E0"
 />


@@ -6,7 +6,7 @@ description: Read and parse multiple files
 import { BlockInfoCard } from "@/components/ui/block-info-card"
 <BlockInfoCard
-  type="file"
+  type="file_v2"
   color="#40916C"
 />
@@ -48,7 +48,7 @@ Parse one or more uploaded files or files from URLs (text, PDF, CSV, images, etc
 | Parameter | Type | Description |
 | --------- | ---- | ----------- |
-| `files` | array | Array of parsed files |
-| `combinedContent` | string | Combined content of all parsed files |
+| `files` | array | Array of parsed files with content, metadata, and file properties |
+| `combinedContent` | string | All file contents merged into a single text string |

File diff suppressed because it is too large


@@ -119,6 +119,145 @@ Get a specific event from Google Calendar. Returns API-aligned fields only.
| `creator` | json | Event creator |
| `organizer` | json | Event organizer |
### `google_calendar_update`
Update an existing event in Google Calendar. Returns API-aligned fields only.
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `calendarId` | string | No | Calendar ID \(defaults to primary\) |
| `eventId` | string | Yes | Event ID to update |
| `summary` | string | No | New event title/summary |
| `description` | string | No | New event description |
| `location` | string | No | New event location |
| `startDateTime` | string | No | New start date and time. MUST include timezone offset \(e.g., 2025-06-03T10:00:00-08:00\) OR provide timeZone parameter |
| `endDateTime` | string | No | New end date and time. MUST include timezone offset \(e.g., 2025-06-03T11:00:00-08:00\) OR provide timeZone parameter |
| `timeZone` | string | No | Time zone \(e.g., America/Los_Angeles\). Required if datetime does not include offset. |
| `attendees` | array | No | Array of attendee email addresses \(replaces existing attendees\) |
| `sendUpdates` | string | No | How to send updates to attendees: all, externalOnly, or none |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `id` | string | Event ID |
| `htmlLink` | string | Event link |
| `status` | string | Event status |
| `summary` | string | Event title |
| `description` | string | Event description |
| `location` | string | Event location |
| `start` | json | Event start |
| `end` | json | Event end |
| `attendees` | json | Event attendees |
| `creator` | json | Event creator |
| `organizer` | json | Event organizer |
### `google_calendar_delete`
Delete an event from Google Calendar. Returns API-aligned fields only.
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `calendarId` | string | No | Calendar ID \(defaults to primary\) |
| `eventId` | string | Yes | Event ID to delete |
| `sendUpdates` | string | No | How to send updates to attendees: all, externalOnly, or none |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `eventId` | string | Deleted event ID |
| `deleted` | boolean | Whether deletion was successful |
### `google_calendar_move`
Move an event to a different calendar. Returns API-aligned fields only.
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `calendarId` | string | No | Source calendar ID \(defaults to primary\) |
| `eventId` | string | Yes | Event ID to move |
| `destinationCalendarId` | string | Yes | Destination calendar ID |
| `sendUpdates` | string | No | How to send updates to attendees: all, externalOnly, or none |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `id` | string | Event ID |
| `htmlLink` | string | Event link |
| `status` | string | Event status |
| `summary` | string | Event title |
| `description` | string | Event description |
| `location` | string | Event location |
| `start` | json | Event start |
| `end` | json | Event end |
| `attendees` | json | Event attendees |
| `creator` | json | Event creator |
| `organizer` | json | Event organizer |
### `google_calendar_instances`
Get instances of a recurring event from Google Calendar. Returns API-aligned fields only.
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `calendarId` | string | No | Calendar ID \(defaults to primary\) |
| `eventId` | string | Yes | Recurring event ID to get instances of |
| `timeMin` | string | No | Lower bound for instances \(RFC3339 timestamp, e.g., 2025-06-03T00:00:00Z\) |
| `timeMax` | string | No | Upper bound for instances \(RFC3339 timestamp, e.g., 2025-06-04T00:00:00Z\) |
| `maxResults` | number | No | Maximum number of instances to return \(default 250, max 2500\) |
| `pageToken` | string | No | Token for retrieving subsequent pages of results |
| `showDeleted` | boolean | No | Include deleted instances |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `nextPageToken` | string | Next page token |
| `timeZone` | string | Calendar time zone |
| `instances` | json | List of recurring event instances |
### `google_calendar_list_calendars`
List all calendars in the user's calendar list
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `minAccessRole` | string | No | Minimum access role for returned calendars: freeBusyReader, reader, writer, or owner |
| `maxResults` | number | No | Maximum number of calendars to return \(default 100, max 250\) |
| `pageToken` | string | No | Token for retrieving subsequent pages of results |
| `showDeleted` | boolean | No | Include deleted calendars |
| `showHidden` | boolean | No | Include hidden calendars |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `nextPageToken` | string | Next page token |
| `calendars` | array | List of calendars |
| ↳ `id` | string | Calendar ID |
| ↳ `summary` | string | Calendar title |
| ↳ `description` | string | Calendar description |
| ↳ `location` | string | Calendar location |
| ↳ `timeZone` | string | Calendar time zone |
| ↳ `accessRole` | string | Access role for the calendar |
| ↳ `backgroundColor` | string | Calendar background color |
| ↳ `foregroundColor` | string | Calendar foreground color |
| ↳ `primary` | boolean | Whether this is the primary calendar |
| ↳ `hidden` | boolean | Whether the calendar is hidden |
| ↳ `selected` | boolean | Whether the calendar is selected |
### `google_calendar_quick_add`
Create events from natural language text. Returns API-aligned fields only.


@@ -1,6 +1,6 @@
 ---
 title: Google Drive
-description: Create, upload, and list files
+description: Manage files, folders, and permissions
 ---
 import { BlockInfoCard } from "@/components/ui/block-info-card"
@@ -40,217 +40,12 @@ In Sim, the Google Drive integration enables your agents to interact directly wi
 ## Usage Instructions
-Integrate Google Drive into the workflow. Can create, upload, and list files.
+Integrate Google Drive into the workflow. Can create, upload, download, copy, move, delete, share files and manage permissions.
 ## Tools
### `google_drive_upload`
Upload a file to Google Drive with complete metadata returned
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `fileName` | string | Yes | The name of the file to upload |
| `file` | file | No | Binary file to upload \(UserFile object\) |
| `content` | string | No | Text content to upload \(use this OR file, not both\) |
| `mimeType` | string | No | The MIME type of the file to upload \(auto-detected from file if not provided\) |
| `folderSelector` | string | No | Select the folder to upload the file to |
| `folderId` | string | No | The ID of the folder to upload the file to \(internal use\) |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `file` | object | Complete uploaded file metadata from Google Drive |
| ↳ `id` | string | Google Drive file ID |
| ↳ `name` | string | File name |
| ↳ `mimeType` | string | MIME type |
| ↳ `kind` | string | Resource type identifier |
| ↳ `description` | string | File description |
| ↳ `originalFilename` | string | Original uploaded filename |
| ↳ `fullFileExtension` | string | Full file extension |
| ↳ `fileExtension` | string | File extension |
| ↳ `owners` | json | List of file owners |
| ↳ `permissions` | json | File permissions |
| ↳ `permissionIds` | json | Permission IDs |
| ↳ `shared` | boolean | Whether file is shared |
| ↳ `ownedByMe` | boolean | Whether owned by current user |
| ↳ `writersCanShare` | boolean | Whether writers can share |
| ↳ `viewersCanCopyContent` | boolean | Whether viewers can copy |
| ↳ `copyRequiresWriterPermission` | boolean | Whether copy requires writer permission |
| ↳ `sharingUser` | json | User who shared the file |
| ↳ `starred` | boolean | Whether file is starred |
| ↳ `trashed` | boolean | Whether file is in trash |
| ↳ `explicitlyTrashed` | boolean | Whether explicitly trashed |
| ↳ `appProperties` | json | App-specific properties |
| ↳ `createdTime` | string | File creation time |
| ↳ `modifiedTime` | string | Last modification time |
| ↳ `modifiedByMeTime` | string | When modified by current user |
| ↳ `viewedByMeTime` | string | When last viewed by current user |
| ↳ `sharedWithMeTime` | string | When shared with current user |
| ↳ `lastModifyingUser` | json | User who last modified the file |
| ↳ `viewedByMe` | boolean | Whether viewed by current user |
| ↳ `modifiedByMe` | boolean | Whether modified by current user |
| ↳ `webViewLink` | string | URL to view in browser |
| ↳ `webContentLink` | string | Direct download URL |
| ↳ `iconLink` | string | URL to file icon |
| ↳ `thumbnailLink` | string | URL to thumbnail |
| ↳ `exportLinks` | json | Export format links |
| ↳ `size` | string | File size in bytes |
| ↳ `quotaBytesUsed` | string | Storage quota used |
| ↳ `md5Checksum` | string | MD5 hash |
| ↳ `sha1Checksum` | string | SHA-1 hash |
| ↳ `sha256Checksum` | string | SHA-256 hash |
| ↳ `parents` | json | Parent folder IDs |
| ↳ `spaces` | json | Spaces containing file |
| ↳ `driveId` | string | Shared drive ID |
| ↳ `capabilities` | json | User capabilities on file |
| ↳ `version` | string | Version number |
| ↳ `headRevisionId` | string | Head revision ID |
| ↳ `hasThumbnail` | boolean | Whether has thumbnail |
| ↳ `thumbnailVersion` | string | Thumbnail version |
| ↳ `imageMediaMetadata` | json | Image-specific metadata |
| ↳ `videoMediaMetadata` | json | Video-specific metadata |
| ↳ `isAppAuthorized` | boolean | Whether created by requesting app |
| ↳ `contentRestrictions` | json | Content restrictions |
| ↳ `linkShareMetadata` | json | Link share metadata |
### `google_drive_create_folder`
Create a new folder in Google Drive with complete metadata returned
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `fileName` | string | Yes | Name of the folder to create |
| `folderSelector` | string | No | Select the parent folder to create the folder in |
| `folderId` | string | No | ID of the parent folder \(internal use\) |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `file` | object | Complete created folder metadata from Google Drive |
| ↳ `id` | string | Google Drive folder ID |
| ↳ `name` | string | Folder name |
| ↳ `mimeType` | string | MIME type \(application/vnd.google-apps.folder\) |
| ↳ `kind` | string | Resource type identifier |
| ↳ `description` | string | Folder description |
| ↳ `owners` | json | List of folder owners |
| ↳ `permissions` | json | Folder permissions |
| ↳ `permissionIds` | json | Permission IDs |
| ↳ `shared` | boolean | Whether folder is shared |
| ↳ `ownedByMe` | boolean | Whether owned by current user |
| ↳ `writersCanShare` | boolean | Whether writers can share |
| ↳ `viewersCanCopyContent` | boolean | Whether viewers can copy |
| ↳ `copyRequiresWriterPermission` | boolean | Whether copy requires writer permission |
| ↳ `sharingUser` | json | User who shared the folder |
| ↳ `starred` | boolean | Whether folder is starred |
| ↳ `trashed` | boolean | Whether folder is in trash |
| ↳ `explicitlyTrashed` | boolean | Whether explicitly trashed |
| ↳ `appProperties` | json | App-specific properties |
| ↳ `folderColorRgb` | string | Folder color |
| ↳ `createdTime` | string | Folder creation time |
| ↳ `modifiedTime` | string | Last modification time |
| ↳ `modifiedByMeTime` | string | When modified by current user |
| ↳ `viewedByMeTime` | string | When last viewed by current user |
| ↳ `sharedWithMeTime` | string | When shared with current user |
| ↳ `lastModifyingUser` | json | User who last modified the folder |
| ↳ `viewedByMe` | boolean | Whether viewed by current user |
| ↳ `modifiedByMe` | boolean | Whether modified by current user |
| ↳ `webViewLink` | string | URL to view in browser |
| ↳ `iconLink` | string | URL to folder icon |
| ↳ `parents` | json | Parent folder IDs |
| ↳ `spaces` | json | Spaces containing folder |
| ↳ `driveId` | string | Shared drive ID |
| ↳ `capabilities` | json | User capabilities on folder |
| ↳ `version` | string | Version number |
| ↳ `isAppAuthorized` | boolean | Whether created by requesting app |
| ↳ `contentRestrictions` | json | Content restrictions |
| ↳ `linkShareMetadata` | json | Link share metadata |
### `google_drive_download`
Download a file from Google Drive with complete metadata (exports Google Workspace files automatically)
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `fileId` | string | Yes | The ID of the file to download |
| `mimeType` | string | No | The MIME type to export Google Workspace files to \(optional\) |
| `fileName` | string | No | Optional filename override |
| `includeRevisions` | boolean | No | Whether to include revision history in the metadata \(default: true, returns first 100 revisions\) |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `file` | object | Downloaded file data |
| ↳ `name` | string | File name |
| ↳ `mimeType` | string | MIME type of the file |
| ↳ `data` | string | File content as base64-encoded string |
| ↳ `size` | number | File size in bytes |
| `metadata` | object | Complete file metadata from Google Drive |
| ↳ `id` | string | Google Drive file ID |
| ↳ `name` | string | File name |
| ↳ `mimeType` | string | MIME type |
| ↳ `kind` | string | Resource type identifier |
| ↳ `description` | string | File description |
| ↳ `originalFilename` | string | Original uploaded filename |
| ↳ `fullFileExtension` | string | Full file extension |
| ↳ `fileExtension` | string | File extension |
| ↳ `owners` | json | List of file owners |
| ↳ `permissions` | json | File permissions |
| ↳ `permissionIds` | json | Permission IDs |
| ↳ `shared` | boolean | Whether file is shared |
| ↳ `ownedByMe` | boolean | Whether owned by current user |
| ↳ `writersCanShare` | boolean | Whether writers can share |
| ↳ `viewersCanCopyContent` | boolean | Whether viewers can copy |
| ↳ `copyRequiresWriterPermission` | boolean | Whether copy requires writer permission |
| ↳ `sharingUser` | json | User who shared the file |
| ↳ `starred` | boolean | Whether file is starred |
| ↳ `trashed` | boolean | Whether file is in trash |
| ↳ `explicitlyTrashed` | boolean | Whether explicitly trashed |
| ↳ `appProperties` | json | App-specific properties |
| ↳ `createdTime` | string | File creation time |
| ↳ `modifiedTime` | string | Last modification time |
| ↳ `modifiedByMeTime` | string | When modified by current user |
| ↳ `viewedByMeTime` | string | When last viewed by current user |
| ↳ `sharedWithMeTime` | string | When shared with current user |
| ↳ `lastModifyingUser` | json | User who last modified the file |
| ↳ `viewedByMe` | boolean | Whether viewed by current user |
| ↳ `modifiedByMe` | boolean | Whether modified by current user |
| ↳ `webViewLink` | string | URL to view in browser |
| ↳ `webContentLink` | string | Direct download URL |
| ↳ `iconLink` | string | URL to file icon |
| ↳ `thumbnailLink` | string | URL to thumbnail |
| ↳ `exportLinks` | json | Export format links |
| ↳ `size` | string | File size in bytes |
| ↳ `quotaBytesUsed` | string | Storage quota used |
| ↳ `md5Checksum` | string | MD5 hash |
| ↳ `sha1Checksum` | string | SHA-1 hash |
| ↳ `sha256Checksum` | string | SHA-256 hash |
| ↳ `parents` | json | Parent folder IDs |
| ↳ `spaces` | json | Spaces containing file |
| ↳ `driveId` | string | Shared drive ID |
| ↳ `capabilities` | json | User capabilities on file |
| ↳ `version` | string | Version number |
| ↳ `headRevisionId` | string | Head revision ID |
| ↳ `hasThumbnail` | boolean | Whether has thumbnail |
| ↳ `thumbnailVersion` | string | Thumbnail version |
| ↳ `imageMediaMetadata` | json | Image-specific metadata |
| ↳ `videoMediaMetadata` | json | Video-specific metadata |
| ↳ `isAppAuthorized` | boolean | Whether created by requesting app |
| ↳ `contentRestrictions` | json | Content restrictions |
| ↳ `linkShareMetadata` | json | Link share metadata |
| ↳ `revisions` | json | File revision history \(first 100 revisions only\) |
### `google_drive_list`
List files and folders in Google Drive with complete metadata
@@ -271,9 +66,9 @@ List files and folders in Google Drive with complete metadata
| --------- | ---- | ----------- |
| `files` | array | Array of file metadata objects from Google Drive |
| ↳ `id` | string | Google Drive file ID |
| ↳ `kind` | string | Resource type identifier |
| ↳ `name` | string | File name |
| ↳ `mimeType` | string | MIME type |
| ↳ `description` | string | File description |
| ↳ `originalFilename` | string | Original uploaded filename |
| ↳ `fullFileExtension` | string | Full file extension |
@@ -324,4 +119,455 @@ List files and folders in Google Drive with complete metadata
| ↳ `linkShareMetadata` | json | Link share metadata |
| `nextPageToken` | string | Token for fetching the next page of results |
### `google_drive_get_file`
Get metadata for a specific file in Google Drive by its ID
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `fileId` | string | Yes | The ID of the file to retrieve |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `file` | json | The file metadata |
| ↳ `id` | string | Google Drive file ID |
| ↳ `kind` | string | Resource type identifier |
| ↳ `name` | string | File name |
| ↳ `mimeType` | string | MIME type |
| ↳ `description` | string | File description |
| ↳ `size` | string | File size in bytes |
| ↳ `starred` | boolean | Whether file is starred |
| ↳ `trashed` | boolean | Whether file is in trash |
| ↳ `webViewLink` | string | URL to view in browser |
| ↳ `webContentLink` | string | Direct download URL |
| ↳ `iconLink` | string | URL to file icon |
| ↳ `thumbnailLink` | string | URL to thumbnail |
| ↳ `parents` | json | Parent folder IDs |
| ↳ `owners` | json | List of file owners |
| ↳ `permissions` | json | File permissions |
| ↳ `createdTime` | string | File creation time |
| ↳ `modifiedTime` | string | Last modification time |
| ↳ `lastModifyingUser` | json | User who last modified the file |
| ↳ `shared` | boolean | Whether file is shared |
| ↳ `ownedByMe` | boolean | Whether owned by current user |
| ↳ `capabilities` | json | User capabilities on file |
| ↳ `md5Checksum` | string | MD5 hash |
| ↳ `version` | string | Version number |
### `google_drive_create_folder`
Create a new folder in Google Drive with complete metadata returned
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `fileName` | string | Yes | Name of the folder to create |
| `folderSelector` | string | No | Select the parent folder to create the folder in |
| `folderId` | string | No | ID of the parent folder \(internal use\) |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `file` | object | Complete created folder metadata from Google Drive |
| ↳ `id` | string | Google Drive folder ID |
| ↳ `kind` | string | Resource type identifier |
| ↳ `name` | string | Folder name |
| ↳ `mimeType` | string | MIME type \(application/vnd.google-apps.folder\) |
| ↳ `description` | string | Folder description |
| ↳ `owners` | json | List of folder owners |
| ↳ `permissions` | json | Folder permissions |
| ↳ `permissionIds` | json | Permission IDs |
| ↳ `shared` | boolean | Whether folder is shared |
| ↳ `ownedByMe` | boolean | Whether owned by current user |
| ↳ `writersCanShare` | boolean | Whether writers can share |
| ↳ `viewersCanCopyContent` | boolean | Whether viewers can copy |
| ↳ `copyRequiresWriterPermission` | boolean | Whether copy requires writer permission |
| ↳ `sharingUser` | json | User who shared the folder |
| ↳ `starred` | boolean | Whether folder is starred |
| ↳ `trashed` | boolean | Whether folder is in trash |
| ↳ `explicitlyTrashed` | boolean | Whether explicitly trashed |
| ↳ `appProperties` | json | App-specific properties |
| ↳ `folderColorRgb` | string | Folder color |
| ↳ `createdTime` | string | Folder creation time |
| ↳ `modifiedTime` | string | Last modification time |
| ↳ `modifiedByMeTime` | string | When modified by current user |
| ↳ `viewedByMeTime` | string | When last viewed by current user |
| ↳ `sharedWithMeTime` | string | When shared with current user |
| ↳ `lastModifyingUser` | json | User who last modified the folder |
| ↳ `viewedByMe` | boolean | Whether viewed by current user |
| ↳ `modifiedByMe` | boolean | Whether modified by current user |
| ↳ `webViewLink` | string | URL to view in browser |
| ↳ `iconLink` | string | URL to folder icon |
| ↳ `parents` | json | Parent folder IDs |
| ↳ `spaces` | json | Spaces containing folder |
| ↳ `driveId` | string | Shared drive ID |
| ↳ `capabilities` | json | User capabilities on folder |
| ↳ `version` | string | Version number |
| ↳ `isAppAuthorized` | boolean | Whether created by requesting app |
| ↳ `contentRestrictions` | json | Content restrictions |
| ↳ `linkShareMetadata` | json | Link share metadata |
### `google_drive_upload`
Upload a file to Google Drive with complete metadata returned
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `fileName` | string | Yes | The name of the file to upload |
| `file` | file | No | Binary file to upload \(UserFile object\) |
| `content` | string | No | Text content to upload \(use this OR file, not both\) |
| `mimeType` | string | No | The MIME type of the file to upload \(auto-detected from file if not provided\) |
| `folderSelector` | string | No | Select the folder to upload the file to |
| `folderId` | string | No | The ID of the folder to upload the file to \(internal use\) |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `file` | object | Complete uploaded file metadata from Google Drive |
| ↳ `id` | string | Google Drive file ID |
| ↳ `kind` | string | Resource type identifier |
| ↳ `name` | string | File name |
| ↳ `mimeType` | string | MIME type |
| ↳ `description` | string | File description |
| ↳ `originalFilename` | string | Original uploaded filename |
| ↳ `fullFileExtension` | string | Full file extension |
| ↳ `fileExtension` | string | File extension |
| ↳ `owners` | json | List of file owners |
| ↳ `permissions` | json | File permissions |
| ↳ `permissionIds` | json | Permission IDs |
| ↳ `shared` | boolean | Whether file is shared |
| ↳ `ownedByMe` | boolean | Whether owned by current user |
| ↳ `writersCanShare` | boolean | Whether writers can share |
| ↳ `viewersCanCopyContent` | boolean | Whether viewers can copy |
| ↳ `copyRequiresWriterPermission` | boolean | Whether copy requires writer permission |
| ↳ `sharingUser` | json | User who shared the file |
| ↳ `starred` | boolean | Whether file is starred |
| ↳ `trashed` | boolean | Whether file is in trash |
| ↳ `explicitlyTrashed` | boolean | Whether explicitly trashed |
| ↳ `appProperties` | json | App-specific properties |
| ↳ `createdTime` | string | File creation time |
| ↳ `modifiedTime` | string | Last modification time |
| ↳ `modifiedByMeTime` | string | When modified by current user |
| ↳ `viewedByMeTime` | string | When last viewed by current user |
| ↳ `sharedWithMeTime` | string | When shared with current user |
| ↳ `lastModifyingUser` | json | User who last modified the file |
| ↳ `viewedByMe` | boolean | Whether viewed by current user |
| ↳ `modifiedByMe` | boolean | Whether modified by current user |
| ↳ `webViewLink` | string | URL to view in browser |
| ↳ `webContentLink` | string | Direct download URL |
| ↳ `iconLink` | string | URL to file icon |
| ↳ `thumbnailLink` | string | URL to thumbnail |
| ↳ `exportLinks` | json | Export format links |
| ↳ `size` | string | File size in bytes |
| ↳ `quotaBytesUsed` | string | Storage quota used |
| ↳ `md5Checksum` | string | MD5 hash |
| ↳ `sha1Checksum` | string | SHA-1 hash |
| ↳ `sha256Checksum` | string | SHA-256 hash |
| ↳ `parents` | json | Parent folder IDs |
| ↳ `spaces` | json | Spaces containing file |
| ↳ `driveId` | string | Shared drive ID |
| ↳ `capabilities` | json | User capabilities on file |
| ↳ `version` | string | Version number |
| ↳ `headRevisionId` | string | Head revision ID |
| ↳ `hasThumbnail` | boolean | Whether has thumbnail |
| ↳ `thumbnailVersion` | string | Thumbnail version |
| ↳ `imageMediaMetadata` | json | Image-specific metadata |
| ↳ `videoMediaMetadata` | json | Video-specific metadata |
| ↳ `isAppAuthorized` | boolean | Whether created by requesting app |
| ↳ `contentRestrictions` | json | Content restrictions |
| ↳ `linkShareMetadata` | json | Link share metadata |
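As a sketch of what this tool's input can look like (the wrapper that invokes it is not shown, and the folder ID is a placeholder), note that `content` and `file` are alternatives, not companions:

```ts
// Hypothetical input payload for `google_drive_upload`; only the field
// names come from the table above, the values are illustrative.
const uploadInput = {
  fileName: 'notes.txt',
  // Use `content` for text, or `file` for a binary UserFile — not both.
  content: 'Meeting notes from Tuesday',
  mimeType: 'text/plain', // optional; auto-detected from `file` when omitted
  folderId: '<parent-folder-id>', // optional; omit to upload to My Drive root
}
```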
### `google_drive_download`
Download a file from Google Drive with complete metadata (exports Google Workspace files automatically)
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `fileId` | string | Yes | The ID of the file to download |
| `mimeType` | string | No | The MIME type to export Google Workspace files to \(optional\) |
| `fileName` | string | No | Optional filename override |
| `includeRevisions` | boolean | No | Whether to include revision history in the metadata \(default: true, returns first 100 revisions\) |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `file` | object | Downloaded file data |
| ↳ `name` | string | File name |
| ↳ `mimeType` | string | MIME type of the file |
| ↳ `data` | string | File content as base64-encoded string |
| ↳ `size` | number | File size in bytes |
| `metadata` | object | Complete file metadata from Google Drive |
| ↳ `id` | string | Google Drive file ID |
| ↳ `kind` | string | Resource type identifier |
| ↳ `name` | string | File name |
| ↳ `mimeType` | string | MIME type |
| ↳ `description` | string | File description |
| ↳ `originalFilename` | string | Original uploaded filename |
| ↳ `fullFileExtension` | string | Full file extension |
| ↳ `fileExtension` | string | File extension |
| ↳ `owners` | json | List of file owners |
| ↳ `permissions` | json | File permissions |
| ↳ `permissionIds` | json | Permission IDs |
| ↳ `shared` | boolean | Whether file is shared |
| ↳ `ownedByMe` | boolean | Whether owned by current user |
| ↳ `writersCanShare` | boolean | Whether writers can share |
| ↳ `viewersCanCopyContent` | boolean | Whether viewers can copy |
| ↳ `copyRequiresWriterPermission` | boolean | Whether copy requires writer permission |
| ↳ `sharingUser` | json | User who shared the file |
| ↳ `starred` | boolean | Whether file is starred |
| ↳ `trashed` | boolean | Whether file is in trash |
| ↳ `explicitlyTrashed` | boolean | Whether explicitly trashed |
| ↳ `appProperties` | json | App-specific properties |
| ↳ `createdTime` | string | File creation time |
| ↳ `modifiedTime` | string | Last modification time |
| ↳ `modifiedByMeTime` | string | When modified by current user |
| ↳ `viewedByMeTime` | string | When last viewed by current user |
| ↳ `sharedWithMeTime` | string | When shared with current user |
| ↳ `lastModifyingUser` | json | User who last modified the file |
| ↳ `viewedByMe` | boolean | Whether viewed by current user |
| ↳ `modifiedByMe` | boolean | Whether modified by current user |
| ↳ `webViewLink` | string | URL to view in browser |
| ↳ `webContentLink` | string | Direct download URL |
| ↳ `iconLink` | string | URL to file icon |
| ↳ `thumbnailLink` | string | URL to thumbnail |
| ↳ `exportLinks` | json | Export format links |
| ↳ `size` | string | File size in bytes |
| ↳ `quotaBytesUsed` | string | Storage quota used |
| ↳ `md5Checksum` | string | MD5 hash |
| ↳ `sha1Checksum` | string | SHA-1 hash |
| ↳ `sha256Checksum` | string | SHA-256 hash |
| ↳ `parents` | json | Parent folder IDs |
| ↳ `spaces` | json | Spaces containing file |
| ↳ `driveId` | string | Shared drive ID |
| ↳ `capabilities` | json | User capabilities on file |
| ↳ `version` | string | Version number |
| ↳ `headRevisionId` | string | Head revision ID |
| ↳ `hasThumbnail` | boolean | Whether has thumbnail |
| ↳ `thumbnailVersion` | string | Thumbnail version |
| ↳ `imageMediaMetadata` | json | Image-specific metadata |
| ↳ `videoMediaMetadata` | json | Video-specific metadata |
| ↳ `isAppAuthorized` | boolean | Whether created by requesting app |
| ↳ `contentRestrictions` | json | Content restrictions |
| ↳ `linkShareMetadata` | json | Link share metadata |
| ↳ `revisions` | json | File revision history \(first 100 revisions only\) |
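Since `file.data` is documented as a base64-encoded string, a minimal sketch of persisting a download result in Node.js might look like this:

```ts
import { writeFileSync } from 'node:fs'

// Decode the base64 payload from a google_drive_download result and
// write it to disk under the returned file name.
function saveDownload(file: { name: string; data: string }): void {
  const bytes = Buffer.from(file.data, 'base64')
  writeFileSync(file.name, bytes)
}
```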
### `google_drive_copy`
Create a copy of a file in Google Drive
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `fileId` | string | Yes | The ID of the file to copy |
| `newName` | string | No | Name for the copied file \(defaults to "Copy of \[original name\]"\) |
| `destinationFolderId` | string | No | ID of the folder to place the copy in \(defaults to same location as original\) |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `file` | json | The copied file metadata |
| ↳ `id` | string | Google Drive file ID of the copy |
| ↳ `kind` | string | Resource type identifier |
| ↳ `name` | string | File name |
| ↳ `mimeType` | string | MIME type |
| ↳ `webViewLink` | string | URL to view in browser |
| ↳ `parents` | json | Parent folder IDs |
| ↳ `createdTime` | string | File creation time |
| ↳ `modifiedTime` | string | Last modification time |
| ↳ `owners` | json | List of file owners |
| ↳ `size` | string | File size in bytes |
### `google_drive_update`
Update file metadata in Google Drive (rename, move, star, add description)
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `fileId` | string | Yes | The ID of the file to update |
| `name` | string | No | New name for the file |
| `description` | string | No | New description for the file |
| `addParents` | string | No | Comma-separated list of parent folder IDs to add \(moves file to these folders\) |
| `removeParents` | string | No | Comma-separated list of parent folder IDs to remove |
| `starred` | boolean | No | Whether to star or unstar the file |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `file` | json | The updated file metadata |
| ↳ `id` | string | Google Drive file ID |
| ↳ `kind` | string | Resource type identifier |
| ↳ `name` | string | File name |
| ↳ `mimeType` | string | MIME type |
| ↳ `description` | string | File description |
| ↳ `starred` | boolean | Whether file is starred |
| ↳ `webViewLink` | string | URL to view in browser |
| ↳ `parents` | json | Parent folder IDs |
| ↳ `modifiedTime` | string | Last modification time |
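Moving a file between folders is done by adding and removing parents in a single update. A minimal sketch, with placeholder IDs:

```ts
// Hypothetical google_drive_update input that relocates a file and
// stars it in the same call.
const moveInput = {
  fileId: '<file-id>',
  addParents: '<destination-folder-id>', // comma-separated if several
  removeParents: '<source-folder-id>',
  starred: true,
}
```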
### `google_drive_trash`
Move a file to the trash in Google Drive (can be restored later)
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `fileId` | string | Yes | The ID of the file to move to trash |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `file` | json | The trashed file metadata |
| ↳ `id` | string | Google Drive file ID |
| ↳ `kind` | string | Resource type identifier |
| ↳ `name` | string | File name |
| ↳ `mimeType` | string | MIME type |
| ↳ `trashed` | boolean | Whether file is in trash \(should be true\) |
| ↳ `trashedTime` | string | When file was trashed |
| ↳ `webViewLink` | string | URL to view in browser |
### `google_drive_delete`
Permanently delete a file from Google Drive (bypasses trash)
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `fileId` | string | Yes | The ID of the file to permanently delete |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `deleted` | boolean | Whether the file was successfully deleted |
| `fileId` | string | The ID of the deleted file |
### `google_drive_share`
Share a file with a user, group, domain, or make it public
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `fileId` | string | Yes | The ID of the file to share |
| `type` | string | Yes | Type of grantee: user, group, domain, or anyone |
| `role` | string | Yes | Permission role: owner \(transfer ownership\), organizer \(shared drive only\), fileOrganizer \(shared drive only\), writer \(edit\), commenter \(view and comment\), reader \(view only\) |
| `email` | string | No | Email address of the user or group \(required for type=user or type=group\) |
| `domain` | string | No | Domain to share with \(required for type=domain\) |
| `transferOwnership` | boolean | No | Required when role is owner. Transfers ownership to the specified user. |
| `moveToNewOwnersRoot` | boolean | No | When transferring ownership, move the file to the new owner's My Drive root folder. |
| `sendNotification` | boolean | No | Whether to send an email notification \(default: true\) |
| `emailMessage` | string | No | Custom message to include in the notification email |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `permission` | json | The created permission details |
| ↳ `id` | string | Permission ID |
| ↳ `type` | string | Grantee type \(user, group, domain, anyone\) |
| ↳ `role` | string | Permission role |
| ↳ `emailAddress` | string | Email of the grantee |
| ↳ `displayName` | string | Display name of the grantee |
| ↳ `domain` | string | Domain of the grantee |
| ↳ `expirationTime` | string | Expiration time |
| ↳ `deleted` | boolean | Whether grantee is deleted |
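Two common sharing payloads, built only from the documented fields (IDs and addresses are placeholders):

```ts
// Grant edit access to a single user; `email` is required for
// type=user or type=group.
const shareWithUser = {
  fileId: '<file-id>',
  type: 'user',
  role: 'writer',
  email: 'teammate@example.com',
  sendNotification: true,
  emailMessage: 'Sharing the Q3 report with you.',
}

// Make the file readable by anyone with the link; no email needed.
const shareWithAnyone = { fileId: '<file-id>', type: 'anyone', role: 'reader' }
```

Note that `role: 'owner'` additionally requires `transferOwnership: true`, per the input table above.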
### `google_drive_unshare`
Remove a permission from a file (revoke access)
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `fileId` | string | Yes | The ID of the file to modify permissions on |
| `permissionId` | string | Yes | The ID of the permission to remove \(use list_permissions to find this\) |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `removed` | boolean | Whether the permission was successfully removed |
| `fileId` | string | The ID of the file |
| `permissionId` | string | The ID of the removed permission |
### `google_drive_list_permissions`
List all permissions (who has access) for a file in Google Drive
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `fileId` | string | Yes | The ID of the file to list permissions for |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `permissions` | array | List of permissions on the file |
| ↳ `id` | string | Permission ID \(use to remove permission\) |
| ↳ `type` | string | Grantee type \(user, group, domain, anyone\) |
| ↳ `role` | string | Permission role \(owner, organizer, fileOrganizer, writer, commenter, reader\) |
| ↳ `emailAddress` | string | Email of the grantee |
| ↳ `displayName` | string | Display name of the grantee |
| ↳ `photoLink` | string | Photo URL of the grantee |
| ↳ `domain` | string | Domain of the grantee |
| ↳ `expirationTime` | string | When permission expires |
| ↳ `deleted` | boolean | Whether grantee account is deleted |
| ↳ `allowFileDiscovery` | boolean | Whether file is discoverable by grantee |
| ↳ `pendingOwner` | boolean | Whether ownership transfer is pending |
| ↳ `permissionDetails` | json | Details about inherited permissions |
| `nextPageToken` | string | Token for fetching the next page of permissions |
### `google_drive_get_about`
Get information about the user and their Google Drive (storage quota, capabilities)
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `user` | json | Information about the authenticated user |
| ↳ `displayName` | string | User display name |
| ↳ `emailAddress` | string | User email address |
| ↳ `photoLink` | string | URL to user profile photo |
| ↳ `permissionId` | string | User permission ID |
| ↳ `me` | boolean | Whether this is the authenticated user |
| `storageQuota` | json | Storage quota information in bytes |
| ↳ `limit` | string | Total storage limit in bytes \(null for unlimited\) |
| ↳ `usage` | string | Total storage used in bytes |
| ↳ `usageInDrive` | string | Storage used by Drive files in bytes |
| ↳ `usageInDriveTrash` | string | Storage used by trashed files in bytes |
| `canCreateDrives` | boolean | Whether user can create shared drives |
| `importFormats` | json | Map of MIME types that can be imported and their target formats |
| `exportFormats` | json | Map of Google Workspace MIME types and their exportable formats |
| `maxUploadSize` | string | Maximum upload size in bytes |
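Because the `storageQuota` fields arrive as byte counts encoded as strings, a free-space calculation needs a numeric conversion first; BigInt avoids overflow on large quotas:

```ts
// Compute remaining quota from google_drive_get_about output.
// A null limit means the account has unlimited storage.
function freeBytes(quota: { limit: string | null; usage: string }): bigint | null {
  if (quota.limit === null) return null
  return BigInt(quota.limit) - BigInt(quota.usage)
}
```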

View File

@@ -1,6 +1,6 @@
---
title: Google Forms
description: Manage Google Forms and responses
---
import { BlockInfoCard } from "@/components/ui/block-info-card"
@@ -29,7 +29,7 @@ In Sim, the Google Forms integration enables your agents to programmatically acc
## Usage Instructions
Integrate Google Forms into your workflow. Read form structure, get responses, create forms, update content, and manage notification watches.
@@ -37,15 +37,246 @@ Integrate Google Forms into your workflow. Provide a Form ID to list responses,
### `google_forms_get_responses`
Retrieve a single response or list responses from a Google Form
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `formId` | string | Yes | The ID of the Google Form |
| `responseId` | string | No | If provided, returns this specific response |
| `pageSize` | number | No | Maximum number of responses to return \(service may return fewer\). Defaults to 5000. |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `responses` | array | Array of form responses \(when no responseId provided\) |
| ↳ `responseId` | string | Unique response ID |
| ↳ `createTime` | string | When the response was created |
| ↳ `lastSubmittedTime` | string | When the response was last submitted |
| ↳ `answers` | json | Map of question IDs to answer values |
| `response` | object | Single form response \(when responseId is provided\) |
| ↳ `responseId` | string | Unique response ID |
| ↳ `createTime` | string | When the response was created |
| ↳ `lastSubmittedTime` | string | When the response was last submitted |
| ↳ `answers` | json | Map of question IDs to answer values |
| `raw` | json | Raw API response data |
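Since `answers` is a map keyed by question ID, iterating a response list is a matter of walking object entries. A minimal sketch over the documented shape:

```ts
// Shape taken from the output table above; answer values are left
// untyped because they vary by question type.
type FormResponse = {
  responseId: string
  createTime: string
  answers: Record<string, unknown>
}

// Print each response's answers, keyed by question ID.
function summarize(responses: FormResponse[]): void {
  for (const r of responses) {
    console.log(r.responseId, r.createTime)
    for (const [questionId, answer] of Object.entries(r.answers ?? {})) {
      console.log(' ', questionId, JSON.stringify(answer))
    }
  }
}
```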
### `google_forms_get_form`
Retrieve a form structure including its items, settings, and metadata
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `formId` | string | Yes | The ID of the Google Form to retrieve |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `formId` | string | The form ID |
| `title` | string | The form title visible to responders |
| `description` | string | The form description |
| `documentTitle` | string | The document title visible in Drive |
| `responderUri` | string | The URI to share with responders |
| `linkedSheetId` | string | The ID of the linked Google Sheet |
| `revisionId` | string | The revision ID of the form |
| `items` | array | The form items \(questions, sections, etc.\) |
| ↳ `itemId` | string | Item ID |
| ↳ `title` | string | Item title |
| ↳ `description` | string | Item description |
| `settings` | json | Form settings |
| `publishSettings` | json | Form publish settings |
### `google_forms_create_form`
Create a new Google Form with a title
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `title` | string | Yes | The title of the form visible to responders |
| `documentTitle` | string | No | The document title visible in Drive \(defaults to form title\) |
| `unpublished` | boolean | No | If true, create an unpublished form that does not accept responses |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `formId` | string | The ID of the created form |
| `title` | string | The form title |
| `documentTitle` | string | The document title in Drive |
| `responderUri` | string | The URI to share with responders |
| `revisionId` | string | The revision ID of the form |
### `google_forms_batch_update`
Apply multiple updates to a form (add items, update info, change settings, etc.)
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `formId` | string | Yes | The ID of the Google Form to update |
| `requests` | json | Yes | Array of update requests \(updateFormInfo, updateSettings, createItem, updateItem, moveItem, deleteItem\) |
| `includeFormInResponse` | boolean | No | Whether to return the updated form in the response |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `replies` | array | The replies from each update request |
| `writeControl` | object | Write control information with revision IDs |
| ↳ `requiredRevisionId` | string | Required revision ID for conflict detection |
| ↳ `targetRevisionId` | string | Target revision ID |
| `form` | object | The updated form \(if includeFormInResponse was true\) |
| ↳ `formId` | string | The form ID |
| ↳ `info` | object | Form info containing title and description |
| ↳ `title` | string | The form title visible to responders |
| ↳ `description` | string | The form description |
| ↳ `documentTitle` | string | The document title visible in Drive |
| ↳ `settings` | object | Form settings |
| ↳ `quizSettings` | object | Quiz settings |
| ↳ `isQuiz` | boolean | Whether the form is a quiz |
| ↳ `emailCollectionType` | string | Email collection type |
| ↳ `items` | array | The form items \(questions, sections, etc.\) |
| ↳ `itemId` | string | Item ID |
| ↳ `title` | string | Item title |
| ↳ `description` | string | Item description |
| ↳ `questionItem` | json | Question item configuration |
| ↳ `questionGroupItem` | json | Question group configuration |
| ↳ `pageBreakItem` | json | Page break configuration |
| ↳ `textItem` | json | Text item configuration |
| ↳ `imageItem` | json | Image item configuration |
| ↳ `videoItem` | json | Video item configuration |
| ↳ `revisionId` | string | The revision ID of the form |
| ↳ `responderUri` | string | The URI to share with responders |
| ↳ `linkedSheetId` | string | The ID of the linked Google Sheet |
| ↳ `publishSettings` | object | Form publish settings |
| ↳ `publishState` | object | Current publish state |
| ↳ `isPublished` | boolean | Whether the form is published |
| ↳ `isAcceptingResponses` | boolean | Whether the form is accepting responses |
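A plausible `requests` value, following the Google Forms API request shapes; treat the exact nested fields as an assumption to verify against the Forms API reference:

```ts
// Two-step batch: rename the form, then insert a multiple-choice
// question as the first item.
const requests = [
  {
    updateFormInfo: {
      info: { title: 'Customer Survey', description: 'Tell us how we did.' },
      updateMask: 'title,description', // only the named fields are written
    },
  },
  {
    createItem: {
      item: {
        title: 'How satisfied are you?',
        questionItem: {
          question: {
            required: true,
            choiceQuestion: {
              type: 'RADIO',
              options: [{ value: 'Satisfied' }, { value: 'Unsatisfied' }],
            },
          },
        },
      },
      location: { index: 0 }, // insert as the first item
    },
  },
]
```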
### `google_forms_set_publish_settings`
Update the publish settings of a form (publish/unpublish, accept responses)
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `formId` | string | Yes | The ID of the Google Form |
| `isPublished` | boolean | Yes | Whether the form is published and visible to others |
| `isAcceptingResponses` | boolean | No | Whether the form accepts responses \(forced to false if isPublished is false\) |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `formId` | string | The form ID |
| `publishSettings` | json | The updated publish settings |
| ↳ `publishState` | object | The publish state |
| ↳ `isPublished` | boolean | Whether the form is published |
| ↳ `isAcceptingResponses` | boolean | Whether the form accepts responses |
### `google_forms_create_watch`
Create a notification watch for form changes (schema changes or new responses)
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `formId` | string | Yes | The ID of the Google Form to watch |
| `eventType` | string | Yes | Event type to watch: SCHEMA \(form changes\) or RESPONSES \(new submissions\) |
| `topicName` | string | Yes | The Cloud Pub/Sub topic name \(format: projects/\{project\}/topics/\{topic\}\) |
| `watchId` | string | No | Custom watch ID \(4-63 chars, lowercase letters, numbers, hyphens\) |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `id` | string | The watch ID |
| `eventType` | string | The event type being watched |
| `topicName` | string | The Cloud Pub/Sub topic |
| `createTime` | string | When the watch was created |
| `expireTime` | string | When the watch expires \(7 days after creation\) |
| `state` | string | The watch state \(ACTIVE, SUSPENDED\) |
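An example watch input built from the documented fields; the Pub/Sub topic is a placeholder and must already exist (and, as an assumption worth verifying, grant publish rights to the Forms notification service account):

```ts
// Hypothetical google_forms_create_watch input.
const watchInput = {
  formId: '<form-id>',
  eventType: 'RESPONSES', // or 'SCHEMA' for structure changes
  topicName: 'projects/my-project/topics/form-responses', // documented format
  watchId: 'responses-watch-1', // optional: 4-63 chars, lowercase/digits/hyphens
}
```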
### `google_forms_list_watches`
List all notification watches for a form
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `formId` | string | Yes | The ID of the Google Form |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `watches` | array | List of watches for the form |
| ↳ `id` | string | Watch ID |
| ↳ `eventType` | string | Event type \(SCHEMA or RESPONSES\) |
| ↳ `createTime` | string | When the watch was created |
| ↳ `expireTime` | string | When the watch expires |
| ↳ `state` | string | Watch state |
### `google_forms_delete_watch`
Delete a notification watch from a form
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `formId` | string | Yes | The ID of the Google Form |
| `watchId` | string | Yes | The ID of the watch to delete |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `deleted` | boolean | Whether the watch was successfully deleted |
### `google_forms_renew_watch`
Renew a notification watch for another 7 days
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `formId` | string | Yes | The ID of the Google Form |
| `watchId` | string | Yes | The ID of the watch to renew |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `id` | string | The watch ID |
| `eventType` | string | The event type being watched |
| `expireTime` | string | The new expiration time |
| `state` | string | The watch state |

View File

@@ -215,4 +215,191 @@ Check if a user is a member of a Google Group
| --------- | ---- | ----------- |
| `isMember` | boolean | Whether the user is a member of the group |
### `google_groups_list_aliases`
List all email aliases for a Google Group
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `groupKey` | string | Yes | Group email address or unique group ID |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `aliases` | array | List of email aliases for the group |
| ↳ `id` | string | Unique group identifier |
| ↳ `primaryEmail` | string | The group's primary email address |
| ↳ `alias` | string | Alias email address |
| ↳ `kind` | string | API resource type |
| ↳ `etag` | string | Resource version identifier |
### `google_groups_add_alias`
Add an email alias to a Google Group
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `groupKey` | string | Yes | Group email address or unique group ID |
| `alias` | string | Yes | The email alias to add to the group |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `id` | string | Unique group identifier |
| `primaryEmail` | string | The group's primary email address |
| `alias` | string | The alias that was added |
| `kind` | string | API resource type |
| `etag` | string | Resource version identifier |
### `google_groups_remove_alias`
Remove an email alias from a Google Group
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `groupKey` | string | Yes | Group email address or unique group ID |
| `alias` | string | Yes | The email alias to remove from the group |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `deleted` | boolean | Whether the alias was successfully deleted |
### `google_groups_get_settings`
Get the settings for a Google Group including access permissions, moderation, and posting options
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `groupEmail` | string | Yes | The email address of the group |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `email` | string | The group's email address |
| `name` | string | The group name \(max 75 characters\) |
| `description` | string | The group description \(max 4096 characters\) |
| `whoCanJoin` | string | Who can join the group \(ANYONE_CAN_JOIN, ALL_IN_DOMAIN_CAN_JOIN, INVITED_CAN_JOIN, CAN_REQUEST_TO_JOIN\) |
| `whoCanViewMembership` | string | Who can view group membership |
| `whoCanViewGroup` | string | Who can view group messages |
| `whoCanPostMessage` | string | Who can post messages to the group |
| `allowExternalMembers` | string | Whether external users can be members |
| `allowWebPosting` | string | Whether web posting is allowed |
| `primaryLanguage` | string | The group's primary language |
| `isArchived` | string | Whether messages are archived |
| `archiveOnly` | string | Whether the group is archive-only \(inactive\) |
| `messageModerationLevel` | string | Message moderation level |
| `spamModerationLevel` | string | Spam handling level \(ALLOW, MODERATE, SILENTLY_MODERATE, REJECT\) |
| `replyTo` | string | Default reply destination |
| `customReplyTo` | string | Custom email for replies |
| `includeCustomFooter` | string | Whether to include custom footer |
| `customFooterText` | string | Custom footer text \(max 1000 characters\) |
| `sendMessageDenyNotification` | string | Whether to send rejection notifications |
| `defaultMessageDenyNotificationText` | string | Default rejection message text |
| `membersCanPostAsTheGroup` | string | Whether members can post as the group |
| `includeInGlobalAddressList` | string | Whether included in Global Address List |
| `whoCanLeaveGroup` | string | Who can leave the group |
| `whoCanContactOwner` | string | Who can contact the group owner |
| `favoriteRepliesOnTop` | string | Whether favorite replies appear at top |
| `whoCanApproveMembers` | string | Who can approve new members |
| `whoCanBanUsers` | string | Who can ban users |
| `whoCanModerateMembers` | string | Who can manage members |
| `whoCanModerateContent` | string | Who can moderate content |
| `whoCanAssistContent` | string | Who can assist with content metadata |
| `enableCollaborativeInbox` | string | Whether collaborative inbox is enabled |
| `whoCanDiscoverGroup` | string | Who can discover the group |
| `defaultSender` | string | Default sender identity \(DEFAULT_SELF or GROUP\) |
### `google_groups_update_settings`
Update the settings for a Google Group including access permissions, moderation, and posting options
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `groupEmail` | string | Yes | The email address of the group |
| `name` | string | No | The group name \(max 75 characters\) |
| `description` | string | No | The group description \(max 4096 characters\) |
| `whoCanJoin` | string | No | Who can join: ANYONE_CAN_JOIN, ALL_IN_DOMAIN_CAN_JOIN, INVITED_CAN_JOIN, CAN_REQUEST_TO_JOIN |
| `whoCanViewMembership` | string | No | Who can view membership: ALL_IN_DOMAIN_CAN_VIEW, ALL_MEMBERS_CAN_VIEW, ALL_MANAGERS_CAN_VIEW |
| `whoCanViewGroup` | string | No | Who can view group messages: ANYONE_CAN_VIEW, ALL_IN_DOMAIN_CAN_VIEW, ALL_MEMBERS_CAN_VIEW, ALL_MANAGERS_CAN_VIEW |
| `whoCanPostMessage` | string | No | Who can post: NONE_CAN_POST, ALL_MANAGERS_CAN_POST, ALL_MEMBERS_CAN_POST, ALL_OWNERS_CAN_POST, ALL_IN_DOMAIN_CAN_POST, ANYONE_CAN_POST |
| `allowExternalMembers` | string | No | Whether external users can be members: true or false |
| `allowWebPosting` | string | No | Whether web posting is allowed: true or false |
| `primaryLanguage` | string | No | The group's primary language \(e.g., en\) |
| `isArchived` | string | No | Whether messages are archived: true or false |
| `archiveOnly` | string | No | Whether the group is archive-only \(inactive\): true or false |
| `messageModerationLevel` | string | No | Message moderation: MODERATE_ALL_MESSAGES, MODERATE_NON_MEMBERS, MODERATE_NEW_MEMBERS, MODERATE_NONE |
| `spamModerationLevel` | string | No | Spam handling: ALLOW, MODERATE, SILENTLY_MODERATE, REJECT |
| `replyTo` | string | No | Default reply: REPLY_TO_CUSTOM, REPLY_TO_SENDER, REPLY_TO_LIST, REPLY_TO_OWNER, REPLY_TO_IGNORE, REPLY_TO_MANAGERS |
| `customReplyTo` | string | No | Custom email for replies \(when replyTo is REPLY_TO_CUSTOM\) |
| `includeCustomFooter` | string | No | Whether to include custom footer: true or false |
| `customFooterText` | string | No | Custom footer text \(max 1000 characters\) |
| `sendMessageDenyNotification` | string | No | Whether to send rejection notifications: true or false |
| `defaultMessageDenyNotificationText` | string | No | Default rejection message text |
| `membersCanPostAsTheGroup` | string | No | Whether members can post as the group: true or false |
| `includeInGlobalAddressList` | string | No | Whether included in Global Address List: true or false |
| `whoCanLeaveGroup` | string | No | Who can leave: ALL_MANAGERS_CAN_LEAVE, ALL_MEMBERS_CAN_LEAVE, NONE_CAN_LEAVE |
| `whoCanContactOwner` | string | No | Who can contact owner: ALL_IN_DOMAIN_CAN_CONTACT, ALL_MANAGERS_CAN_CONTACT, ALL_MEMBERS_CAN_CONTACT, ANYONE_CAN_CONTACT |
| `favoriteRepliesOnTop` | string | No | Whether favorite replies appear at top: true or false |
| `whoCanApproveMembers` | string | No | Who can approve members: ALL_OWNERS_CAN_APPROVE, ALL_MANAGERS_CAN_APPROVE, ALL_MEMBERS_CAN_APPROVE, NONE_CAN_APPROVE |
| `whoCanBanUsers` | string | No | Who can ban users: OWNERS_ONLY, OWNERS_AND_MANAGERS, NONE |
| `whoCanModerateMembers` | string | No | Who can manage members: OWNERS_ONLY, OWNERS_AND_MANAGERS, ALL_MEMBERS, NONE |
| `whoCanModerateContent` | string | No | Who can moderate content: OWNERS_ONLY, OWNERS_AND_MANAGERS, ALL_MEMBERS, NONE |
| `whoCanAssistContent` | string | No | Who can assist with content metadata: OWNERS_ONLY, OWNERS_AND_MANAGERS, ALL_MEMBERS, NONE |
| `enableCollaborativeInbox` | string | No | Whether collaborative inbox is enabled: true or false |
| `whoCanDiscoverGroup` | string | No | Who can discover: ANYONE_CAN_DISCOVER, ALL_IN_DOMAIN_CAN_DISCOVER, ALL_MEMBERS_CAN_DISCOVER |
| `defaultSender` | string | No | Default sender: DEFAULT_SELF or GROUP |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `email` | string | The group's email address |
| `name` | string | The group name |
| `description` | string | The group description |
| `whoCanJoin` | string | Who can join the group |
| `whoCanViewMembership` | string | Who can view group membership |
| `whoCanViewGroup` | string | Who can view group messages |
| `whoCanPostMessage` | string | Who can post messages to the group |
| `allowExternalMembers` | string | Whether external users can be members |
| `allowWebPosting` | string | Whether web posting is allowed |
| `primaryLanguage` | string | The group's primary language |
| `isArchived` | string | Whether messages are archived |
| `archiveOnly` | string | Whether the group is archive-only |
| `messageModerationLevel` | string | Message moderation level |
| `spamModerationLevel` | string | Spam handling level |
| `replyTo` | string | Default reply destination |
| `customReplyTo` | string | Custom email for replies |
| `includeCustomFooter` | string | Whether to include custom footer |
| `customFooterText` | string | Custom footer text |
| `sendMessageDenyNotification` | string | Whether to send rejection notifications |
| `defaultMessageDenyNotificationText` | string | Default rejection message text |
| `membersCanPostAsTheGroup` | string | Whether members can post as the group |
| `includeInGlobalAddressList` | string | Whether included in Global Address List |
| `whoCanLeaveGroup` | string | Who can leave the group |
| `whoCanContactOwner` | string | Who can contact the group owner |
| `favoriteRepliesOnTop` | string | Whether favorite replies appear at top |
| `whoCanApproveMembers` | string | Who can approve new members |
| `whoCanBanUsers` | string | Who can ban users |
| `whoCanModerateMembers` | string | Who can manage members |
| `whoCanModerateContent` | string | Who can moderate content |
| `whoCanAssistContent` | string | Who can assist with content metadata |
| `enableCollaborativeInbox` | string | Whether collaborative inbox is enabled |
| `whoCanDiscoverGroup` | string | Who can discover the group |
| `defaultSender` | string | Default sender identity |
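Note the string-typed booleans above: the Groups Settings API expects `"true"`/`"false"` as strings, which is why these fields are documented as `string` rather than `boolean`. A minimal update sketch with a placeholder group address:

```ts
// Hypothetical google_groups_update_settings input; omitted fields
// are left unchanged.
const settingsUpdate = {
  groupEmail: 'team@example.com',
  whoCanJoin: 'INVITED_CAN_JOIN',
  whoCanPostMessage: 'ALL_MEMBERS_CAN_POST',
  allowExternalMembers: 'false', // string, not boolean
  messageModerationLevel: 'MODERATE_NON_MEMBERS',
  replyTo: 'REPLY_TO_LIST',
}
```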

View File

@@ -28,7 +28,7 @@ In Sim, the Google Sheets integration empowers your agents to automate reading f
## Usage Instructions
Integrate Google Sheets into the workflow with explicit sheet selection. Can read, write, append, update, clear data, create spreadsheets, get spreadsheet info, and copy sheets.
@@ -42,9 +42,8 @@ Read data from a specific sheet in a Google Sheets spreadsheet
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `spreadsheetId` | string | Yes | The ID of the spreadsheet \(found in the URL: docs.google.com/spreadsheets/d/\{SPREADSHEET_ID\}/edit\). |
| `range` | string | No | The A1 notation range to read \(e.g. "Sheet1!A1:D10", "A1:B5"\). Defaults to first sheet A1:Z1000 if not specified. |
#### Output
@@ -66,8 +65,7 @@ Write data to a specific sheet in a Google Sheets spreadsheet
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `spreadsheetId` | string | Yes | The ID of the spreadsheet |
| `range` | string | No | The A1 notation range to write to \(e.g. "Sheet1!A1:D10", "A1:B5"\) |
| `values` | array | Yes | The data to write as a 2D array \(e.g. \[\["Name", "Age"\], \["Alice", 30\], \["Bob", 25\]\]\) or array of objects. |
| `valueInputOption` | string | No | The format of the data to write |
| `includeValuesInResponse` | boolean | No | Whether to include the written values in the response |
@@ -93,8 +91,7 @@ Update data in a specific sheet in a Google Sheets spreadsheet
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `spreadsheetId` | string | Yes | The ID of the spreadsheet to update |
| `range` | string | No | The A1 notation range to update \(e.g. "Sheet1!A1:D10", "A1:B5"\) |
| `values` | array | Yes | The data to update as a 2D array \(e.g. \[\["Name", "Age"\], \["Alice", 30\]\]\) or array of objects. |
| `valueInputOption` | string | No | The format of the data to update |
| `includeValuesInResponse` | boolean | No | Whether to include the updated values in the response |
@@ -120,7 +117,7 @@ Append data to the end of a specific sheet in a Google Sheets spreadsheet
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `spreadsheetId` | string | Yes | The ID of the spreadsheet to append to |
| `range` | string | No | The A1 notation range to append after \(e.g. "Sheet1", "Sheet1!A:D"\) |
| `values` | array | Yes | The data to append as a 2D array \(e.g. \[\["Alice", 30\], \["Bob", 25\]\]\) or array of objects. |
| `valueInputOption` | string | No | The format of the data to append |
| `insertDataOption` | string | No | How to insert the data \(OVERWRITE or INSERT_ROWS\) |
@@ -139,4 +136,180 @@ Append data to the end of a specific sheet in a Google Sheets spreadsheet
| ↳ `spreadsheetId` | string | Google Sheets spreadsheet ID |
| ↳ `spreadsheetUrl` | string | Spreadsheet URL |
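A minimal write input over the documented fields: `values` is row-major, so each inner array is one spreadsheet row starting at the top-left cell of `range`:

```ts
// Hypothetical google_sheets_write input; the spreadsheet ID is a
// placeholder.
const writeInput = {
  spreadsheetId: '<spreadsheet-id>',
  range: 'Sheet1!A1:B3',
  values: [
    ['Name', 'Age'], // row 1 (header)
    ['Alice', 30],   // row 2
    ['Bob', 25],     // row 3
  ],
  valueInputOption: 'USER_ENTERED', // lets Sheets parse numbers, dates, formulas
}
```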
### `google_sheets_clear`
Clear values from a specific range in a Google Sheets spreadsheet
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `spreadsheetId` | string | Yes | The ID of the spreadsheet |
| `sheetName` | string | Yes | The name of the sheet/tab to clear |
| `cellRange` | string | No | The cell range to clear \(e.g. "A1:D10"\). Clears entire sheet if not specified. |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `clearedRange` | string | The range that was cleared |
| `sheetName` | string | Name of the sheet that was cleared |
| `metadata` | json | Spreadsheet metadata including ID and URL |
| ↳ `spreadsheetId` | string | Google Sheets spreadsheet ID |
| ↳ `spreadsheetUrl` | string | Spreadsheet URL |
### `google_sheets_get_spreadsheet`
Get metadata about a Google Sheets spreadsheet including title and sheet list
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `spreadsheetId` | string | Yes | The ID of the spreadsheet |
| `includeGridData` | boolean | No | Whether to include grid data \(cell values\). Defaults to false. |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `spreadsheetId` | string | The spreadsheet ID |
| `title` | string | The title of the spreadsheet |
| `locale` | string | The locale of the spreadsheet |
| `timeZone` | string | The time zone of the spreadsheet |
| `spreadsheetUrl` | string | URL to the spreadsheet |
| `sheets` | array | List of sheets in the spreadsheet |
| ↳ `sheetId` | number | The sheet ID |
| ↳ `title` | string | The sheet title/name |
| ↳ `index` | number | The sheet index \(position\) |
| ↳ `rowCount` | number | Number of rows in the sheet |
| ↳ `columnCount` | number | Number of columns in the sheet |
| ↳ `hidden` | boolean | Whether the sheet is hidden |
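One common use of this output is resolving a tab's numeric `sheetId` from its title before issuing sheet-scoped requests; a minimal sketch over the documented shape:

```ts
// Fields taken from the `sheets` array documented above.
type SheetInfo = { sheetId: number; title: string; index: number }

// Return the sheetId for a tab with the given title, if present.
function findSheetId(sheets: SheetInfo[], title: string): number | undefined {
  return sheets.find((s) => s.title === title)?.sheetId
}
```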
### `google_sheets_create_spreadsheet`
Create a new Google Sheets spreadsheet
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `title` | string | Yes | The title of the new spreadsheet |
| `sheetTitles` | json | No | Array of sheet names to create \(e.g., \["Sheet1", "Data", "Summary"\]\). Defaults to a single "Sheet1". |
| `locale` | string | No | The locale of the spreadsheet \(e.g., "en_US"\) |
| `timeZone` | string | No | The time zone of the spreadsheet \(e.g., "America/New_York"\) |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `spreadsheetId` | string | The ID of the created spreadsheet |
| `title` | string | The title of the created spreadsheet |
| `spreadsheetUrl` | string | URL to the created spreadsheet |
| `sheets` | array | List of sheets created in the spreadsheet |
| ↳ `sheetId` | number | The sheet ID |
| ↳ `title` | string | The sheet title/name |
| ↳ `index` | number | The sheet index \(position\) |
### `google_sheets_batch_get`
Read multiple ranges from a Google Sheets spreadsheet in a single request
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `spreadsheetId` | string | Yes | The ID of the spreadsheet |
| `ranges` | json | Yes | Array of ranges to read \(e.g., \["Sheet1!A1:D10", "Sheet2!A1:B5"\]\). Each range should include sheet name. |
| `majorDimension` | string | No | The major dimension of values: "ROWS" \(default\) or "COLUMNS" |
| `valueRenderOption` | string | No | How values should be rendered: "FORMATTED_VALUE" \(default\), "UNFORMATTED_VALUE", or "FORMULA" |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `spreadsheetId` | string | The spreadsheet ID |
| `valueRanges` | array | Array of value ranges read from the spreadsheet |
| ↳ `range` | string | The range that was read |
| ↳ `majorDimension` | string | Major dimension \(ROWS or COLUMNS\) |
| ↳ `values` | array | The cell values as a 2D array |
| `metadata` | json | Spreadsheet metadata including ID and URL |
| ↳ `spreadsheetId` | string | Google Sheets spreadsheet ID |
| ↳ `spreadsheetUrl` | string | Spreadsheet URL |
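As a sketch, a `ranges` payload that reads two sheets in one call (the spreadsheet ID is a placeholder):

```ts
// Hypothetical arguments for google_sheets_batch_get.
const batchGetArgs = {
  spreadsheetId: 'YOUR_SPREADSHEET_ID', // placeholder
  ranges: ['Sheet1!A1:D10', 'Sheet2!A1:B5'], // each range includes its sheet name
  majorDimension: 'ROWS',
  valueRenderOption: 'UNFORMATTED_VALUE',
}
```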
### `google_sheets_batch_update`
Update multiple ranges in a Google Sheets spreadsheet in a single request
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `spreadsheetId` | string | Yes | The ID of the spreadsheet |
| `data` | json | Yes | Array of value ranges to update. Each item should have "range" \(e.g., "Sheet1!A1:D10"\) and "values" \(2D array\). |
| `valueInputOption` | string | No | How input data should be interpreted: "RAW" or "USER_ENTERED" \(default\). USER_ENTERED parses formulas. |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `spreadsheetId` | string | The spreadsheet ID |
| `totalUpdatedRows` | number | Total number of rows updated |
| `totalUpdatedColumns` | number | Total number of columns updated |
| `totalUpdatedCells` | number | Total number of cells updated |
| `totalUpdatedSheets` | number | Total number of sheets updated |
| `responses` | array | Array of update responses for each range |
| ↳ `spreadsheetId` | string | The spreadsheet ID |
| ↳ `updatedRange` | string | The range that was updated |
| ↳ `updatedRows` | number | Number of rows updated in this range |
| ↳ `updatedColumns` | number | Number of columns updated in this range |
| ↳ `updatedCells` | number | Number of cells updated in this range |
| `metadata` | json | Spreadsheet metadata including ID and URL |
| ↳ `spreadsheetId` | string | Google Sheets spreadsheet ID |
| ↳ `spreadsheetUrl` | string | Spreadsheet URL |
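For illustration, a sketch of the `data` parameter, where each item pairs a range with a 2D values array as described above (IDs and values are placeholders):

```ts
// Hypothetical arguments for google_sheets_batch_update.
const batchUpdateArgs = {
  spreadsheetId: 'YOUR_SPREADSHEET_ID', // placeholder
  data: [
    { range: 'Sheet1!A1:B2', values: [['Name', 'Age'], ['Alice', 30]] },
    { range: 'Summary!A1', values: [['=SUM(Sheet1!B2:B100)']] },
  ],
  // USER_ENTERED parses the formula above; 'RAW' would store it as literal text.
  valueInputOption: 'USER_ENTERED',
}
```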
### `google_sheets_batch_clear`
Clear multiple ranges in a Google Sheets spreadsheet in a single request
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `spreadsheetId` | string | Yes | The ID of the spreadsheet |
| `ranges` | json | Yes | Array of ranges to clear \(e.g., \["Sheet1!A1:D10", "Sheet2!A1:B5"\]\). Each range should include sheet name. |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `spreadsheetId` | string | The spreadsheet ID |
| `clearedRanges` | array | Array of ranges that were cleared |
| `metadata` | json | Spreadsheet metadata including ID and URL |
| ↳ `spreadsheetId` | string | Google Sheets spreadsheet ID |
| ↳ `spreadsheetUrl` | string | Spreadsheet URL |
### `google_sheets_copy_sheet`
Copy a sheet from one spreadsheet to another
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `sourceSpreadsheetId` | string | Yes | The ID of the source spreadsheet |
| `sheetId` | number | Yes | The ID of the sheet to copy \(numeric ID, not the sheet name\). Use Get Spreadsheet to find sheet IDs. |
| `destinationSpreadsheetId` | string | Yes | The ID of the destination spreadsheet where the sheet will be copied |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `sheetId` | number | The ID of the newly created sheet in the destination |
| `title` | string | The title of the copied sheet |
| `index` | number | The index \(position\) of the copied sheet |
| `sheetType` | string | The type of the sheet \(GRID, CHART, etc.\) |
| `destinationSpreadsheetId` | string | The ID of the destination spreadsheet |
| `destinationSpreadsheetUrl` | string | URL to the destination spreadsheet |
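Since `sheetId` is the numeric ID rather than the sheet name, a typical flow first calls Get Spreadsheet to look it up. A sketch, where `runTool` is a hypothetical stand-in for however your workflow invokes these tools:

```ts
// Hypothetical two-step flow: look up the numeric sheetId, then copy the sheet.
const source = await runTool('google_sheets_get_spreadsheet', {
  spreadsheetId: 'SOURCE_ID', // placeholder
})
const sheetId = source.sheets.find((s: { title: string }) => s.title === 'Data')?.sheetId
const copied = await runTool('google_sheets_copy_sheet', {
  sourceSpreadsheetId: 'SOURCE_ID',
  sheetId, // numeric ID, not the sheet name
  destinationSpreadsheetId: 'DEST_ID', // placeholder
})
```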

View File

@@ -30,7 +30,7 @@ In Sim, the Google Slides integration enables your agents to interact directly w
## Usage Instructions
Integrate Google Slides into the workflow. Can read, write, create presentations, replace text, add slides, add images, and get thumbnails.
Integrate Google Slides into the workflow. Can read, write, create presentations, replace text, add slides, add images, get thumbnails, get page details, delete objects, duplicate objects, reorder slides, create tables, create shapes, and insert text.
@@ -52,6 +52,15 @@ Read content from a Google Slides presentation
| --------- | ---- | ----------- |
| `slides` | json | Array of slides with their content |
| `metadata` | json | Presentation metadata including ID, title, and URL |
| ↳ `presentationId` | string | The presentation ID |
| ↳ `title` | string | The presentation title |
| ↳ `pageSize` | object | Presentation page size |
| ↳ `width` | json | Page width as a Dimension object |
| ↳ `height` | json | Page height as a Dimension object |
| ↳ `mimeType` | string | The mime type of the presentation |
| ↳ `url` | string | URL to open the presentation |
### `google_slides_write`
@@ -71,6 +80,10 @@ Write or update content in a Google Slides presentation
| --------- | ---- | ----------- |
| `updatedContent` | boolean | Indicates if presentation content was updated successfully |
| `metadata` | json | Updated presentation metadata including ID, title, and URL |
| ↳ `presentationId` | string | The presentation ID |
| ↳ `title` | string | The presentation title |
| ↳ `mimeType` | string | The mime type of the presentation |
| ↳ `url` | string | URL to open the presentation |
### `google_slides_create`
@@ -90,6 +103,10 @@ Create a new Google Slides presentation
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `metadata` | json | Created presentation metadata including ID, title, and URL |
| ↳ `presentationId` | string | The presentation ID |
| ↳ `title` | string | The presentation title |
| ↳ `mimeType` | string | The mime type of the presentation |
| ↳ `url` | string | URL to open the presentation |
### `google_slides_replace_all_text`
@@ -111,6 +128,10 @@ Find and replace all occurrences of text throughout a Google Slides presentation
| --------- | ---- | ----------- |
| `occurrencesChanged` | number | Number of text occurrences that were replaced |
| `metadata` | json | Operation metadata including presentation ID and URL |
| ↳ `presentationId` | string | The presentation ID |
| ↳ `findText` | string | The text that was searched for |
| ↳ `replaceText` | string | The text that replaced the matches |
| ↳ `url` | string | URL to open the presentation |
### `google_slides_add_slide`
@@ -131,6 +152,10 @@ Add a new slide to a Google Slides presentation with a specified layout
| --------- | ---- | ----------- |
| `slideId` | string | The object ID of the newly created slide |
| `metadata` | json | Operation metadata including presentation ID, layout, and URL |
| ↳ `presentationId` | string | The presentation ID |
| ↳ `layout` | string | The layout used for the new slide |
| ↳ `insertionIndex` | number | The zero-based index where the slide was inserted |
| ↳ `url` | string | URL to open the presentation |
### `google_slides_add_image`
@@ -154,6 +179,10 @@ Insert an image into a specific slide in a Google Slides presentation
| --------- | ---- | ----------- |
| `imageId` | string | The object ID of the newly created image |
| `metadata` | json | Operation metadata including presentation ID and image URL |
| ↳ `presentationId` | string | The presentation ID |
| ↳ `pageObjectId` | string | The page object ID where the image was inserted |
| ↳ `imageUrl` | string | The source image URL |
| ↳ `url` | string | URL to open the presentation |
### `google_slides_get_thumbnail`
@@ -176,5 +205,182 @@ Generate a thumbnail image of a specific slide in a Google Slides presentation
| `width` | number | Width of the thumbnail in pixels |
| `height` | number | Height of the thumbnail in pixels |
| `metadata` | json | Operation metadata including presentation ID and page object ID |
| ↳ `presentationId` | string | The presentation ID |
| ↳ `pageObjectId` | string | The page object ID for the thumbnail |
| ↳ `thumbnailSize` | string | The requested thumbnail size |
| ↳ `mimeType` | string | The thumbnail MIME type |
### `google_slides_get_page`
Get detailed information about a specific slide/page in a Google Slides presentation
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `presentationId` | string | Yes | The ID of the presentation |
| `pageObjectId` | string | Yes | The object ID of the slide/page to retrieve |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `objectId` | string | The object ID of the page |
| `pageType` | string | The type of page \(SLIDE, MASTER, LAYOUT, NOTES, NOTES_MASTER\) |
| `pageElements` | array | Array of page elements \(shapes, images, tables, etc.\) on this page |
| `slideProperties` | object | Properties specific to slides \(layout, master, notes\) |
| ↳ `layoutObjectId` | string | Object ID of the layout this slide is based on |
| ↳ `masterObjectId` | string | Object ID of the master this slide is based on |
| ↳ `notesPage` | json | The notes page associated with the slide |
| `metadata` | object | Operation metadata including presentation ID and URL |
| ↳ `presentationId` | string | The presentation ID |
| ↳ `url` | string | URL to the presentation |
### `google_slides_delete_object`
Delete a page element (shape, image, table, etc.) or an entire slide from a Google Slides presentation
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `presentationId` | string | Yes | The ID of the presentation |
| `objectId` | string | Yes | The object ID of the element or slide to delete |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `deleted` | boolean | Whether the object was successfully deleted |
| `objectId` | string | The object ID that was deleted |
| `metadata` | object | Operation metadata including presentation ID and URL |
| ↳ `presentationId` | string | The presentation ID |
| ↳ `url` | string | URL to the presentation |
### `google_slides_duplicate_object`
Duplicate an object (slide, shape, image, table, etc.) in a Google Slides presentation
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `presentationId` | string | Yes | The ID of the presentation |
| `objectId` | string | Yes | The object ID of the element or slide to duplicate |
| `objectIds` | string | No | Optional JSON object mapping source object IDs \(within the slide being duplicated\) to new object IDs for the duplicates. Format: \{"sourceId1":"newId1","sourceId2":"newId2"\} |
| `Format` | string | No | No description |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `duplicatedObjectId` | string | The object ID of the newly created duplicate |
| `metadata` | object | Operation metadata including presentation ID and source object ID |
| ↳ `presentationId` | string | The presentation ID |
| ↳ `sourceObjectId` | string | The original object ID that was duplicated |
| ↳ `url` | string | URL to the presentation |
### `google_slides_update_slides_position`
Move one or more slides to a new position in a Google Slides presentation
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `presentationId` | string | Yes | The ID of the presentation |
| `slideObjectIds` | string | Yes | Comma-separated list of slide object IDs to move. The slides will maintain their relative order. |
| `insertionIndex` | number | Yes | The zero-based index where the slides should be moved. All slides with indices greater than or equal to this will be shifted right. |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `moved` | boolean | Whether the slides were successfully moved |
| `slideObjectIds` | array | The slide object IDs that were moved |
| `insertionIndex` | number | The index where the slides were moved to |
| `metadata` | object | Operation metadata including presentation ID and URL |
| ↳ `presentationId` | string | The presentation ID |
| ↳ `url` | string | URL to the presentation |
### `google_slides_create_table`
Create a new table on a slide in a Google Slides presentation
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `presentationId` | string | Yes | The ID of the presentation |
| `pageObjectId` | string | Yes | The object ID of the slide/page to add the table to |
| `rows` | number | Yes | Number of rows in the table \(minimum 1\) |
| `columns` | number | Yes | Number of columns in the table \(minimum 1\) |
| `width` | number | No | Width of the table in points \(default: 400\) |
| `height` | number | No | Height of the table in points \(default: 200\) |
| `positionX` | number | No | X position from the left edge in points \(default: 100\) |
| `positionY` | number | No | Y position from the top edge in points \(default: 100\) |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `tableId` | string | The object ID of the newly created table |
| `rows` | number | Number of rows in the table |
| `columns` | number | Number of columns in the table |
| `metadata` | object | Operation metadata including presentation ID and page object ID |
| ↳ `presentationId` | string | The presentation ID |
| ↳ `pageObjectId` | string | The page object ID where the table was created |
| ↳ `url` | string | URL to the presentation |
### `google_slides_create_shape`
Create a shape (rectangle, ellipse, text box, arrow, etc.) on a slide in a Google Slides presentation
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `presentationId` | string | Yes | The ID of the presentation |
| `pageObjectId` | string | Yes | The object ID of the slide/page to add the shape to |
| `shapeType` | string | Yes | The type of shape to create. Common types: TEXT_BOX, RECTANGLE, ROUND_RECTANGLE, ELLIPSE, TRIANGLE, DIAMOND, STAR_5, ARROW_EAST, HEART, CLOUD |
| `width` | number | No | Width of the shape in points \(default: 200\) |
| `height` | number | No | Height of the shape in points \(default: 100\) |
| `positionX` | number | No | X position from the left edge in points \(default: 100\) |
| `positionY` | number | No | Y position from the top edge in points \(default: 100\) |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `shapeId` | string | The object ID of the newly created shape |
| `shapeType` | string | The type of shape that was created |
| `metadata` | object | Operation metadata including presentation ID and page object ID |
| ↳ `presentationId` | string | The presentation ID |
| ↳ `pageObjectId` | string | The page object ID where the shape was created |
| ↳ `url` | string | URL to the presentation |
### `google_slides_insert_text`
Insert text into a shape or table cell in a Google Slides presentation. Use this to add text to text boxes, shapes, or table cells.
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `presentationId` | string | Yes | The ID of the presentation |
| `objectId` | string | Yes | The object ID of the shape or table cell to insert text into. For table cells, use the cell object ID. |
| `text` | string | Yes | The text to insert |
| `insertionIndex` | number | No | The zero-based index at which to insert the text. If not specified, text is inserted at the beginning \(index 0\). |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `inserted` | boolean | Whether the text was successfully inserted |
| `objectId` | string | The object ID where text was inserted |
| `text` | string | The text that was inserted |
| `metadata` | object | Operation metadata including presentation ID and URL |
| ↳ `presentationId` | string | The presentation ID |
| ↳ `url` | string | URL to the presentation |
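These two tools compose naturally: create a text box with `google_slides_create_shape`, then target its `shapeId` with `google_slides_insert_text`. A sketch, where `runTool` is a hypothetical stand-in for the workflow's tool-invocation mechanism and the IDs are placeholders:

```ts
// Hypothetical flow: create a text box on a slide, then insert text into it.
const shape = await runTool('google_slides_create_shape', {
  presentationId: 'PRESENTATION_ID', // placeholder
  pageObjectId: 'SLIDE_OBJECT_ID', // placeholder; see google_slides_get_page
  shapeType: 'TEXT_BOX',
  width: 300,
  height: 80,
  positionX: 50,
  positionY: 50,
})
await runTool('google_slides_insert_text', {
  presentationId: 'PRESENTATION_ID',
  objectId: shape.shapeId, // the newly created shape's object ID
  text: 'Q3 Highlights',
})
```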

View File

@@ -51,6 +51,7 @@ Search for similar content in a knowledge base using vector similarity
| `properties` | string | No | No description |
| `tagName` | string | No | No description |
| `tagValue` | string | No | No description |
| `tagFilters` | string | No | No description |
#### Output
@@ -108,19 +109,8 @@ Create a new document in a knowledge base
| `knowledgeBaseId` | string | Yes | ID of the knowledge base containing the document |
| `name` | string | Yes | Name of the document |
| `content` | string | Yes | Content of the document |
| `tag1` | string | No | Tag 1 value for the document |
| `tag2` | string | No | Tag 2 value for the document |
| `tag3` | string | No | Tag 3 value for the document |
| `tag4` | string | No | Tag 4 value for the document |
| `tag5` | string | No | Tag 5 value for the document |
| `tag6` | string | No | Tag 6 value for the document |
| `tag7` | string | No | Tag 7 value for the document |
| `documentTagsData` | array | No | Structured tag data with names, types, and values |
| `items` | object | No | No description |
| `properties` | string | No | No description |
| `tagName` | string | No | No description |
| `tagValue` | string | No | No description |
| `tagType` | string | No | No description |
| `documentTags` | object | No | Document tags |
| `documentTags` | string | No | No description |
#### Output

View File

@@ -106,6 +106,7 @@
"supabase", "supabase",
"tavily", "tavily",
"telegram", "telegram",
"textract",
"tinybird", "tinybird",
"translate", "translate",
"trello", "trello",

View File

@@ -45,8 +45,7 @@ Read data from a specific sheet in a Microsoft Excel spreadsheet
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `spreadsheetId` | string | Yes | The ID of the spreadsheet to read from |
| `sheetName` | string | Yes | The name of the sheet/tab to read from |
| `cellRange` | string | No | The cell range to read \(e.g., "A1:D10"\). If not specified, reads the entire used range. |
| `range` | string | No | The range of cells to read from. Accepts "SheetName!A1:B2" for explicit ranges or just "SheetName" to read the used range of that sheet. If omitted, reads the used range of the first sheet. |
#### Output
@@ -68,9 +67,8 @@ Write data to a specific sheet in a Microsoft Excel spreadsheet
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `spreadsheetId` | string | Yes | The ID of the spreadsheet to write to |
| `sheetName` | string | Yes | The name of the sheet/tab to write to |
| `cellRange` | string | No | The cell range to write to \(e.g., "A1:D10", "A1"\). Defaults to "A1" if not specified. |
| `values` | array | Yes | The data to write as a 2D array \(e.g. \[\["Name", "Age"\], \["Alice", 30\], \["Bob", 25\]\]\) or array of objects. |
| `range` | string | No | The range of cells to write to |
| `values` | array | Yes | The data to write to the spreadsheet |
| `valueInputOption` | string | No | The format of the data to write |
| `includeValuesInResponse` | boolean | No | Whether to include the written values in the response |
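For illustration, the three range forms the read tool's new `range` parameter accepts, restated as placeholder argument objects:

```ts
// Hypothetical argument objects for the Excel read tool above (placeholder IDs).
const explicitRange = { spreadsheetId: 'WORKBOOK_ID', range: 'Sheet1!A1:B2' } // exact cells
const wholeSheet = { spreadsheetId: 'WORKBOOK_ID', range: 'Sheet1' } // used range of that sheet
const firstSheet = { spreadsheetId: 'WORKBOOK_ID' } // used range of the first sheet
```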

View File

@@ -6,7 +6,7 @@ description: Extract text from PDF documents
import { BlockInfoCard } from "@/components/ui/block-info-card"
<BlockInfoCard
type="mistral_parse"
type="mistral_parse_v2"
color="#000000"
/>
@@ -54,18 +54,37 @@ Parse PDF documents using Mistral OCR API
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `success` | boolean | Whether the PDF was parsed successfully |
| `content` | string | Extracted content in the requested format \(markdown, text, or JSON\) |
| `metadata` | object | Processing metadata including jobId, fileType, pageCount, and usage info |
| ↳ `jobId` | string | Unique job identifier |
| ↳ `fileType` | string | File type \(e.g., pdf\) |
| ↳ `fileName` | string | Original file name |
| ↳ `source` | string | Source type \(url\) |
| ↳ `pageCount` | number | Number of pages processed |
| ↳ `model` | string | Mistral model used |
| ↳ `resultType` | string | Output format \(markdown, text, json\) |
| ↳ `processedAt` | string | Processing timestamp |
| ↳ `sourceUrl` | string | Source URL if applicable |
| ↳ `usageInfo` | object | Usage statistics from OCR processing |
| `pages` | array | Array of page objects from Mistral OCR |
| ↳ `index` | number | Page index \(zero-based\) |
| ↳ `markdown` | string | Extracted markdown content |
| ↳ `images` | array | Images extracted from this page with bounding boxes |
| ↳ `id` | string | Image identifier \(e.g., img-0.jpeg\) |
| ↳ `top_left_x` | number | Top-left X coordinate in pixels |
| ↳ `top_left_y` | number | Top-left Y coordinate in pixels |
| ↳ `bottom_right_x` | number | Bottom-right X coordinate in pixels |
| ↳ `bottom_right_y` | number | Bottom-right Y coordinate in pixels |
| ↳ `image_base64` | string | Base64-encoded image data \(when include_image_base64=true\) |
| ↳ `dimensions` | object | Page dimensions |
| ↳ `dpi` | number | Dots per inch |
| ↳ `height` | number | Page height in pixels |
| ↳ `width` | number | Page width in pixels |
| ↳ `tables` | array | Extracted tables as HTML/markdown \(when table_format is set\). Referenced via placeholders like \[tbl-0.html\] |
| ↳ `hyperlinks` | array | Array of URL strings detected in the page |
| ↳ `header` | string | Page header content \(when extract_header=true\) |
| ↳ `footer` | string | Page footer content \(when extract_footer=true\) |
| `model` | string | Mistral OCR model identifier \(e.g., mistral-ocr-latest\) |
| `usage_info` | object | Usage and processing statistics |
| ↳ `pages_processed` | number | Total number of pages processed |
| ↳ `doc_size_bytes` | number | Document file size in bytes |
| `document_annotation` | string | Structured annotation data as JSON string \(when applicable\) |
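Since v2 returns per-page objects rather than a single `content` string, downstream steps that need one document string can join the pages themselves. A minimal sketch against the output shape above:

```ts
// Hypothetical post-processing of a mistral_parse_v2 result: concatenate the
// per-page markdown into one document string, in page order.
type OcrPage = { index: number; markdown: string }

function joinPages(result: { pages: OcrPage[] }): string {
  return result.pages
    .sort((a, b) => a.index - b.index) // ensure page order
    .map((p) => p.markdown)
    .join('\n\n')
}
```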

View File

@@ -58,6 +58,7 @@ Upload a file to an AWS S3 bucket
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `url` | string | URL of the uploaded S3 object |
| `uri` | string | S3 URI of the uploaded object \(s3://bucket/key\) |
| `metadata` | object | Upload metadata including ETag and location |
### `s3_get_object`
@@ -149,6 +150,7 @@ Copy an object within or between AWS S3 buckets
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `url` | string | URL of the copied S3 object |
| `uri` | string | S3 URI of the copied object \(s3://bucket/key\) |
| `metadata` | object | Copy operation metadata |

View File

@@ -84,9 +84,10 @@ Send messages to Slack channels or direct messages. Supports Slack mrkdwn format
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `authMethod` | string | No | Authentication method: oauth or bot_token |
| `destinationType` | string | No | Destination type: channel or dm |
| `botToken` | string | No | Bot token for Custom Bot |
| `channel` | string | No | Target Slack channel \(e.g., #general\) |
| `userId` | string | No | Target Slack user ID for direct messages \(e.g., U1234567890\) |
| `dmUserId` | string | No | Target Slack user for direct messages |
| `text` | string | Yes | Message text to send \(supports Slack mrkdwn formatting\) |
| `thread_ts` | string | No | Thread timestamp to reply to \(creates thread reply\) |
| `files` | file[] | No | Files to attach to the message |
@@ -132,9 +133,10 @@ Read the latest messages from Slack channels. Retrieve conversation history with
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `authMethod` | string | No | Authentication method: oauth or bot_token |
| `destinationType` | string | No | Destination type: channel or dm |
| `botToken` | string | No | Bot token for Custom Bot |
| `channel` | string | No | Slack channel to read messages from \(e.g., #general\) |
| `userId` | string | No | User ID for DM conversation \(e.g., U1234567890\) |
| `dmUserId` | string | No | Target Slack user for DM conversation |
| `limit` | number | No | Number of messages to retrieve \(default: 10, max: 15\) |
| `oldest` | string | No | Start of time range \(timestamp\) |
| `latest` | string | No | End of time range \(timestamp\) |

View File

@@ -0,0 +1,120 @@
---
title: AWS Textract
description: Extract text, tables, and forms from documents
---
import { BlockInfoCard } from "@/components/ui/block-info-card"
<BlockInfoCard
type="textract"
color="linear-gradient(135deg, #055F4E 0%, #56C0A7 100%)"
/>
{/* MANUAL-CONTENT-START:intro */}
[AWS Textract](https://aws.amazon.com/textract/) is a powerful AI service from Amazon Web Services designed to automatically extract printed text, handwriting, tables, forms, key-value pairs, and other structured data from scanned documents and images. Textract leverages advanced optical character recognition (OCR) and document analysis to transform documents into actionable data, enabling automation, analytics, compliance, and more.
With AWS Textract, you can:
- **Extract text from images and documents**: Recognize printed text and handwriting in formats such as PDF, JPEG, PNG, or TIFF
- **Detect and extract tables**: Automatically find tables and output their structured content
- **Parse forms and key-value pairs**: Pull structured data from forms, including fields and their corresponding values
- **Identify signatures and layout features**: Detect signatures, geometric layout, and relationships between document elements
- **Customize extraction with queries**: Extract specific fields and answers using query-based extraction (e.g., "What is the invoice number?")
In Sim, the AWS Textract integration empowers your agents to intelligently process documents as part of their workflows. This unlocks automation scenarios such as data entry from invoices, onboarding documents, contracts, receipts, and more. Your agents can extract relevant data, analyze structured forms, and generate summaries or reports directly from document uploads or URLs. By connecting Sim with AWS Textract, you can reduce manual effort, improve data accuracy, and streamline your business processes with robust document understanding.
{/* MANUAL-CONTENT-END */}
## Usage Instructions
Integrate AWS Textract into your workflow to extract text, tables, forms, and key-value pairs from documents. Single-page mode supports JPEG, PNG, and single-page PDF. Multi-page mode supports multi-page PDF and TIFF.
## Tools
### `textract_parser`
Parse documents using AWS Textract OCR and document analysis
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `accessKeyId` | string | Yes | AWS Access Key ID |
| `secretAccessKey` | string | Yes | AWS Secret Access Key |
| `region` | string | Yes | AWS region for Textract service \(e.g., us-east-1\) |
| `processingMode` | string | No | Document type: single-page or multi-page. Defaults to single-page. |
| `filePath` | string | No | URL to a document to be processed \(JPEG, PNG, or single-page PDF\). |
| `s3Uri` | string | No | S3 URI for multi-page processing \(s3://bucket/key\). |
| `fileUpload` | object | No | File upload data from file-upload component |
| `featureTypes` | array | No | Feature types to detect: TABLES, FORMS, QUERIES, SIGNATURES, LAYOUT. If not specified, only text detection is performed. |
| `items` | string | No | Feature type |
| `queries` | array | No | Custom queries to extract specific information. Only used when featureTypes includes QUERIES. |
| `items` | object | No | Query configuration |
| `properties` | string | No | The query text |
| `Text` | string | No | No description |
| `Alias` | string | No | No description |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `blocks` | array | Array of Block objects containing detected text, tables, forms, and other elements |
| ↳ `BlockType` | string | Type of block \(PAGE, LINE, WORD, TABLE, CELL, KEY_VALUE_SET, etc.\) |
| ↳ `Id` | string | Unique identifier for the block |
| ↳ `Text` | string | Text content detected in the block |
| ↳ `TextType` | string | Type of text \(PRINTED or HANDWRITING\) |
| ↳ `Confidence` | number | Confidence score \(0-100\) |
| ↳ `Page` | number | Page number |
| ↳ `Geometry` | object | Location and bounding box information |
| ↳ `BoundingBox` | object | Bounding box coordinates as ratios of document dimensions |
| ↳ `Height` | number | Height as ratio of document height |
| ↳ `Left` | number | Left position as ratio of document width |
| ↳ `Top` | number | Top position as ratio of document height |
| ↳ `Width` | number | Width as ratio of document width |
| ↳ `Polygon` | array | Polygon coordinates |
| ↳ `X` | number | X coordinate |
| ↳ `Y` | number | Y coordinate |
| ↳ `Relationships` | array | Relationships to other blocks |
| ↳ `Type` | string | Relationship type \(CHILD, VALUE, ANSWER, etc.\) |
| ↳ `Ids` | array | IDs of related blocks |
| ↳ `EntityTypes` | array | Entity types for KEY_VALUE_SET \(KEY or VALUE\) |
| ↳ `SelectionStatus` | string | For checkboxes: SELECTED or NOT_SELECTED |
| ↳ `RowIndex` | number | Row index for table cells |
| ↳ `ColumnIndex` | number | Column index for table cells |
| ↳ `RowSpan` | number | Row span for merged cells |
| ↳ `ColumnSpan` | number | Column span for merged cells |
| ↳ `Query` | object | Query information for QUERY blocks |
| ↳ `Text` | string | Query text |
| ↳ `Alias` | string | Query alias |
| ↳ `Pages` | array | Pages to search |
| `documentMetadata` | object | Metadata about the analyzed document |
| ↳ `pages` | number | Number of pages in the document |
| `modelVersion` | string | Version of the Textract model used for processing |
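For orientation, a minimal sketch of the underlying AWS SDK v3 call this tool corresponds to. This is a direct Textract call, not Sim's internal implementation; the region, file path, and query are placeholders:

```ts
import { readFileSync } from 'node:fs'
import { AnalyzeDocumentCommand, TextractClient } from '@aws-sdk/client-textract'

// Direct AWS Textract sketch: analyze a single-page image with tables, forms,
// and one custom query. Credentials come from the default provider chain here.
const client = new TextractClient({ region: 'us-east-1' }) // placeholder region
const response = await client.send(
  new AnalyzeDocumentCommand({
    Document: { Bytes: readFileSync('invoice.png') }, // placeholder file
    FeatureTypes: ['TABLES', 'FORMS', 'QUERIES'],
    QueriesConfig: {
      Queries: [{ Text: 'What is the invoice number?', Alias: 'INVOICE_NUMBER' }],
    },
  })
)
// response.Blocks, response.DocumentMetadata, and response.AnalyzeDocumentModelVersion
// correspond to blocks, documentMetadata, and modelVersion in the output table above.
```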

View File

@@ -6,7 +6,7 @@ description: Generate videos from text using AI
import { BlockInfoCard } from "@/components/ui/block-info-card"
<BlockInfoCard
type="video_generator"
type="video_generator_v2"
color="#181C1E"
/>

View File

@@ -4,7 +4,7 @@ import { eq } from 'drizzle-orm'
import { NextResponse } from 'next/server'
import { getSession } from '@/lib/auth'
const logger = createLogger('SSO-Providers')
const logger = createLogger('SSOProvidersRoute')
export async function GET() {
try {

View File

@@ -6,7 +6,7 @@ import { hasSSOAccess } from '@/lib/billing'
import { env } from '@/lib/core/config/env'
import { REDACTED_MARKER } from '@/lib/core/security/redaction'
const logger = createLogger('SSO-Register')
const logger = createLogger('SSORegisterRoute')
const mappingSchema = z
.object({
@@ -43,6 +43,10 @@ const ssoRegistrationSchema = z.discriminatedUnion('providerType', [
])
.default(['openid', 'profile', 'email']),
pkce: z.boolean().default(true),
authorizationEndpoint: z.string().url().optional(),
tokenEndpoint: z.string().url().optional(),
userInfoEndpoint: z.string().url().optional(),
jwksEndpoint: z.string().url().optional(),
}),
z.object({
providerType: z.literal('saml'),
@@ -64,12 +68,10 @@ const ssoRegistrationSchema = z.discriminatedUnion('providerType', [
export async function POST(request: NextRequest) {
try {
// SSO plugin must be enabled in Better Auth
if (!env.SSO_ENABLED) {
return NextResponse.json({ error: 'SSO is not enabled' }, { status: 400 })
}
// Check plan access (enterprise) or env var override
const session = await getSession()
if (!session?.user?.id) {
return NextResponse.json({ error: 'Authentication required' }, { status: 401 })
@@ -116,7 +118,16 @@ export async function POST(request: NextRequest) {
}
if (providerType === 'oidc') {
const { clientId, clientSecret, scopes, pkce } = body
const {
clientId,
clientSecret,
scopes,
pkce,
authorizationEndpoint,
tokenEndpoint,
userInfoEndpoint,
jwksEndpoint,
} = body
const oidcConfig: any = {
clientId,
@@ -127,50 +138,104 @@ export async function POST(request: NextRequest) {
pkce: pkce ?? true,
}
// Add manual endpoints for providers that might need them
// Common patterns for OIDC providers that don't support discovery properly
if (
issuer.includes('okta.com') ||
issuer.includes('auth0.com') ||
issuer.includes('identityserver')
) {
const baseUrl = issuer.includes('/oauth2/default')
? issuer.replace('/oauth2/default', '')
: issuer.replace('/oauth', '').replace('/v2.0', '').replace('/oauth2', '')
// Okta-style endpoints
if (issuer.includes('okta.com')) {
oidcConfig.authorizationEndpoint = `${baseUrl}/oauth2/default/v1/authorize`
oidcConfig.tokenEndpoint = `${baseUrl}/oauth2/default/v1/token`
oidcConfig.userInfoEndpoint = `${baseUrl}/oauth2/default/v1/userinfo`
oidcConfig.jwksEndpoint = `${baseUrl}/oauth2/default/v1/keys`
}
// Auth0-style endpoints
else if (issuer.includes('auth0.com')) {
oidcConfig.authorizationEndpoint = `${baseUrl}/authorize`
oidcConfig.tokenEndpoint = `${baseUrl}/oauth/token`
oidcConfig.userInfoEndpoint = `${baseUrl}/userinfo`
oidcConfig.jwksEndpoint = `${baseUrl}/.well-known/jwks.json`
}
// Generic OIDC endpoints (IdentityServer, etc.)
else {
oidcConfig.authorizationEndpoint = `${baseUrl}/connect/authorize`
oidcConfig.tokenEndpoint = `${baseUrl}/connect/token`
oidcConfig.userInfoEndpoint = `${baseUrl}/connect/userinfo`
oidcConfig.jwksEndpoint = `${baseUrl}/.well-known/jwks`
}
logger.info('Using manual OIDC endpoints for provider', {
providerId,
provider: issuer.includes('okta.com')
? 'Okta'
: issuer.includes('auth0.com')
? 'Auth0'
: 'Generic',
authEndpoint: oidcConfig.authorizationEndpoint,
})
}
oidcConfig.authorizationEndpoint = authorizationEndpoint
oidcConfig.tokenEndpoint = tokenEndpoint
oidcConfig.userInfoEndpoint = userInfoEndpoint
oidcConfig.jwksEndpoint = jwksEndpoint
const needsDiscovery =
!oidcConfig.authorizationEndpoint || !oidcConfig.tokenEndpoint || !oidcConfig.jwksEndpoint
if (needsDiscovery) {
const discoveryUrl = `${issuer.replace(/\/$/, '')}/.well-known/openid-configuration`
try {
logger.info('Fetching OIDC discovery document for missing endpoints', {
discoveryUrl,
hasAuthEndpoint: !!oidcConfig.authorizationEndpoint,
hasTokenEndpoint: !!oidcConfig.tokenEndpoint,
hasJwksEndpoint: !!oidcConfig.jwksEndpoint,
})
const discoveryResponse = await fetch(discoveryUrl, {
headers: { Accept: 'application/json' },
})
if (!discoveryResponse.ok) {
logger.error('Failed to fetch OIDC discovery document', {
status: discoveryResponse.status,
statusText: discoveryResponse.statusText,
})
return NextResponse.json(
{
error: `Failed to fetch OIDC discovery document from ${discoveryUrl}. Status: ${discoveryResponse.status}. Provide all endpoints explicitly or verify the issuer URL.`,
},
{ status: 400 }
)
}
const discovery = await discoveryResponse.json()
oidcConfig.authorizationEndpoint =
oidcConfig.authorizationEndpoint || discovery.authorization_endpoint
oidcConfig.tokenEndpoint = oidcConfig.tokenEndpoint || discovery.token_endpoint
oidcConfig.userInfoEndpoint = oidcConfig.userInfoEndpoint || discovery.userinfo_endpoint
oidcConfig.jwksEndpoint = oidcConfig.jwksEndpoint || discovery.jwks_uri
logger.info('Merged OIDC endpoints (user-provided + discovery)', {
providerId,
issuer,
authorizationEndpoint: oidcConfig.authorizationEndpoint,
tokenEndpoint: oidcConfig.tokenEndpoint,
userInfoEndpoint: oidcConfig.userInfoEndpoint,
jwksEndpoint: oidcConfig.jwksEndpoint,
})
} catch (error) {
logger.error('Error fetching OIDC discovery document', {
error: error instanceof Error ? error.message : 'Unknown error',
discoveryUrl,
})
return NextResponse.json(
{
error: `Failed to fetch OIDC discovery document from ${discoveryUrl}. Please verify the issuer URL is correct or provide all endpoints explicitly.`,
},
{ status: 400 }
)
}
} else {
logger.info('Using explicitly provided OIDC endpoints (all present)', {
providerId,
issuer,
authorizationEndpoint: oidcConfig.authorizationEndpoint,
tokenEndpoint: oidcConfig.tokenEndpoint,
userInfoEndpoint: oidcConfig.userInfoEndpoint,
jwksEndpoint: oidcConfig.jwksEndpoint,
})
}
if (
!oidcConfig.authorizationEndpoint ||
!oidcConfig.tokenEndpoint ||
!oidcConfig.jwksEndpoint
) {
const missing: string[] = []
if (!oidcConfig.authorizationEndpoint) missing.push('authorizationEndpoint')
if (!oidcConfig.tokenEndpoint) missing.push('tokenEndpoint')
if (!oidcConfig.jwksEndpoint) missing.push('jwksEndpoint')
logger.error('Missing required OIDC endpoints after discovery merge', {
missing,
authorizationEndpoint: oidcConfig.authorizationEndpoint,
tokenEndpoint: oidcConfig.tokenEndpoint,
jwksEndpoint: oidcConfig.jwksEndpoint,
})
return NextResponse.json(
{
error: `Missing required OIDC endpoints: ${missing.join(', ')}. Please provide these explicitly or verify the issuer supports OIDC discovery.`,
},
{ status: 400 }
)
}
providerConfig.oidcConfig = oidcConfig
} else if (providerType === 'saml') {
const {
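// For reference, a hypothetical registration payload against the schema above: any of
// authorizationEndpoint / tokenEndpoint / jwksEndpoint that are omitted get filled in
// from `${issuer}/.well-known/openid-configuration`, and registration fails with a 400
// if they are still missing after the discovery merge. All values are placeholders.
const exampleBody = {
providerType: 'oidc',
issuer: 'https://idp.example.com', // placeholder issuer
clientId: 'CLIENT_ID', // placeholder
clientSecret: 'CLIENT_SECRET', // placeholder
scopes: ['openid', 'profile', 'email'],
pkce: true,
tokenEndpoint: 'https://idp.example.com/oauth/token', // explicit override, kept as-is
}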

View File

@@ -224,7 +224,7 @@ export async function POST(req: NextRequest) {
hasApiKey: !!executionParams.apiKey,
})
const result = await executeTool(resolvedToolName, executionParams, true)
const result = await executeTool(resolvedToolName, executionParams)
logger.info(`[${tracker.requestId}] Tool execution complete`, {
toolName,

View File

@@ -1,10 +1,11 @@
import { db } from '@sim/db'
import { templateCreators, user } from '@sim/db/schema'
import { templateCreators } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { getSession } from '@/lib/auth'
import { generateRequestId } from '@/lib/core/utils/request'
import { verifyEffectiveSuperUser } from '@/lib/templates/permissions'
const logger = createLogger('CreatorVerificationAPI')
@@ -23,9 +24,8 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
}
// Check if user is a super user
const currentUser = await db.select().from(user).where(eq(user.id, session.user.id)).limit(1)
if (!currentUser[0]?.isSuperUser) {
const { effectiveSuperUser } = await verifyEffectiveSuperUser(session.user.id)
if (!effectiveSuperUser) {
logger.warn(`[${requestId}] Non-super user attempted to verify creator: ${id}`)
return NextResponse.json({ error: 'Only super users can verify creators' }, { status: 403 })
}
@@ -76,9 +76,8 @@ export async function DELETE(
}
// Check if user is a super user
const currentUser = await db.select().from(user).where(eq(user.id, session.user.id)).limit(1)
if (!currentUser[0]?.isSuperUser) {
const { effectiveSuperUser } = await verifyEffectiveSuperUser(session.user.id)
if (!effectiveSuperUser) {
logger.warn(`[${requestId}] Non-super user attempted to unverify creator: ${id}`)
return NextResponse.json({ error: 'Only super users can unverify creators' }, { status: 403 })
}

View File

@@ -6,9 +6,10 @@ import { createLogger } from '@sim/logger'
import binaryExtensionsList from 'binary-extensions'
import { type NextRequest, NextResponse } from 'next/server'
import { checkHybridAuth } from '@/lib/auth/hybrid'
import { createPinnedUrl, validateUrlWithDNS } from '@/lib/core/security/input-validation'
import { secureFetchWithPinnedIP, validateUrlWithDNS } from '@/lib/core/security/input-validation'
import { isSupportedFileType, parseFile } from '@/lib/file-parsers'
import { isUsingCloudStorage, type StorageContext, StorageService } from '@/lib/uploads'
import { uploadExecutionFile } from '@/lib/uploads/contexts/execution'
import { UPLOAD_DIR_SERVER } from '@/lib/uploads/core/setup.server'
import { getFileMetadataByKey } from '@/lib/uploads/server/metadata'
import {
@@ -21,6 +22,7 @@ import {
} from '@/lib/uploads/utils/file-utils'
import { getUserEntityPermissions } from '@/lib/workspaces/permissions/utils'
import { verifyFileAccess } from '@/app/api/files/authorization'
import type { UserFile } from '@/executor/types'
import '@/lib/uploads/core/setup.server'
export const dynamic = 'force-dynamic'
@@ -30,6 +32,12 @@ const logger = createLogger('FilesParseAPI')
const MAX_DOWNLOAD_SIZE_BYTES = 100 * 1024 * 1024 // 100 MB
const DOWNLOAD_TIMEOUT_MS = 30000 // 30 seconds
interface ExecutionContext {
workspaceId: string
workflowId: string
executionId: string
}
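// For reference, a hypothetical request body for this parse route: the execution
// context above is only built when workspaceId, workflowId, and executionId are all
// present, in which case downloaded files are also stored in execution storage as a
// UserFile. All values below are placeholders.
const exampleParseBody = {
filePath: 'https://example.com/report.pdf', // or an array of paths
fileType: 'pdf', // placeholder type hint
workspaceId: 'WORKSPACE_ID', // placeholder
workflowId: 'WORKFLOW_ID', // placeholder
executionId: 'EXECUTION_ID', // placeholder
}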
interface ParseResult {
success: boolean
content?: string
@@ -37,6 +45,7 @@ interface ParseResult {
filePath: string
originalName?: string // Original filename from database (for workspace files)
viewerUrl?: string | null // Viewer URL for the file if available
userFile?: UserFile // UserFile object for the raw file
metadata?: {
fileType: string
size: number
@@ -70,27 +79,45 @@ export async function POST(request: NextRequest) {
const userId = authResult.userId
const requestData = await request.json()
const { filePath, fileType, workspaceId } = requestData
const { filePath, fileType, workspaceId, workflowId, executionId } = requestData
if (!filePath || (typeof filePath === 'string' && filePath.trim() === '')) {
return NextResponse.json({ success: false, error: 'No file path provided' }, { status: 400 })
}
logger.info('File parse request received:', { filePath, fileType, workspaceId, userId })
// Build execution context if all required fields are present
const executionContext: ExecutionContext | undefined =
workspaceId && workflowId && executionId
? { workspaceId, workflowId, executionId }
: undefined
logger.info('File parse request received:', {
filePath,
fileType,
workspaceId,
userId,
hasExecutionContext: !!executionContext,
})
if (Array.isArray(filePath)) {
const results = []
for (const path of filePath) {
for (const singlePath of filePath) {
if (!path || (typeof path === 'string' && path.trim() === '')) {
if (!singlePath || (typeof singlePath === 'string' && singlePath.trim() === '')) {
results.push({
success: false,
error: 'Empty file path in array',
filePath: path || '',
filePath: singlePath || '',
})
continue
}
const result = await parseFileSingle(path, fileType, workspaceId, userId)
const result = await parseFileSingle(
singlePath,
fileType,
workspaceId,
userId,
executionContext
)
if (result.metadata) {
result.metadata.processingTime = Date.now() - startTime
}
@@ -106,6 +133,7 @@ export async function POST(request: NextRequest) {
fileType: result.metadata?.fileType || 'application/octet-stream',
size: result.metadata?.size || 0,
binary: false,
file: result.userFile,
},
filePath: result.filePath,
viewerUrl: result.viewerUrl,
@@ -121,7 +149,7 @@ export async function POST(request: NextRequest) {
})
}
const result = await parseFileSingle(filePath, fileType, workspaceId, userId)
const result = await parseFileSingle(filePath, fileType, workspaceId, userId, executionContext)
if (result.metadata) {
result.metadata.processingTime = Date.now() - startTime
@@ -137,6 +165,7 @@ export async function POST(request: NextRequest) {
fileType: result.metadata?.fileType || 'application/octet-stream',
size: result.metadata?.size || 0,
binary: false,
file: result.userFile,
},
filePath: result.filePath,
viewerUrl: result.viewerUrl,
@@ -164,7 +193,8 @@ async function parseFileSingle(
filePath: string,
fileType: string,
workspaceId: string,
userId: string
userId: string,
executionContext?: ExecutionContext
): Promise<ParseResult> {
logger.info('Parsing file:', filePath)
@@ -186,18 +216,18 @@ async function parseFileSingle(
}
if (filePath.includes('/api/files/serve/')) {
return handleCloudFile(filePath, fileType, undefined, userId)
return handleCloudFile(filePath, fileType, undefined, userId, executionContext)
}
if (filePath.startsWith('http://') || filePath.startsWith('https://')) {
return handleExternalUrl(filePath, fileType, workspaceId, userId)
return handleExternalUrl(filePath, fileType, workspaceId, userId, executionContext)
}
if (isUsingCloudStorage()) {
return handleCloudFile(filePath, fileType, undefined, userId)
return handleCloudFile(filePath, fileType, undefined, userId, executionContext)
}
return handleLocalFile(filePath, fileType, userId)
return handleLocalFile(filePath, fileType, userId, executionContext)
}
/**
@@ -230,12 +260,14 @@ function validateFilePath(filePath: string): { isValid: boolean; error?: string
/**
* Handle external URL
* If workspaceId is provided, checks if file already exists and saves to workspace if not
* If executionContext is provided, also stores the file in execution storage and returns UserFile
*/
async function handleExternalUrl(
url: string,
fileType: string,
workspaceId: string,
userId: string
userId: string,
executionContext?: ExecutionContext
): Promise<ParseResult> {
try {
logger.info('Fetching external URL:', url)
@@ -312,17 +344,13 @@ async function handleExternalUrl(
if (existingFile) {
const storageFilePath = `/api/files/serve/${existingFile.key}`
return handleCloudFile(storageFilePath, fileType, 'workspace', userId)
return handleCloudFile(storageFilePath, fileType, 'workspace', userId, executionContext)
}
}
}
const pinnedUrl = createPinnedUrl(url, urlValidation.resolvedIP!)
const response = await fetch(pinnedUrl, {
signal: AbortSignal.timeout(DOWNLOAD_TIMEOUT_MS),
headers: {
Host: urlValidation.originalHostname!,
},
})
const response = await secureFetchWithPinnedIP(url, urlValidation.resolvedIP!, {
timeout: DOWNLOAD_TIMEOUT_MS,
})
if (!response.ok) {
throw new Error(`Failed to fetch URL: ${response.status} ${response.statusText}`)
@@ -341,6 +369,19 @@ async function handleExternalUrl(
logger.info(`Downloaded file from URL: ${url}, size: ${buffer.length} bytes`)
let userFile: UserFile | undefined
const mimeType = response.headers.get('content-type') || getMimeTypeFromExtension(extension)
if (executionContext) {
try {
userFile = await uploadExecutionFile(executionContext, buffer, filename, mimeType, userId)
logger.info(`Stored file in execution storage: ${filename}`, { key: userFile.key })
} catch (uploadError) {
logger.warn(`Failed to store file in execution storage:`, uploadError)
// Continue without userFile - parsing can still work
}
}
if (shouldCheckWorkspace) { if (shouldCheckWorkspace) {
try { try {
const permission = await getUserEntityPermissions(userId, 'workspace', workspaceId) const permission = await getUserEntityPermissions(userId, 'workspace', workspaceId)
@@ -353,8 +394,6 @@ async function handleExternalUrl(
}) })
} else { } else {
const { uploadWorkspaceFile } = await import('@/lib/uploads/contexts/workspace') const { uploadWorkspaceFile } = await import('@/lib/uploads/contexts/workspace')
const mimeType =
response.headers.get('content-type') || getMimeTypeFromExtension(extension)
await uploadWorkspaceFile(workspaceId, userId, buffer, filename, mimeType) await uploadWorkspaceFile(workspaceId, userId, buffer, filename, mimeType)
logger.info(`Saved URL file to workspace storage: ${filename}`) logger.info(`Saved URL file to workspace storage: ${filename}`)
} }
@@ -363,17 +402,23 @@ async function handleExternalUrl(
} }
} }
let parseResult: ParseResult
if (extension === 'pdf') { if (extension === 'pdf') {
return await handlePdfBuffer(buffer, filename, fileType, url) parseResult = await handlePdfBuffer(buffer, filename, fileType, url)
} } else if (extension === 'csv') {
if (extension === 'csv') { parseResult = await handleCsvBuffer(buffer, filename, fileType, url)
return await handleCsvBuffer(buffer, filename, fileType, url) } else if (isSupportedFileType(extension)) {
} parseResult = await handleGenericTextBuffer(buffer, filename, extension, fileType, url)
if (isSupportedFileType(extension)) { } else {
return await handleGenericTextBuffer(buffer, filename, extension, fileType, url) parseResult = handleGenericBuffer(buffer, filename, extension, fileType)
} }
return handleGenericBuffer(buffer, filename, extension, fileType) // Attach userFile to the result
if (userFile) {
parseResult.userFile = userFile
}
return parseResult
} catch (error) { } catch (error) {
logger.error(`Error handling external URL ${url}:`, error) logger.error(`Error handling external URL ${url}:`, error)
return { return {
@@ -386,12 +431,15 @@ async function handleExternalUrl(
/** /**
* Handle file stored in cloud storage * Handle file stored in cloud storage
* If executionContext is provided and file is not already from execution storage,
* copies the file to execution storage and returns UserFile
*/ */
async function handleCloudFile( async function handleCloudFile(
filePath: string, filePath: string,
fileType: string, fileType: string,
explicitContext: string | undefined, explicitContext: string | undefined,
userId: string userId: string,
executionContext?: ExecutionContext
): Promise<ParseResult> { ): Promise<ParseResult> {
try { try {
const cloudKey = extractStorageKey(filePath) const cloudKey = extractStorageKey(filePath)
@@ -438,6 +486,7 @@ async function handleCloudFile(
const filename = originalFilename || cloudKey.split('/').pop() || cloudKey const filename = originalFilename || cloudKey.split('/').pop() || cloudKey
const extension = path.extname(filename).toLowerCase().substring(1) const extension = path.extname(filename).toLowerCase().substring(1)
const mimeType = getMimeTypeFromExtension(extension)
const normalizedFilePath = `/api/files/serve/${encodeURIComponent(cloudKey)}?context=${context}` const normalizedFilePath = `/api/files/serve/${encodeURIComponent(cloudKey)}?context=${context}`
let workspaceIdFromKey: string | undefined let workspaceIdFromKey: string | undefined
@@ -453,6 +502,39 @@ async function handleCloudFile(
const viewerUrl = getViewerUrl(cloudKey, workspaceIdFromKey) const viewerUrl = getViewerUrl(cloudKey, workspaceIdFromKey)
// Store file in execution storage if executionContext is provided
let userFile: UserFile | undefined
if (executionContext) {
// If file is already from execution context, create UserFile reference without re-uploading
if (context === 'execution') {
userFile = {
id: `file_${Date.now()}_${Math.random().toString(36).substring(2, 9)}`,
name: filename,
url: normalizedFilePath,
size: fileBuffer.length,
type: mimeType,
key: cloudKey,
context: 'execution',
}
logger.info(`Created UserFile reference for existing execution file: ${filename}`)
} else {
// Copy from workspace/other storage to execution storage
try {
userFile = await uploadExecutionFile(
executionContext,
fileBuffer,
filename,
mimeType,
userId
)
logger.info(`Copied file to execution storage: ${filename}`, { key: userFile.key })
} catch (uploadError) {
logger.warn(`Failed to copy file to execution storage:`, uploadError)
}
}
}
let parseResult: ParseResult let parseResult: ParseResult
if (extension === 'pdf') { if (extension === 'pdf') {
parseResult = await handlePdfBuffer(fileBuffer, filename, fileType, normalizedFilePath) parseResult = await handlePdfBuffer(fileBuffer, filename, fileType, normalizedFilePath)
@@ -477,6 +559,11 @@ async function handleCloudFile(
parseResult.viewerUrl = viewerUrl parseResult.viewerUrl = viewerUrl
// Attach userFile to the result
if (userFile) {
parseResult.userFile = userFile
}
return parseResult return parseResult
} catch (error) { } catch (error) {
logger.error(`Error handling cloud file ${filePath}:`, error) logger.error(`Error handling cloud file ${filePath}:`, error)
@@ -500,7 +587,8 @@ async function handleCloudFile(
async function handleLocalFile( async function handleLocalFile(
filePath: string, filePath: string,
fileType: string, fileType: string,
userId: string userId: string,
executionContext?: ExecutionContext
): Promise<ParseResult> { ): Promise<ParseResult> {
try { try {
const filename = filePath.split('/').pop() || filePath const filename = filePath.split('/').pop() || filePath
@@ -540,13 +628,32 @@ async function handleLocalFile(
const hash = createHash('md5').update(fileBuffer).digest('hex') const hash = createHash('md5').update(fileBuffer).digest('hex')
const extension = path.extname(filename).toLowerCase().substring(1) const extension = path.extname(filename).toLowerCase().substring(1)
const mimeType = fileType || getMimeTypeFromExtension(extension)
// Store file in execution storage if executionContext is provided
let userFile: UserFile | undefined
if (executionContext) {
try {
userFile = await uploadExecutionFile(
executionContext,
fileBuffer,
filename,
mimeType,
userId
)
logger.info(`Stored local file in execution storage: ${filename}`, { key: userFile.key })
} catch (uploadError) {
logger.warn(`Failed to store local file in execution storage:`, uploadError)
}
}
return { return {
success: true, success: true,
content: result.content, content: result.content,
filePath, filePath,
userFile,
metadata: { metadata: {
fileType: fileType || getMimeTypeFromExtension(extension), fileType: mimeType,
size: stats.size, size: stats.size,
hash, hash,
processingTime: 0, processingTime: 0,
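
A minimal sketch of how a caller might thread the new optional executionContext into the parser. The ExecutionContext shape here is assumed from the fields used elsewhere in this changeset; it is an illustration, not part of the diff.

// Hedged sketch - ExecutionContext shape assumed from this changeset's proxy schema.
interface ExecutionContext {
  workflowId?: string
  workspaceId?: string
  executionId?: string
  userId?: string
}

async function parseForExecution(filePath: string, userId: string, workspaceId: string) {
  const executionContext: ExecutionContext = {
    workspaceId,
    executionId: crypto.randomUUID(), // hypothetical id generation
    userId,
  }
  // With executionContext present, the parser also uploads the file to
  // execution storage and attaches the resulting UserFile to the ParseResult.
  const result = await parseFileSingle(filePath, 'application/pdf', workspaceId, userId, executionContext)
  return result.userFile // undefined if execution-storage upload failed (parsing still succeeds)
}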

View File

@@ -2,7 +2,7 @@ import { randomUUID } from 'crypto'
import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
-import { getSession } from '@/lib/auth'
+import { checkHybridAuth } from '@/lib/auth/hybrid'
import { SUPPORTED_FIELD_TYPES } from '@/lib/knowledge/constants'
import { createTagDefinition, getTagDefinitions } from '@/lib/knowledge/tags/service'
import { checkKnowledgeBaseAccess } from '@/app/api/knowledge/utils'
@@ -19,19 +19,32 @@ export async function GET(req: NextRequest, { params }: { params: Promise<{ id:
  try {
    logger.info(`[${requestId}] Getting tag definitions for knowledge base ${knowledgeBaseId}`)
-   const session = await getSession()
-   if (!session?.user?.id) {
-     return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
+   const auth = await checkHybridAuth(req, { requireWorkflowId: false })
+   if (!auth.success) {
+     return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 })
    }
-   const accessCheck = await checkKnowledgeBaseAccess(knowledgeBaseId, session.user.id)
-   if (!accessCheck.hasAccess) {
-     return NextResponse.json({ error: 'Forbidden' }, { status: 403 })
+   // Only allow session and internal JWT auth (not API key)
+   if (auth.authType === 'api_key') {
+     return NextResponse.json(
+       { error: 'API key auth not supported for this endpoint' },
+       { status: 401 }
+     )
+   }
+   // For session auth, verify KB access. Internal JWT is trusted.
+   if (auth.authType === 'session' && auth.userId) {
+     const accessCheck = await checkKnowledgeBaseAccess(knowledgeBaseId, auth.userId)
+     if (!accessCheck.hasAccess) {
+       return NextResponse.json({ error: 'Forbidden' }, { status: 403 })
+     }
    }
    const tagDefinitions = await getTagDefinitions(knowledgeBaseId)
-   logger.info(`[${requestId}] Retrieved ${tagDefinitions.length} tag definitions`)
+   logger.info(
+     `[${requestId}] Retrieved ${tagDefinitions.length} tag definitions (${auth.authType})`
+   )
    return NextResponse.json({
      success: true,
@@ -51,14 +64,25 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
  try {
    logger.info(`[${requestId}] Creating tag definition for knowledge base ${knowledgeBaseId}`)
-   const session = await getSession()
-   if (!session?.user?.id) {
-     return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
+   const auth = await checkHybridAuth(req, { requireWorkflowId: false })
+   if (!auth.success) {
+     return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 })
    }
-   const accessCheck = await checkKnowledgeBaseAccess(knowledgeBaseId, session.user.id)
-   if (!accessCheck.hasAccess) {
-     return NextResponse.json({ error: 'Forbidden' }, { status: 403 })
+   // Only allow session and internal JWT auth (not API key)
+   if (auth.authType === 'api_key') {
+     return NextResponse.json(
+       { error: 'API key auth not supported for this endpoint' },
+       { status: 401 }
+     )
+   }
+   // For session auth, verify KB access. Internal JWT is trusted.
+   if (auth.authType === 'session' && auth.userId) {
+     const accessCheck = await checkKnowledgeBaseAccess(knowledgeBaseId, auth.userId)
+     if (!accessCheck.hasAccess) {
+       return NextResponse.json({ error: 'Forbidden' }, { status: 403 })
+     }
    }
    const body = await req.json()
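
The same auth gate now appears verbatim in both GET and POST. A sketch of how it could be factored into one helper; the helper name is illustrative and not part of this change.

// Illustrative refactor only - not part of this diff.
async function requireNonApiKeyAuth(req: NextRequest, knowledgeBaseId: string) {
  const auth = await checkHybridAuth(req, { requireWorkflowId: false })
  if (!auth.success) {
    return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 })
  }
  // API keys are rejected; only session and internal JWT auth pass.
  if (auth.authType === 'api_key') {
    return NextResponse.json(
      { error: 'API key auth not supported for this endpoint' },
      { status: 401 }
    )
  }
  // Session users must have KB access; internal JWT is trusted as-is.
  if (auth.authType === 'session' && auth.userId) {
    const accessCheck = await checkKnowledgeBaseAccess(knowledgeBaseId, auth.userId)
    if (!accessCheck.hasAccess) {
      return NextResponse.json({ error: 'Forbidden' }, { status: 403 })
    }
  }
  return null // caller proceeds when no error response is returned
}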

View File

@@ -1,395 +0,0 @@
import { createLogger } from '@sim/logger'
import type { NextRequest } from 'next/server'
import { NextResponse } from 'next/server'
import { z } from 'zod'
import { checkHybridAuth } from '@/lib/auth/hybrid'
import { generateInternalToken } from '@/lib/auth/internal'
import { isDev } from '@/lib/core/config/feature-flags'
import { createPinnedUrl, validateUrlWithDNS } from '@/lib/core/security/input-validation'
import { generateRequestId } from '@/lib/core/utils/request'
import { getBaseUrl } from '@/lib/core/utils/urls'
import { executeTool } from '@/tools'
import { getTool, validateRequiredParametersAfterMerge } from '@/tools/utils'
const logger = createLogger('ProxyAPI')
const proxyPostSchema = z.object({
toolId: z.string().min(1, 'toolId is required'),
params: z.record(z.any()).optional().default({}),
executionContext: z
.object({
workflowId: z.string().optional(),
workspaceId: z.string().optional(),
executionId: z.string().optional(),
userId: z.string().optional(),
})
.optional(),
})
/**
* Creates a minimal set of default headers for proxy requests
* @returns Record of HTTP headers
*/
const getProxyHeaders = (): Record<string, string> => {
return {
'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36',
Accept: '*/*',
'Accept-Encoding': 'gzip, deflate, br',
'Cache-Control': 'no-cache',
Connection: 'keep-alive',
}
}
/**
* Formats a response with CORS headers
* @param responseData Response data object
* @param status HTTP status code
* @returns NextResponse with CORS headers
*/
const formatResponse = (responseData: any, status = 200) => {
return NextResponse.json(responseData, {
status,
headers: {
'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Methods': 'GET, POST, PUT, DELETE, OPTIONS',
'Access-Control-Allow-Headers': 'Content-Type, Authorization',
},
})
}
/**
* Creates an error response with consistent formatting
* @param error Error object or message
* @param status HTTP status code
* @param additionalData Additional data to include in the response
* @returns Formatted error response
*/
const createErrorResponse = (error: any, status = 500, additionalData = {}) => {
const errorMessage = error instanceof Error ? error.message : String(error)
const errorStack = error instanceof Error ? error.stack : undefined
logger.error('Creating error response', {
errorMessage,
status,
stack: isDev ? errorStack : undefined,
})
return formatResponse(
{
success: false,
error: errorMessage,
stack: isDev ? errorStack : undefined,
...additionalData,
},
status
)
}
/**
* GET handler for direct external URL proxying
* This allows for GET requests to external APIs
*/
export async function GET(request: Request) {
const url = new URL(request.url)
const targetUrl = url.searchParams.get('url')
const requestId = generateRequestId()
// Vault download proxy: /api/proxy?vaultDownload=1&bucket=...&object=...&credentialId=...
const vaultDownload = url.searchParams.get('vaultDownload')
if (vaultDownload === '1') {
try {
const bucket = url.searchParams.get('bucket')
const objectParam = url.searchParams.get('object')
const credentialId = url.searchParams.get('credentialId')
if (!bucket || !objectParam || !credentialId) {
return createErrorResponse('Missing bucket, object, or credentialId', 400)
}
// Fetch access token using existing token API
const baseUrl = new URL(getBaseUrl())
const tokenUrl = new URL('/api/auth/oauth/token', baseUrl)
// Build headers: forward session cookies if present; include internal auth for server-side
const tokenHeaders: Record<string, string> = { 'Content-Type': 'application/json' }
const incomingCookie = request.headers.get('cookie')
if (incomingCookie) tokenHeaders.Cookie = incomingCookie
try {
const internalToken = await generateInternalToken()
tokenHeaders.Authorization = `Bearer ${internalToken}`
} catch (_e) {
// best-effort internal auth
}
// Optional workflow context for collaboration auth
const workflowId = url.searchParams.get('workflowId') || undefined
const tokenRes = await fetch(tokenUrl.toString(), {
method: 'POST',
headers: tokenHeaders,
body: JSON.stringify({ credentialId, workflowId }),
})
if (!tokenRes.ok) {
const err = await tokenRes.text()
return createErrorResponse(`Failed to fetch access token: ${err}`, 401)
}
const tokenJson = await tokenRes.json()
const accessToken = tokenJson.accessToken
if (!accessToken) {
return createErrorResponse('No access token available', 401)
}
// Avoid double-encoding: incoming object may already be percent-encoded
const objectDecoded = decodeURIComponent(objectParam)
const gcsUrl = `https://storage.googleapis.com/storage/v1/b/${encodeURIComponent(
bucket
)}/o/${encodeURIComponent(objectDecoded)}?alt=media`
const fileRes = await fetch(gcsUrl, {
headers: { Authorization: `Bearer ${accessToken}` },
})
if (!fileRes.ok) {
const errText = await fileRes.text()
return createErrorResponse(errText || 'Failed to download file', fileRes.status)
}
const headers = new Headers()
fileRes.headers.forEach((v, k) => headers.set(k, v))
return new NextResponse(fileRes.body, { status: 200, headers })
} catch (error: any) {
logger.error(`[${requestId}] Vault download proxy failed`, {
error: error instanceof Error ? error.message : String(error),
})
return createErrorResponse('Vault download failed', 500)
}
}
if (!targetUrl) {
logger.error(`[${requestId}] Missing 'url' parameter`)
return createErrorResponse("Missing 'url' parameter", 400)
}
const urlValidation = await validateUrlWithDNS(targetUrl)
if (!urlValidation.isValid) {
logger.warn(`[${requestId}] Blocked proxy request`, {
url: targetUrl.substring(0, 100),
error: urlValidation.error,
})
return createErrorResponse(urlValidation.error || 'Invalid URL', 403)
}
const method = url.searchParams.get('method') || 'GET'
const bodyParam = url.searchParams.get('body')
let body: string | undefined
if (bodyParam && ['POST', 'PUT', 'PATCH'].includes(method.toUpperCase())) {
try {
body = decodeURIComponent(bodyParam)
} catch (error) {
logger.warn(`[${requestId}] Failed to decode body parameter`, error)
}
}
const customHeaders: Record<string, string> = {}
for (const [key, value] of url.searchParams.entries()) {
if (key.startsWith('header.')) {
const headerName = key.substring(7)
customHeaders[headerName] = value
}
}
if (body && !customHeaders['Content-Type']) {
customHeaders['Content-Type'] = 'application/json'
}
logger.info(`[${requestId}] Proxying ${method} request to: ${targetUrl}`)
try {
const pinnedUrl = createPinnedUrl(targetUrl, urlValidation.resolvedIP!)
const response = await fetch(pinnedUrl, {
method: method,
headers: {
...getProxyHeaders(),
...customHeaders,
Host: urlValidation.originalHostname!,
},
body: body || undefined,
})
const contentType = response.headers.get('content-type') || ''
let data
if (contentType.includes('application/json')) {
data = await response.json()
} else {
data = await response.text()
}
const errorMessage = !response.ok
? data && typeof data === 'object' && data.error
? `${data.error.message || JSON.stringify(data.error)}`
: response.statusText || `HTTP error ${response.status}`
: undefined
if (!response.ok) {
logger.error(`[${requestId}] External API error: ${response.status} ${response.statusText}`)
}
return formatResponse({
success: response.ok,
status: response.status,
statusText: response.statusText,
headers: Object.fromEntries(response.headers.entries()),
data,
error: errorMessage,
})
} catch (error: any) {
logger.error(`[${requestId}] Proxy GET request failed`, {
url: targetUrl,
error: error instanceof Error ? error.message : String(error),
stack: error instanceof Error ? error.stack : undefined,
})
return createErrorResponse(error)
}
}
export async function POST(request: NextRequest) {
const requestId = generateRequestId()
const startTime = new Date()
const startTimeISO = startTime.toISOString()
try {
const authResult = await checkHybridAuth(request, { requireWorkflowId: false })
if (!authResult.success) {
logger.error(`[${requestId}] Authentication failed for proxy:`, authResult.error)
return createErrorResponse('Unauthorized', 401)
}
let requestBody
try {
requestBody = await request.json()
} catch (parseError) {
logger.error(`[${requestId}] Failed to parse request body`, {
error: parseError instanceof Error ? parseError.message : String(parseError),
})
throw new Error('Invalid JSON in request body')
}
const validationResult = proxyPostSchema.safeParse(requestBody)
if (!validationResult.success) {
logger.error(`[${requestId}] Request validation failed`, {
errors: validationResult.error.errors,
})
const errorMessages = validationResult.error.errors
.map((err) => `${err.path.join('.')}: ${err.message}`)
.join(', ')
throw new Error(`Validation failed: ${errorMessages}`)
}
const { toolId, params } = validationResult.data
logger.info(`[${requestId}] Processing tool: ${toolId}`)
const tool = getTool(toolId)
if (!tool) {
logger.error(`[${requestId}] Tool not found: ${toolId}`)
throw new Error(`Tool not found: ${toolId}`)
}
try {
validateRequiredParametersAfterMerge(toolId, tool, params)
} catch (validationError) {
logger.warn(`[${requestId}] Tool validation failed for ${toolId}`, {
error: validationError instanceof Error ? validationError.message : String(validationError),
})
const endTime = new Date()
const endTimeISO = endTime.toISOString()
const duration = endTime.getTime() - startTime.getTime()
return createErrorResponse(validationError, 400, {
startTime: startTimeISO,
endTime: endTimeISO,
duration,
})
}
const hasFileOutputs =
tool.outputs &&
Object.values(tool.outputs).some(
(output) => output.type === 'file' || output.type === 'file[]'
)
const result = await executeTool(
toolId,
params,
true, // skipProxy (we're already in the proxy)
!hasFileOutputs, // skipPostProcess (don't skip if tool has file outputs)
undefined // execution context is not available in proxy context
)
if (!result.success) {
logger.warn(`[${requestId}] Tool execution failed for ${toolId}`, {
error: result.error || 'Unknown error',
})
throw new Error(result.error || 'Tool execution failed')
}
const endTime = new Date()
const endTimeISO = endTime.toISOString()
const duration = endTime.getTime() - startTime.getTime()
const responseWithTimingData = {
...result,
startTime: startTimeISO,
endTime: endTimeISO,
duration,
timing: {
startTime: startTimeISO,
endTime: endTimeISO,
duration,
},
}
logger.info(`[${requestId}] Tool executed successfully: ${toolId} (${duration}ms)`)
return formatResponse(responseWithTimingData)
} catch (error: any) {
logger.error(`[${requestId}] Proxy request failed`, {
error: error instanceof Error ? error.message : String(error),
stack: error instanceof Error ? error.stack : undefined,
name: error instanceof Error ? error.name : undefined,
})
const endTime = new Date()
const endTimeISO = endTime.toISOString()
const duration = endTime.getTime() - startTime.getTime()
return createErrorResponse(error, 500, {
startTime: startTimeISO,
endTime: endTimeISO,
duration,
})
}
}
export async function OPTIONS() {
return new NextResponse(null, {
status: 204,
headers: {
'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Methods': 'GET, POST, PUT, DELETE, OPTIONS',
'Access-Control-Allow-Headers': 'Content-Type, Authorization',
'Access-Control-Max-Age': '86400',
},
})
}

View File

@@ -0,0 +1,193 @@
import { db } from '@sim/db'
import { copilotChats, workflow, workspace } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { getSession } from '@/lib/auth'
import { verifyEffectiveSuperUser } from '@/lib/templates/permissions'
import { parseWorkflowJson } from '@/lib/workflows/operations/import-export'
import {
loadWorkflowFromNormalizedTables,
saveWorkflowToNormalizedTables,
} from '@/lib/workflows/persistence/utils'
import { sanitizeForExport } from '@/lib/workflows/sanitization/json-sanitizer'
const logger = createLogger('SuperUserImportWorkflow')
interface ImportWorkflowRequest {
workflowId: string
targetWorkspaceId: string
}
/**
* POST /api/superuser/import-workflow
*
* Superuser endpoint to import a workflow by ID along with its copilot chats.
* This creates a copy of the workflow in the target workspace with new IDs.
* Only the workflow structure and copilot chats are copied - no deployments,
* webhooks, triggers, or other sensitive data.
*
* Requires both isSuperUser flag AND superUserModeEnabled setting.
*/
export async function POST(request: NextRequest) {
try {
const session = await getSession()
if (!session?.user?.id) {
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
}
const { effectiveSuperUser, isSuperUser, superUserModeEnabled } =
await verifyEffectiveSuperUser(session.user.id)
if (!effectiveSuperUser) {
logger.warn('Non-effective-superuser attempted to access import-workflow endpoint', {
userId: session.user.id,
isSuperUser,
superUserModeEnabled,
})
return NextResponse.json({ error: 'Forbidden: Superuser access required' }, { status: 403 })
}
const body: ImportWorkflowRequest = await request.json()
const { workflowId, targetWorkspaceId } = body
if (!workflowId) {
return NextResponse.json({ error: 'workflowId is required' }, { status: 400 })
}
if (!targetWorkspaceId) {
return NextResponse.json({ error: 'targetWorkspaceId is required' }, { status: 400 })
}
// Verify target workspace exists
const [targetWorkspace] = await db
.select({ id: workspace.id, ownerId: workspace.ownerId })
.from(workspace)
.where(eq(workspace.id, targetWorkspaceId))
.limit(1)
if (!targetWorkspace) {
return NextResponse.json({ error: 'Target workspace not found' }, { status: 404 })
}
// Get the source workflow
const [sourceWorkflow] = await db
.select()
.from(workflow)
.where(eq(workflow.id, workflowId))
.limit(1)
if (!sourceWorkflow) {
return NextResponse.json({ error: 'Source workflow not found' }, { status: 404 })
}
// Load the workflow state from normalized tables
const normalizedData = await loadWorkflowFromNormalizedTables(workflowId)
if (!normalizedData) {
return NextResponse.json(
{ error: 'Workflow has no normalized data - cannot import' },
{ status: 400 }
)
}
// Use existing export logic to create export format
const workflowState = {
blocks: normalizedData.blocks,
edges: normalizedData.edges,
loops: normalizedData.loops,
parallels: normalizedData.parallels,
metadata: {
name: sourceWorkflow.name,
description: sourceWorkflow.description ?? undefined,
color: sourceWorkflow.color,
},
}
const exportData = sanitizeForExport(workflowState)
// Use existing import logic (parseWorkflowJson regenerates IDs automatically)
const { data: importedData, errors } = parseWorkflowJson(JSON.stringify(exportData))
if (!importedData || errors.length > 0) {
return NextResponse.json(
{ error: `Failed to parse workflow: ${errors.join(', ')}` },
{ status: 400 }
)
}
// Create new workflow record
const newWorkflowId = crypto.randomUUID()
const now = new Date()
await db.insert(workflow).values({
id: newWorkflowId,
userId: session.user.id,
workspaceId: targetWorkspaceId,
folderId: null, // Don't copy folder association
name: `[Debug Import] ${sourceWorkflow.name}`,
description: sourceWorkflow.description,
color: sourceWorkflow.color,
lastSynced: now,
createdAt: now,
updatedAt: now,
isDeployed: false, // Never copy deployment status
runCount: 0,
variables: sourceWorkflow.variables || {},
})
// Save using existing persistence logic
const saveResult = await saveWorkflowToNormalizedTables(newWorkflowId, importedData)
if (!saveResult.success) {
// Clean up the workflow record if save failed
await db.delete(workflow).where(eq(workflow.id, newWorkflowId))
return NextResponse.json(
{ error: `Failed to save workflow state: ${saveResult.error}` },
{ status: 500 }
)
}
// Copy copilot chats associated with the source workflow
const sourceCopilotChats = await db
.select()
.from(copilotChats)
.where(eq(copilotChats.workflowId, workflowId))
let copilotChatsImported = 0
for (const chat of sourceCopilotChats) {
await db.insert(copilotChats).values({
userId: session.user.id,
workflowId: newWorkflowId,
title: chat.title ? `[Import] ${chat.title}` : null,
messages: chat.messages,
model: chat.model,
conversationId: null, // Don't copy conversation ID
previewYaml: chat.previewYaml,
planArtifact: chat.planArtifact,
config: chat.config,
createdAt: new Date(),
updatedAt: new Date(),
})
copilotChatsImported++
}
logger.info('Superuser imported workflow', {
userId: session.user.id,
sourceWorkflowId: workflowId,
newWorkflowId,
targetWorkspaceId,
copilotChatsImported,
})
return NextResponse.json({
success: true,
newWorkflowId,
copilotChatsImported,
})
} catch (error) {
logger.error('Error importing workflow', error)
return NextResponse.json({ error: 'Internal server error' }, { status: 500 })
}
}
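
A sketch of calling this endpoint from a session-authenticated client; the path comes from the route comment above, the ids are placeholders, and the superuser session cookie is assumed to be sent automatically.

const res = await fetch('/api/superuser/import-workflow', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    workflowId: 'source-workflow-id',         // placeholder id
    targetWorkspaceId: 'target-workspace-id', // placeholder id
  }),
})
const data = await res.json()
// On success: { success: true, newWorkflowId, copilotChatsImported }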

View File

@@ -5,7 +5,7 @@ import { eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { getSession } from '@/lib/auth'
import { generateRequestId } from '@/lib/core/utils/request'
-import { verifySuperUser } from '@/lib/templates/permissions'
+import { verifyEffectiveSuperUser } from '@/lib/templates/permissions'
const logger = createLogger('TemplateApprovalAPI')
@@ -25,8 +25,8 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
    return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
  }
- const { isSuperUser } = await verifySuperUser(session.user.id)
- if (!isSuperUser) {
+ const { effectiveSuperUser } = await verifyEffectiveSuperUser(session.user.id)
+ if (!effectiveSuperUser) {
    logger.warn(`[${requestId}] Non-super user attempted to approve template: ${id}`)
    return NextResponse.json({ error: 'Only super users can approve templates' }, { status: 403 })
  }
@@ -71,8 +71,8 @@ export async function DELETE(
    return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
  }
- const { isSuperUser } = await verifySuperUser(session.user.id)
- if (!isSuperUser) {
+ const { effectiveSuperUser } = await verifyEffectiveSuperUser(session.user.id)
+ if (!effectiveSuperUser) {
    logger.warn(`[${requestId}] Non-super user attempted to reject template: ${id}`)
    return NextResponse.json({ error: 'Only super users can reject templates' }, { status: 403 })
  }

View File

@@ -5,7 +5,7 @@ import { eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { getSession } from '@/lib/auth'
import { generateRequestId } from '@/lib/core/utils/request'
-import { verifySuperUser } from '@/lib/templates/permissions'
+import { verifyEffectiveSuperUser } from '@/lib/templates/permissions'
const logger = createLogger('TemplateRejectionAPI')
@@ -25,8 +25,8 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
    return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
  }
- const { isSuperUser } = await verifySuperUser(session.user.id)
- if (!isSuperUser) {
+ const { effectiveSuperUser } = await verifyEffectiveSuperUser(session.user.id)
+ if (!effectiveSuperUser) {
    logger.warn(`[${requestId}] Non-super user attempted to reject template: ${id}`)
    return NextResponse.json({ error: 'Only super users can reject templates' }, { status: 403 })
  }

View File

@@ -3,7 +3,6 @@ import {
  templateCreators,
  templateStars,
  templates,
- user,
  workflow,
  workflowDeploymentVersion,
} from '@sim/db/schema'
@@ -14,6 +13,7 @@ import { v4 as uuidv4 } from 'uuid'
import { z } from 'zod'
import { getSession } from '@/lib/auth'
import { generateRequestId } from '@/lib/core/utils/request'
+import { verifyEffectiveSuperUser } from '@/lib/templates/permissions'
import {
  extractRequiredCredentials,
  sanitizeCredentials,
@@ -70,8 +70,8 @@ export async function GET(request: NextRequest) {
    logger.debug(`[${requestId}] Fetching templates with params:`, params)
    // Check if user is a super user
-   const currentUser = await db.select().from(user).where(eq(user.id, session.user.id)).limit(1)
-   const isSuperUser = currentUser[0]?.isSuperUser || false
+   const { effectiveSuperUser } = await verifyEffectiveSuperUser(session.user.id)
+   const isSuperUser = effectiveSuperUser
    // Build query conditions
    const conditions = []
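
All the template routes now share one check. Per the import-workflow route above, verifyEffectiveSuperUser returns the raw flag alongside the mode setting, so "effective" means both are on. A sketch of the pattern:

// Pattern used across the template routes in this changeset.
const { effectiveSuperUser, isSuperUser, superUserModeEnabled } =
  await verifyEffectiveSuperUser(session.user.id)
// effectiveSuperUser is only true when the isSuperUser flag AND the
// superUserModeEnabled setting are both on (per the route comment above).
if (!effectiveSuperUser) {
  return NextResponse.json({ error: 'Forbidden' }, { status: 403 })
}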

View File

@@ -5,7 +5,11 @@ import { checkHybridAuth } from '@/lib/auth/hybrid'
import { generateRequestId } from '@/lib/core/utils/request'
import { getBaseUrl } from '@/lib/core/utils/urls'
import { StorageService } from '@/lib/uploads'
-import { extractStorageKey, inferContextFromKey } from '@/lib/uploads/utils/file-utils'
+import {
+  extractStorageKey,
+  inferContextFromKey,
+  isInternalFileUrl,
+} from '@/lib/uploads/utils/file-utils'
import { verifyFileAccess } from '@/app/api/files/authorization'
export const dynamic = 'force-dynamic'
@@ -47,13 +51,13 @@ export async function POST(request: NextRequest) {
    logger.info(`[${requestId}] Mistral parse request`, {
      filePath: validatedData.filePath,
-     isWorkspaceFile: validatedData.filePath.includes('/api/files/serve/'),
+     isWorkspaceFile: isInternalFileUrl(validatedData.filePath),
      userId,
    })
    let fileUrl = validatedData.filePath
-   if (validatedData.filePath?.includes('/api/files/serve/')) {
+   if (isInternalFileUrl(validatedData.filePath)) {
      try {
        const storageKey = extractStorageKey(validatedData.filePath)
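
isInternalFileUrl replaces the repeated substring check in this and the following two parse routes. A plausible implementation consistent with the call sites it replaces; the actual helper lives in @/lib/uploads/utils/file-utils and may differ.

// Assumed behavior, inferred from the '/api/files/serve/' checks it replaces.
function isInternalFileUrl(filePath: string | undefined | null): boolean {
  if (!filePath) return false
  return filePath.includes('/api/files/serve/')
}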

View File

@@ -5,7 +5,11 @@ import { checkHybridAuth } from '@/lib/auth/hybrid'
import { generateRequestId } from '@/lib/core/utils/request'
import { getBaseUrl } from '@/lib/core/utils/urls'
import { StorageService } from '@/lib/uploads'
-import { extractStorageKey, inferContextFromKey } from '@/lib/uploads/utils/file-utils'
+import {
+  extractStorageKey,
+  inferContextFromKey,
+  isInternalFileUrl,
+} from '@/lib/uploads/utils/file-utils'
import { verifyFileAccess } from '@/app/api/files/authorization'
export const dynamic = 'force-dynamic'
@@ -48,13 +52,13 @@ export async function POST(request: NextRequest) {
    logger.info(`[${requestId}] Pulse parse request`, {
      filePath: validatedData.filePath,
-     isWorkspaceFile: validatedData.filePath.includes('/api/files/serve/'),
+     isWorkspaceFile: isInternalFileUrl(validatedData.filePath),
      userId,
    })
    let fileUrl = validatedData.filePath
-   if (validatedData.filePath?.includes('/api/files/serve/')) {
+   if (isInternalFileUrl(validatedData.filePath)) {
      try {
        const storageKey = extractStorageKey(validatedData.filePath)
        const context = inferContextFromKey(storageKey)

View File

@@ -5,7 +5,11 @@ import { checkHybridAuth } from '@/lib/auth/hybrid'
import { generateRequestId } from '@/lib/core/utils/request'
import { getBaseUrl } from '@/lib/core/utils/urls'
import { StorageService } from '@/lib/uploads'
-import { extractStorageKey, inferContextFromKey } from '@/lib/uploads/utils/file-utils'
+import {
+  extractStorageKey,
+  inferContextFromKey,
+  isInternalFileUrl,
+} from '@/lib/uploads/utils/file-utils'
import { verifyFileAccess } from '@/app/api/files/authorization'
export const dynamic = 'force-dynamic'
@@ -44,13 +48,13 @@ export async function POST(request: NextRequest) {
    logger.info(`[${requestId}] Reducto parse request`, {
      filePath: validatedData.filePath,
-     isWorkspaceFile: validatedData.filePath.includes('/api/files/serve/'),
+     isWorkspaceFile: isInternalFileUrl(validatedData.filePath),
      userId,
    })
    let fileUrl = validatedData.filePath
-   if (validatedData.filePath?.includes('/api/files/serve/')) {
+   if (isInternalFileUrl(validatedData.filePath)) {
      try {
        const storageKey = extractStorageKey(validatedData.filePath)
        const context = inferContextFromKey(storageKey)

View File

@@ -79,11 +79,13 @@ export async function POST(request: NextRequest) {
  // Generate public URL for destination (properly encode the destination key)
  const encodedDestKey = validatedData.destinationKey.split('/').map(encodeURIComponent).join('/')
  const url = `https://${validatedData.destinationBucket}.s3.${validatedData.region}.amazonaws.com/${encodedDestKey}`
+ const uri = `s3://${validatedData.destinationBucket}/${validatedData.destinationKey}`
  return NextResponse.json({
    success: true,
    output: {
      url,
+     uri,
      copySourceVersionId: result.CopySourceVersionId,
      versionId: result.VersionId,
      etag: result.CopyObjectResult?.ETag,

View File

@@ -117,11 +117,13 @@ export async function POST(request: NextRequest) {
  const encodedKey = validatedData.objectKey.split('/').map(encodeURIComponent).join('/')
  const url = `https://${validatedData.bucketName}.s3.${validatedData.region}.amazonaws.com/${encodedKey}`
+ const uri = `s3://${validatedData.bucketName}/${validatedData.objectKey}`
  return NextResponse.json({
    success: true,
    output: {
      url,
+     uri,
      etag: result.ETag,
      location: url,
      key: validatedData.objectKey,
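
Both S3 routes now return an s3:// URI alongside the https URL. The URI keeps the raw (unencoded) key, so it can be passed directly to services that take S3 object locations, such as the Textract async mode below. An illustrative output shape (bucket and key are placeholders):

// Example for bucket "my-bucket", key "docs/report.pdf" in us-east-1:
const exampleOutput = {
  url: 'https://my-bucket.s3.us-east-1.amazonaws.com/docs/report.pdf',
  uri: 's3://my-bucket/docs/report.pdf',
  key: 'docs/report.pdf',
  // plus etag/location/version fields as returned by each route
}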

View File

@@ -0,0 +1,637 @@
import crypto from 'crypto'
import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { checkHybridAuth } from '@/lib/auth/hybrid'
import {
validateAwsRegion,
validateExternalUrl,
validateS3BucketName,
} from '@/lib/core/security/input-validation'
import { generateRequestId } from '@/lib/core/utils/request'
import { StorageService } from '@/lib/uploads'
import {
extractStorageKey,
inferContextFromKey,
isInternalFileUrl,
} from '@/lib/uploads/utils/file-utils'
import { verifyFileAccess } from '@/app/api/files/authorization'
export const dynamic = 'force-dynamic'
export const maxDuration = 300 // 5 minutes for large multi-page PDF processing
const logger = createLogger('TextractParseAPI')
const QuerySchema = z.object({
Text: z.string().min(1),
Alias: z.string().optional(),
Pages: z.array(z.string()).optional(),
})
const TextractParseSchema = z
.object({
accessKeyId: z.string().min(1, 'AWS Access Key ID is required'),
secretAccessKey: z.string().min(1, 'AWS Secret Access Key is required'),
region: z.string().min(1, 'AWS region is required'),
processingMode: z.enum(['sync', 'async']).optional().default('sync'),
filePath: z.string().optional(),
s3Uri: z.string().optional(),
featureTypes: z
.array(z.enum(['TABLES', 'FORMS', 'QUERIES', 'SIGNATURES', 'LAYOUT']))
.optional(),
queries: z.array(QuerySchema).optional(),
})
.superRefine((data, ctx) => {
const regionValidation = validateAwsRegion(data.region, 'AWS region')
if (!regionValidation.isValid) {
ctx.addIssue({
code: z.ZodIssueCode.custom,
message: regionValidation.error,
path: ['region'],
})
}
})
function getSignatureKey(
key: string,
dateStamp: string,
regionName: string,
serviceName: string
): Buffer {
const kDate = crypto.createHmac('sha256', `AWS4${key}`).update(dateStamp).digest()
const kRegion = crypto.createHmac('sha256', kDate).update(regionName).digest()
const kService = crypto.createHmac('sha256', kRegion).update(serviceName).digest()
const kSigning = crypto.createHmac('sha256', kService).update('aws4_request').digest()
return kSigning
}
function signAwsRequest(
method: string,
host: string,
uri: string,
body: string,
accessKeyId: string,
secretAccessKey: string,
region: string,
service: string,
amzTarget: string
): Record<string, string> {
const date = new Date()
const amzDate = date.toISOString().replace(/[:-]|\.\d{3}/g, '')
const dateStamp = amzDate.slice(0, 8)
const payloadHash = crypto.createHash('sha256').update(body).digest('hex')
const canonicalHeaders =
`content-type:application/x-amz-json-1.1\n` +
`host:${host}\n` +
`x-amz-date:${amzDate}\n` +
`x-amz-target:${amzTarget}\n`
const signedHeaders = 'content-type;host;x-amz-date;x-amz-target'
const canonicalRequest = `${method}\n${uri}\n\n${canonicalHeaders}\n${signedHeaders}\n${payloadHash}`
const algorithm = 'AWS4-HMAC-SHA256'
const credentialScope = `${dateStamp}/${region}/${service}/aws4_request`
const stringToSign = `${algorithm}\n${amzDate}\n${credentialScope}\n${crypto.createHash('sha256').update(canonicalRequest).digest('hex')}`
const signingKey = getSignatureKey(secretAccessKey, dateStamp, region, service)
const signature = crypto.createHmac('sha256', signingKey).update(stringToSign).digest('hex')
const authorizationHeader = `${algorithm} Credential=${accessKeyId}/${credentialScope}, SignedHeaders=${signedHeaders}, Signature=${signature}`
return {
'Content-Type': 'application/x-amz-json-1.1',
Host: host,
'X-Amz-Date': amzDate,
'X-Amz-Target': amzTarget,
Authorization: authorizationHeader,
}
}
async function fetchDocumentBytes(url: string): Promise<{ bytes: string; contentType: string }> {
const response = await fetch(url)
if (!response.ok) {
throw new Error(`Failed to fetch document: ${response.statusText}`)
}
const arrayBuffer = await response.arrayBuffer()
const bytes = Buffer.from(arrayBuffer).toString('base64')
const contentType = response.headers.get('content-type') || 'application/octet-stream'
return { bytes, contentType }
}
function parseS3Uri(s3Uri: string): { bucket: string; key: string } {
const match = s3Uri.match(/^s3:\/\/([^/]+)\/(.+)$/)
if (!match) {
throw new Error(
`Invalid S3 URI format: ${s3Uri}. Expected format: s3://bucket-name/path/to/object`
)
}
const bucket = match[1]
const key = match[2]
const bucketValidation = validateS3BucketName(bucket, 'S3 bucket name')
if (!bucketValidation.isValid) {
throw new Error(bucketValidation.error)
}
if (key.includes('..') || key.startsWith('/')) {
throw new Error('S3 key contains invalid path traversal sequences')
}
return { bucket, key }
}
function sleep(ms: number): Promise<void> {
return new Promise((resolve) => setTimeout(resolve, ms))
}
async function callTextractAsync(
host: string,
amzTarget: string,
body: Record<string, unknown>,
accessKeyId: string,
secretAccessKey: string,
region: string
): Promise<Record<string, unknown>> {
const bodyString = JSON.stringify(body)
const headers = signAwsRequest(
'POST',
host,
'/',
bodyString,
accessKeyId,
secretAccessKey,
region,
'textract',
amzTarget
)
const response = await fetch(`https://${host}/`, {
method: 'POST',
headers,
body: bodyString,
})
if (!response.ok) {
const errorText = await response.text()
let errorMessage = `Textract API error: ${response.statusText}`
try {
const errorJson = JSON.parse(errorText)
if (errorJson.Message) {
errorMessage = errorJson.Message
} else if (errorJson.__type) {
errorMessage = `${errorJson.__type}: ${errorJson.message || errorText}`
}
} catch {
// Use default error message
}
throw new Error(errorMessage)
}
return response.json()
}
async function pollForJobCompletion(
host: string,
jobId: string,
accessKeyId: string,
secretAccessKey: string,
region: string,
useAnalyzeDocument: boolean,
requestId: string
): Promise<Record<string, unknown>> {
const pollIntervalMs = 5000 // 5 seconds between polls
const maxPollTimeMs = 180000 // 3 minutes maximum polling time
const maxAttempts = Math.ceil(maxPollTimeMs / pollIntervalMs)
const getTarget = useAnalyzeDocument
? 'Textract.GetDocumentAnalysis'
: 'Textract.GetDocumentTextDetection'
for (let attempt = 0; attempt < maxAttempts; attempt++) {
const result = await callTextractAsync(
host,
getTarget,
{ JobId: jobId },
accessKeyId,
secretAccessKey,
region
)
const jobStatus = result.JobStatus as string
if (jobStatus === 'SUCCEEDED') {
logger.info(`[${requestId}] Async job completed successfully after ${attempt + 1} polls`)
let allBlocks = (result.Blocks as unknown[]) || []
let nextToken = result.NextToken as string | undefined
while (nextToken) {
const nextResult = await callTextractAsync(
host,
getTarget,
{ JobId: jobId, NextToken: nextToken },
accessKeyId,
secretAccessKey,
region
)
allBlocks = allBlocks.concat((nextResult.Blocks as unknown[]) || [])
nextToken = nextResult.NextToken as string | undefined
}
return {
...result,
Blocks: allBlocks,
}
}
if (jobStatus === 'FAILED') {
throw new Error(`Textract job failed: ${result.StatusMessage || 'Unknown error'}`)
}
if (jobStatus === 'PARTIAL_SUCCESS') {
logger.warn(`[${requestId}] Job completed with partial success: ${result.StatusMessage}`)
let allBlocks = (result.Blocks as unknown[]) || []
let nextToken = result.NextToken as string | undefined
while (nextToken) {
const nextResult = await callTextractAsync(
host,
getTarget,
{ JobId: jobId, NextToken: nextToken },
accessKeyId,
secretAccessKey,
region
)
allBlocks = allBlocks.concat((nextResult.Blocks as unknown[]) || [])
nextToken = nextResult.NextToken as string | undefined
}
return {
...result,
Blocks: allBlocks,
}
}
logger.info(`[${requestId}] Job status: ${jobStatus}, attempt ${attempt + 1}/${maxAttempts}`)
await sleep(pollIntervalMs)
}
throw new Error(
`Timeout waiting for Textract job to complete (max ${maxPollTimeMs / 1000} seconds)`
)
}
export async function POST(request: NextRequest) {
const requestId = generateRequestId()
try {
const authResult = await checkHybridAuth(request, { requireWorkflowId: false })
if (!authResult.success || !authResult.userId) {
logger.warn(`[${requestId}] Unauthorized Textract parse attempt`, {
error: authResult.error || 'Missing userId',
})
return NextResponse.json(
{
success: false,
error: authResult.error || 'Unauthorized',
},
{ status: 401 }
)
}
const userId = authResult.userId
const body = await request.json()
const validatedData = TextractParseSchema.parse(body)
const processingMode = validatedData.processingMode || 'sync'
const featureTypes = validatedData.featureTypes ?? []
const useAnalyzeDocument = featureTypes.length > 0
const host = `textract.${validatedData.region}.amazonaws.com`
logger.info(`[${requestId}] Textract parse request`, {
processingMode,
filePath: validatedData.filePath?.substring(0, 50),
s3Uri: validatedData.s3Uri?.substring(0, 50),
featureTypes,
userId,
})
if (processingMode === 'async') {
if (!validatedData.s3Uri) {
return NextResponse.json(
{
success: false,
error: 'S3 URI is required for multi-page processing (s3://bucket/key)',
},
{ status: 400 }
)
}
const { bucket: s3Bucket, key: s3Key } = parseS3Uri(validatedData.s3Uri)
logger.info(`[${requestId}] Starting async Textract job`, { s3Bucket, s3Key })
const startTarget = useAnalyzeDocument
? 'Textract.StartDocumentAnalysis'
: 'Textract.StartDocumentTextDetection'
const startBody: Record<string, unknown> = {
DocumentLocation: {
S3Object: {
Bucket: s3Bucket,
Name: s3Key,
},
},
}
if (useAnalyzeDocument) {
startBody.FeatureTypes = featureTypes
if (
validatedData.queries &&
validatedData.queries.length > 0 &&
featureTypes.includes('QUERIES')
) {
startBody.QueriesConfig = {
Queries: validatedData.queries.map((q) => ({
Text: q.Text,
Alias: q.Alias,
Pages: q.Pages,
})),
}
}
}
const startResult = await callTextractAsync(
host,
startTarget,
startBody,
validatedData.accessKeyId,
validatedData.secretAccessKey,
validatedData.region
)
const jobId = startResult.JobId as string
if (!jobId) {
throw new Error('Failed to start Textract job: No JobId returned')
}
logger.info(`[${requestId}] Async job started`, { jobId })
const textractData = await pollForJobCompletion(
host,
jobId,
validatedData.accessKeyId,
validatedData.secretAccessKey,
validatedData.region,
useAnalyzeDocument,
requestId
)
logger.info(`[${requestId}] Textract async parse successful`, {
pageCount: (textractData.DocumentMetadata as { Pages?: number })?.Pages ?? 0,
blockCount: (textractData.Blocks as unknown[])?.length ?? 0,
})
return NextResponse.json({
success: true,
output: {
blocks: textractData.Blocks ?? [],
documentMetadata: {
pages: (textractData.DocumentMetadata as { Pages?: number })?.Pages ?? 0,
},
modelVersion: (textractData.AnalyzeDocumentModelVersion ??
textractData.DetectDocumentTextModelVersion) as string | undefined,
},
})
}
if (!validatedData.filePath) {
return NextResponse.json(
{
success: false,
error: 'File path is required for single-page processing',
},
{ status: 400 }
)
}
let fileUrl = validatedData.filePath
const isInternalFilePath = validatedData.filePath && isInternalFileUrl(validatedData.filePath)
if (isInternalFilePath) {
try {
const storageKey = extractStorageKey(validatedData.filePath)
const context = inferContextFromKey(storageKey)
const hasAccess = await verifyFileAccess(storageKey, userId, undefined, context, false)
if (!hasAccess) {
logger.warn(`[${requestId}] Unauthorized presigned URL generation attempt`, {
userId,
key: storageKey,
context,
})
return NextResponse.json(
{
success: false,
error: 'File not found',
},
{ status: 404 }
)
}
fileUrl = await StorageService.generatePresignedDownloadUrl(storageKey, context, 5 * 60)
logger.info(`[${requestId}] Generated presigned URL for ${context} file`)
} catch (error) {
logger.error(`[${requestId}] Failed to generate presigned URL:`, error)
return NextResponse.json(
{
success: false,
error: 'Failed to generate file access URL',
},
{ status: 500 }
)
}
} else if (validatedData.filePath?.startsWith('/')) {
// Reject arbitrary absolute paths that don't contain /api/files/serve/
logger.warn(`[${requestId}] Invalid internal path`, {
userId,
path: validatedData.filePath.substring(0, 50),
})
return NextResponse.json(
{
success: false,
error: 'Invalid file path. Only uploaded files are supported for internal paths.',
},
{ status: 400 }
)
} else {
const urlValidation = validateExternalUrl(fileUrl, 'Document URL')
if (!urlValidation.isValid) {
logger.warn(`[${requestId}] SSRF attempt blocked`, {
userId,
url: fileUrl.substring(0, 100),
error: urlValidation.error,
})
return NextResponse.json(
{
success: false,
error: urlValidation.error,
},
{ status: 400 }
)
}
}
const { bytes, contentType } = await fetchDocumentBytes(fileUrl)
// Track if this is a PDF for better error messaging
const isPdf = contentType.includes('pdf') || fileUrl.toLowerCase().endsWith('.pdf')
const uri = '/'
let textractBody: Record<string, unknown>
let amzTarget: string
if (useAnalyzeDocument) {
amzTarget = 'Textract.AnalyzeDocument'
textractBody = {
Document: {
Bytes: bytes,
},
FeatureTypes: featureTypes,
}
if (
validatedData.queries &&
validatedData.queries.length > 0 &&
featureTypes.includes('QUERIES')
) {
textractBody.QueriesConfig = {
Queries: validatedData.queries.map((q) => ({
Text: q.Text,
Alias: q.Alias,
Pages: q.Pages,
})),
}
}
} else {
amzTarget = 'Textract.DetectDocumentText'
textractBody = {
Document: {
Bytes: bytes,
},
}
}
const bodyString = JSON.stringify(textractBody)
const headers = signAwsRequest(
'POST',
host,
uri,
bodyString,
validatedData.accessKeyId,
validatedData.secretAccessKey,
validatedData.region,
'textract',
amzTarget
)
const textractResponse = await fetch(`https://${host}${uri}`, {
method: 'POST',
headers,
body: bodyString,
})
if (!textractResponse.ok) {
const errorText = await textractResponse.text()
logger.error(`[${requestId}] Textract API error:`, errorText)
let errorMessage = `Textract API error: ${textractResponse.statusText}`
let isUnsupportedFormat = false
try {
const errorJson = JSON.parse(errorText)
if (errorJson.Message) {
errorMessage = errorJson.Message
} else if (errorJson.__type) {
errorMessage = `${errorJson.__type}: ${errorJson.message || errorText}`
}
// Check for unsupported document format error
isUnsupportedFormat =
errorJson.__type === 'UnsupportedDocumentException' ||
errorJson.Message?.toLowerCase().includes('unsupported document') ||
errorText.toLowerCase().includes('unsupported document')
} catch {
isUnsupportedFormat = errorText.toLowerCase().includes('unsupported document')
}
// Provide helpful message for unsupported format (likely multi-page PDF)
if (isUnsupportedFormat && isPdf) {
errorMessage =
'This document format is not supported in Single Page mode. If this is a multi-page PDF, please use "Multi-Page (PDF, TIFF via S3)" mode instead, which requires uploading your document to S3 first. Single Page mode only supports JPEG, PNG, and single-page PDF files.'
}
return NextResponse.json(
{
success: false,
error: errorMessage,
},
{ status: textractResponse.status }
)
}
const textractData = await textractResponse.json()
logger.info(`[${requestId}] Textract parse successful`, {
pageCount: textractData.DocumentMetadata?.Pages ?? 0,
blockCount: textractData.Blocks?.length ?? 0,
})
return NextResponse.json({
success: true,
output: {
blocks: textractData.Blocks ?? [],
documentMetadata: {
pages: textractData.DocumentMetadata?.Pages ?? 0,
},
modelVersion:
textractData.AnalyzeDocumentModelVersion ??
textractData.DetectDocumentTextModelVersion ??
undefined,
},
})
} catch (error) {
if (error instanceof z.ZodError) {
logger.warn(`[${requestId}] Invalid request data`, { errors: error.errors })
return NextResponse.json(
{
success: false,
error: 'Invalid request data',
details: error.errors,
},
{ status: 400 }
)
}
logger.error(`[${requestId}] Error in Textract parse:`, error)
return NextResponse.json(
{
success: false,
error: error instanceof Error ? error.message : 'Internal server error',
},
{ status: 500 }
)
}
}
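
A sketch of invoking the async (multi-page) mode of this route, assuming it is mounted at /api/tools/textract/parse; the exact path is not shown in this view and the credential values are placeholders. The body fields follow TextractParseSchema above.

// Hedged example - endpoint path and credential values are placeholders.
const res = await fetch('/api/tools/textract/parse', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    accessKeyId: 'AKIA...',              // placeholder
    secretAccessKey: '...',              // placeholder
    region: 'us-east-1',
    processingMode: 'async',             // requires s3Uri; route polls up to 3 minutes
    s3Uri: 's3://my-bucket/docs/report.pdf',
    featureTypes: ['TABLES', 'QUERIES'], // non-empty featureTypes selects AnalyzeDocument
    queries: [{ Text: 'What is the invoice total?', Alias: 'TOTAL' }],
  }),
})
const { output } = await res.json()
// output.blocks: all Textract blocks (paginated pages are concatenated)
// output.documentMetadata.pages and output.modelVersion as shown above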

View File

@@ -550,6 +550,8 @@ export interface AdminUserBilling {
  totalWebhookTriggers: number
  totalScheduledExecutions: number
  totalChatExecutions: number
+ totalMcpExecutions: number
+ totalA2aExecutions: number
  totalTokensUsed: number
  totalCost: string
  currentUsageLimit: string | null

View File

@@ -97,6 +97,8 @@ export const GET = withAdminAuthParams<RouteParams>(async (_, context) => {
  totalWebhookTriggers: stats?.totalWebhookTriggers ?? 0,
  totalScheduledExecutions: stats?.totalScheduledExecutions ?? 0,
  totalChatExecutions: stats?.totalChatExecutions ?? 0,
+ totalMcpExecutions: stats?.totalMcpExecutions ?? 0,
+ totalA2aExecutions: stats?.totalA2aExecutions ?? 0,
  totalTokensUsed: stats?.totalTokensUsed ?? 0,
  totalCost: stats?.totalCost ?? '0',
  currentUsageLimit: stats?.currentUsageLimit ?? null,

View File

@@ -19,7 +19,7 @@ export interface RateLimitResult {
export async function checkRateLimit(
  request: NextRequest,
- endpoint: 'logs' | 'logs-detail' = 'logs'
+ endpoint: 'logs' | 'logs-detail' | 'workflows' | 'workflow-detail' = 'logs'
): Promise<RateLimitResult> {
  try {
    const auth = await authenticateV1Request(request)

View File

@@ -0,0 +1,102 @@
import { db } from '@sim/db'
import { permissions, workflow, workflowBlocks } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { and, eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { extractInputFieldsFromBlocks } from '@/lib/workflows/input-format'
import { createApiResponse, getUserLimits } from '@/app/api/v1/logs/meta'
import { checkRateLimit, createRateLimitResponse } from '@/app/api/v1/middleware'

const logger = createLogger('V1WorkflowDetailsAPI')

export const revalidate = 0

export async function GET(request: NextRequest, { params }: { params: Promise<{ id: string }> }) {
  const requestId = crypto.randomUUID().slice(0, 8)

  try {
    const rateLimit = await checkRateLimit(request, 'workflow-detail')
    if (!rateLimit.allowed) {
      return createRateLimitResponse(rateLimit)
    }

    const userId = rateLimit.userId!
    const { id } = await params

    logger.info(`[${requestId}] Fetching workflow details for ${id}`, { userId })

    const rows = await db
      .select({
        id: workflow.id,
        name: workflow.name,
        description: workflow.description,
        color: workflow.color,
        folderId: workflow.folderId,
        workspaceId: workflow.workspaceId,
        isDeployed: workflow.isDeployed,
        deployedAt: workflow.deployedAt,
        runCount: workflow.runCount,
        lastRunAt: workflow.lastRunAt,
        variables: workflow.variables,
        createdAt: workflow.createdAt,
        updatedAt: workflow.updatedAt,
      })
      .from(workflow)
      .innerJoin(
        permissions,
        and(
          eq(permissions.entityType, 'workspace'),
          eq(permissions.entityId, workflow.workspaceId),
          eq(permissions.userId, userId)
        )
      )
      .where(eq(workflow.id, id))
      .limit(1)

    const workflowData = rows[0]
    if (!workflowData) {
      return NextResponse.json({ error: 'Workflow not found' }, { status: 404 })
    }

    const blockRows = await db
      .select({
        id: workflowBlocks.id,
        type: workflowBlocks.type,
        subBlocks: workflowBlocks.subBlocks,
      })
      .from(workflowBlocks)
      .where(eq(workflowBlocks.workflowId, id))

    const blocksRecord = Object.fromEntries(
      blockRows.map((block) => [block.id, { type: block.type, subBlocks: block.subBlocks }])
    )
    const inputs = extractInputFieldsFromBlocks(blocksRecord)

    const response = {
      id: workflowData.id,
      name: workflowData.name,
      description: workflowData.description,
      color: workflowData.color,
      folderId: workflowData.folderId,
      workspaceId: workflowData.workspaceId,
      isDeployed: workflowData.isDeployed,
      deployedAt: workflowData.deployedAt?.toISOString() || null,
      runCount: workflowData.runCount,
      lastRunAt: workflowData.lastRunAt?.toISOString() || null,
      variables: workflowData.variables || {},
      inputs,
      createdAt: workflowData.createdAt.toISOString(),
      updatedAt: workflowData.updatedAt.toISOString(),
    }

    const limits = await getUserLimits(userId)
    const apiResponse = createApiResponse({ data: response }, limits, rateLimit)

    return NextResponse.json(apiResponse.body, { headers: apiResponse.headers })
  } catch (error: unknown) {
    const message = error instanceof Error ? error.message : 'Unknown error'
    logger.error(`[${requestId}] Workflow details fetch error`, { error: message })
    return NextResponse.json({ error: 'Internal server error' }, { status: 500 })
  }
}
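
A sketch of a consumer for the details endpoint above, assuming it is mounted at `/api/v1/workflows/:id`; authentication goes through `authenticateV1Request` inside `checkRateLimit`, and the exact auth header name is an assumption here.

// Hypothetical client for the workflow-details route (path and header assumed).
async function getWorkflow(id: string) {
  const res = await fetch(`https://your-host/api/v1/workflows/${id}`, {
    headers: { 'x-api-key': process.env.SIM_API_KEY ?? '' }, // assumed header
  })
  if (res.status === 404) throw new Error('Workflow not found')
  const body = await res.json()
  // `data` carries the formatted workflow, including `inputs` derived
  // server-side from the workflow's blocks.
  return body.data
}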

View File

@@ -0,0 +1,184 @@
import { db } from '@sim/db'
import { permissions, workflow } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { and, asc, eq, gt, or } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { createApiResponse, getUserLimits } from '@/app/api/v1/logs/meta'
import { checkRateLimit, createRateLimitResponse } from '@/app/api/v1/middleware'

const logger = createLogger('V1WorkflowsAPI')

export const dynamic = 'force-dynamic'
export const revalidate = 0

const QueryParamsSchema = z.object({
  workspaceId: z.string(),
  folderId: z.string().optional(),
  deployedOnly: z.coerce.boolean().optional().default(false),
  limit: z.coerce.number().min(1).max(100).optional().default(50),
  cursor: z.string().optional(),
})

interface CursorData {
  sortOrder: number
  createdAt: string
  id: string
}

function encodeCursor(data: CursorData): string {
  return Buffer.from(JSON.stringify(data)).toString('base64')
}

function decodeCursor(cursor: string): CursorData | null {
  try {
    return JSON.parse(Buffer.from(cursor, 'base64').toString())
  } catch {
    return null
  }
}

export async function GET(request: NextRequest) {
  const requestId = crypto.randomUUID().slice(0, 8)

  try {
    const rateLimit = await checkRateLimit(request, 'workflows')
    if (!rateLimit.allowed) {
      return createRateLimitResponse(rateLimit)
    }

    const userId = rateLimit.userId!
    const { searchParams } = new URL(request.url)
    const rawParams = Object.fromEntries(searchParams.entries())
    const validationResult = QueryParamsSchema.safeParse(rawParams)

    if (!validationResult.success) {
      return NextResponse.json(
        { error: 'Invalid parameters', details: validationResult.error.errors },
        { status: 400 }
      )
    }

    const params = validationResult.data

    logger.info(`[${requestId}] Fetching workflows for workspace ${params.workspaceId}`, {
      userId,
      filters: {
        folderId: params.folderId,
        deployedOnly: params.deployedOnly,
      },
    })

    const conditions = [
      eq(workflow.workspaceId, params.workspaceId),
      eq(permissions.entityType, 'workspace'),
      eq(permissions.entityId, params.workspaceId),
      eq(permissions.userId, userId),
    ]

    if (params.folderId) {
      conditions.push(eq(workflow.folderId, params.folderId))
    }

    if (params.deployedOnly) {
      conditions.push(eq(workflow.isDeployed, true))
    }

    if (params.cursor) {
      const cursorData = decodeCursor(params.cursor)
      if (cursorData) {
        const cursorCondition = or(
          gt(workflow.sortOrder, cursorData.sortOrder),
          and(
            eq(workflow.sortOrder, cursorData.sortOrder),
            gt(workflow.createdAt, new Date(cursorData.createdAt))
          ),
          and(
            eq(workflow.sortOrder, cursorData.sortOrder),
            eq(workflow.createdAt, new Date(cursorData.createdAt)),
            gt(workflow.id, cursorData.id)
          )
        )
        if (cursorCondition) {
          conditions.push(cursorCondition)
        }
      }
    }

    const orderByClause = [asc(workflow.sortOrder), asc(workflow.createdAt), asc(workflow.id)]

    const rows = await db
      .select({
        id: workflow.id,
        name: workflow.name,
        description: workflow.description,
        color: workflow.color,
        folderId: workflow.folderId,
        workspaceId: workflow.workspaceId,
        isDeployed: workflow.isDeployed,
        deployedAt: workflow.deployedAt,
        runCount: workflow.runCount,
        lastRunAt: workflow.lastRunAt,
        sortOrder: workflow.sortOrder,
        createdAt: workflow.createdAt,
        updatedAt: workflow.updatedAt,
      })
      .from(workflow)
      .innerJoin(
        permissions,
        and(
          eq(permissions.entityType, 'workspace'),
          eq(permissions.entityId, params.workspaceId),
          eq(permissions.userId, userId)
        )
      )
      .where(and(...conditions))
      .orderBy(...orderByClause)
      .limit(params.limit + 1)

    const hasMore = rows.length > params.limit
    const data = rows.slice(0, params.limit)

    let nextCursor: string | undefined
    if (hasMore && data.length > 0) {
      const lastWorkflow = data[data.length - 1]
      nextCursor = encodeCursor({
        sortOrder: lastWorkflow.sortOrder,
        createdAt: lastWorkflow.createdAt.toISOString(),
        id: lastWorkflow.id,
      })
    }

    const formattedWorkflows = data.map((w) => ({
      id: w.id,
      name: w.name,
      description: w.description,
      color: w.color,
      folderId: w.folderId,
      workspaceId: w.workspaceId,
      isDeployed: w.isDeployed,
      deployedAt: w.deployedAt?.toISOString() || null,
      runCount: w.runCount,
      lastRunAt: w.lastRunAt?.toISOString() || null,
      createdAt: w.createdAt.toISOString(),
      updatedAt: w.updatedAt.toISOString(),
    }))

    const limits = await getUserLimits(userId)
    const response = createApiResponse(
      {
        data: formattedWorkflows,
        nextCursor,
      },
      limits,
      rateLimit
    )

    return NextResponse.json(response.body, { headers: response.headers })
  } catch (error: unknown) {
    const message = error instanceof Error ? error.message : 'Unknown error'
    logger.error(`[${requestId}] Workflows fetch error`, { error: message })
    return NextResponse.json({ error: 'Internal server error' }, { status: 500 })
  }
}
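
The route fetches `limit + 1` rows to detect whether another page exists, then encodes the last row's (sortOrder, createdAt, id) triple as an opaque base64 cursor. A sketch of a caller draining all pages under the same assumptions as above (mount path and auth header are illustrative):

// Hypothetical pagination loop over the list endpoint; the body is assumed to
// expose { data, nextCursor } as built by createApiResponse above.
async function listAllWorkflows(workspaceId: string): Promise<unknown[]> {
  const all: unknown[] = []
  let cursor: string | undefined
  do {
    const qs = new URLSearchParams({ workspaceId, limit: '100' })
    if (cursor) qs.set('cursor', cursor)
    const res = await fetch(`https://your-host/api/v1/workflows?${qs}`, {
      headers: { 'x-api-key': process.env.SIM_API_KEY ?? '' }, // assumed header
    })
    const body = await res.json()
    all.push(...body.data)
    cursor = body.nextCursor // undefined once the last page is reached
  } while (cursor)
  return all
}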

View File

@@ -12,6 +12,10 @@ import { markExecutionCancelled } from '@/lib/execution/cancellation'
 import { processInputFileFields } from '@/lib/execution/files'
 import { preprocessExecution } from '@/lib/execution/preprocessing'
 import { LoggingSession } from '@/lib/logs/execution/logging-session'
+import {
+  cleanupExecutionBase64Cache,
+  hydrateUserFilesWithBase64,
+} from '@/lib/uploads/utils/user-file-base64.server'
 import { executeWorkflowCore } from '@/lib/workflows/executor/execution-core'
 import { type ExecutionEvent, encodeSSEEvent } from '@/lib/workflows/executor/execution-events'
 import { PauseResumeManager } from '@/lib/workflows/executor/human-in-the-loop-manager'
@@ -25,7 +29,7 @@ import type { WorkflowExecutionPayload } from '@/background/workflow-execution'
 import { normalizeName } from '@/executor/constants'
 import { ExecutionSnapshot } from '@/executor/execution/snapshot'
 import type { ExecutionMetadata, IterationContext } from '@/executor/execution/types'
-import type { StreamingExecution } from '@/executor/types'
+import type { NormalizedBlockOutput, StreamingExecution } from '@/executor/types'
 import { Serializer } from '@/serializer'
 import { CORE_TRIGGER_TYPES, type CoreTriggerType } from '@/stores/logs/filters/types'
@@ -38,6 +42,8 @@ const ExecuteWorkflowSchema = z.object({
   useDraftState: z.boolean().optional(),
   input: z.any().optional(),
   isClientSession: z.boolean().optional(),
+  includeFileBase64: z.boolean().optional().default(true),
+  base64MaxBytes: z.number().int().positive().optional(),
   workflowStateOverride: z
     .object({
       blocks: z.record(z.any()),
@@ -214,6 +220,8 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
     useDraftState,
     input: validatedInput,
     isClientSession = false,
+    includeFileBase64,
+    base64MaxBytes,
     workflowStateOverride,
   } = validation.data
@@ -227,6 +235,8 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
     triggerType,
     stream,
     useDraftState,
+    includeFileBase64,
+    base64MaxBytes,
     workflowStateOverride,
     workflowId: _workflowId, // Also exclude workflowId used for internal JWT auth
     ...rest
@@ -427,16 +437,31 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
       snapshot,
       callbacks: {},
       loggingSession,
+      includeFileBase64,
+      base64MaxBytes,
     })
-    const hasResponseBlock = workflowHasResponseBlock(result)
+    const outputWithBase64 = includeFileBase64
+      ? ((await hydrateUserFilesWithBase64(result.output, {
+          requestId,
+          executionId,
+          maxBytes: base64MaxBytes,
+        })) as NormalizedBlockOutput)
+      : result.output
+    const resultWithBase64 = { ...result, output: outputWithBase64 }
+    // Cleanup base64 cache for this execution
+    await cleanupExecutionBase64Cache(executionId)
+    const hasResponseBlock = workflowHasResponseBlock(resultWithBase64)
     if (hasResponseBlock) {
-      return createHttpResponseFromBlock(result)
+      return createHttpResponseFromBlock(resultWithBase64)
     }
     const filteredResult = {
       success: result.success,
-      output: result.output,
+      output: outputWithBase64,
       error: result.error,
       metadata: result.metadata
         ? {
@@ -498,6 +523,8 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
         selectedOutputs: resolvedSelectedOutputs,
         isSecureMode: false,
         workflowTriggerType: triggerType === 'chat' ? 'chat' : 'api',
+        includeFileBase64,
+        base64MaxBytes,
       },
       executionId,
     })
@@ -698,6 +725,8 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
       },
       loggingSession,
       abortSignal: abortController.signal,
+      includeFileBase64,
+      base64MaxBytes,
     })
     if (result.status === 'paused') {
@@ -750,12 +779,21 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
           workflowId,
           data: {
             success: result.success,
-            output: result.output,
+            output: includeFileBase64
+              ? await hydrateUserFilesWithBase64(result.output, {
+                  requestId,
+                  executionId,
+                  maxBytes: base64MaxBytes,
+                })
+              : result.output,
             duration: result.metadata?.duration || 0,
             startTime: result.metadata?.startTime || startTime.toISOString(),
             endTime: result.metadata?.endTime || new Date().toISOString(),
           },
         })
+        // Cleanup base64 cache for this execution
+        await cleanupExecutionBase64Cache(executionId)
       } catch (error: any) {
         const errorMessage = error.message || 'Unknown error'
        logger.error(`[${requestId}] SSE execution failed: ${errorMessage}`)
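
A sketch of a caller opting out of base64 hydration on the execute route above. The two body fields come straight from `ExecuteWorkflowSchema` (`includeFileBase64` defaults to true); the host, path prefix, and auth header are assumptions for illustration.

// Hypothetical request against the execute endpoint diffed above.
async function executeWithoutBase64(workflowId: string, apiKey: string) {
  return fetch(`https://your-host/api/workflows/${workflowId}/execute`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json', 'x-api-key': apiKey }, // assumed header
    body: JSON.stringify({
      input: { prompt: 'Summarize the attached file' },
      includeFileBase64: false, // skip hydrating UserFile outputs with base64
      // base64MaxBytes: 5_000_000, // or cap hydration size instead of disabling it
    }),
  })
}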

View File

@@ -33,6 +33,7 @@ const BlockDataSchema = z.object({
   doWhileCondition: z.string().optional(),
   parallelType: z.enum(['collection', 'count']).optional(),
   type: z.string().optional(),
+  canonicalModes: z.record(z.enum(['basic', 'advanced'])).optional(),
 })

 const SubBlockStateSchema = z.object({
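
An illustrative value accepted by the extended schema above; the record keys used here are hypothetical sub-block ids, not names taken from the codebase.

// Minimal object that passes BlockDataSchema with the new field (sketch).
const blockData = {
  type: 'agent',
  canonicalModes: { systemPrompt: 'advanced', model: 'basic' }, // hypothetical keys
}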

View File

@@ -1,4 +1,4 @@
-import React, { type HTMLAttributes, type ReactNode } from 'react'
+import React, { type HTMLAttributes, memo, type ReactNode, useMemo } from 'react'
 import ReactMarkdown from 'react-markdown'
 import remarkGfm from 'remark-gfm'
 import { Tooltip } from '@/components/emcn'
@@ -23,24 +23,16 @@ export function LinkWithPreview({ href, children }: { href: string; children: Re
   )
 }
-export default function MarkdownRenderer({
-  content,
-  customLinkComponent,
-}: {
-  content: string
-  customLinkComponent?: typeof LinkWithPreview
-}) {
-  const LinkComponent = customLinkComponent || LinkWithPreview
+const REMARK_PLUGINS = [remarkGfm]
-  const customComponents = {
-    // Paragraph
+function createCustomComponents(LinkComponent: typeof LinkWithPreview) {
+  return {
     p: ({ children }: React.HTMLAttributes<HTMLParagraphElement>) => (
       <p className='mb-1 font-sans text-base text-gray-800 leading-relaxed last:mb-0 dark:text-gray-200'>
         {children}
       </p>
     ),
-    // Headings
     h1: ({ children }: React.HTMLAttributes<HTMLHeadingElement>) => (
       <h1 className='mt-10 mb-5 font-sans font-semibold text-2xl text-gray-900 dark:text-gray-100'>
         {children}
@@ -62,7 +54,6 @@ export default function MarkdownRenderer({
       </h4>
     ),
-    // Lists
     ul: ({ children }: React.HTMLAttributes<HTMLUListElement>) => (
       <ul
         className='mt-1 mb-1 space-y-1 pl-6 font-sans text-gray-800 dark:text-gray-200'
@@ -89,7 +80,6 @@ export default function MarkdownRenderer({
       </li>
     ),
-    // Code blocks
     pre: ({ children }: HTMLAttributes<HTMLPreElement>) => {
       let codeProps: HTMLAttributes<HTMLElement> = {}
       let codeContent: ReactNode = children
@@ -120,7 +110,6 @@ export default function MarkdownRenderer({
       )
     },
-    // Inline code
     code: ({
       inline,
       className,
@@ -144,24 +133,20 @@ export default function MarkdownRenderer({
       )
     },
-    // Blockquotes
     blockquote: ({ children }: React.HTMLAttributes<HTMLQuoteElement>) => (
       <blockquote className='my-4 border-gray-300 border-l-4 py-1 pl-4 font-sans text-gray-700 italic dark:border-gray-600 dark:text-gray-300'>
         {children}
       </blockquote>
     ),
-    // Horizontal rule
     hr: () => <hr className='my-8 border-gray-500/[.07] border-t dark:border-gray-400/[.07]' />,
-    // Links
     a: ({ href, children, ...props }: React.AnchorHTMLAttributes<HTMLAnchorElement>) => (
       <LinkComponent href={href || '#'} {...props}>
         {children}
       </LinkComponent>
     ),
-    // Tables
     table: ({ children }: React.TableHTMLAttributes<HTMLTableElement>) => (
       <div className='my-4 w-full overflow-x-auto'>
         <table className='min-w-full table-auto border border-gray-300 font-sans text-sm dark:border-gray-700'>
@@ -193,7 +178,6 @@ export default function MarkdownRenderer({
       </td>
     ),
-    // Images
     img: ({ src, alt, ...props }: React.ImgHTMLAttributes<HTMLImageElement>) => (
       <img
         src={src}
@@ -203,15 +187,33 @@ export default function MarkdownRenderer({
       />
     ),
   }
+}
+
+const DEFAULT_COMPONENTS = createCustomComponents(LinkWithPreview)
+
+const MarkdownRenderer = memo(function MarkdownRenderer({
+  content,
+  customLinkComponent,
+}: {
+  content: string
+  customLinkComponent?: typeof LinkWithPreview
+}) {
+  const components = useMemo(() => {
+    if (!customLinkComponent) {
+      return DEFAULT_COMPONENTS
+    }
+    return createCustomComponents(customLinkComponent)
+  }, [customLinkComponent])
-  // Pre-process content to fix common issues
   const processedContent = content.trim()
   return (
     <div className='space-y-4 break-words font-sans text-[#0D0D0D] text-base leading-relaxed dark:text-gray-100'>
-      <ReactMarkdown remarkPlugins={[remarkGfm]} components={customComponents}>
+      <ReactMarkdown remarkPlugins={REMARK_PLUGINS} components={components}>
         {processedContent}
       </ReactMarkdown>
     </div>
   )
-}
+})
+
+export default MarkdownRenderer
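
The refactor hoists the plugin array and the default components map to module scope, so a parent re-render with unchanged props now hits the memoized component instead of rebuilding the ReactMarkdown configuration. A consumer sketch (the import path is assumed):

// Usage is unchanged by the memoization refactor above.
import MarkdownRenderer from '@/components/markdown-renderer' // assumed path

export function MessageBody({ text }: { text: string }) {
  return <MarkdownRenderer content={text} />
}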

View File

@@ -2,7 +2,7 @@
 import { useRef, useState } from 'react'
 import { createLogger } from '@sim/logger'
-import { isUserFile } from '@/lib/core/utils/display-filters'
+import { isUserFileWithMetadata } from '@/lib/core/utils/user-file'
 import type { ChatFile, ChatMessage } from '@/app/chat/components/message/message'
 import { CHAT_ERROR_MESSAGES } from '@/app/chat/constants'
@@ -17,7 +17,7 @@ function extractFilesFromData(
     return files
   }
-  if (isUserFile(data)) {
+  if (isUserFileWithMetadata(data)) {
     if (!seenIds.has(data.id)) {
       seenIds.add(data.id)
       files.push({
@@ -232,7 +232,7 @@ export function useChatStreaming() {
             return null
           }
-          if (isUserFile(value)) {
+          if (isUserFileWithMetadata(value)) {
             return null
           }
@@ -285,7 +285,7 @@ export function useChatStreaming() {
       const value = getOutputValue(blockOutputs, config.path)
-      if (isUserFile(value)) {
+      if (isUserFileWithMetadata(value)) {
         extractedFiles.push({
           id: value.id,
           name: value.name,

View File

@@ -7,7 +7,7 @@ import { generateBrandedMetadata, generateStructuredData } from '@/lib/branding/
 import { PostHogProvider } from '@/app/_shell/providers/posthog-provider'
 import '@/app/_styles/globals.css'
 import { OneDollarStats } from '@/components/analytics/onedollarstats'
-import { isReactGrabEnabled } from '@/lib/core/config/feature-flags'
+import { isReactGrabEnabled, isReactScanEnabled } from '@/lib/core/config/feature-flags'
 import { HydrationErrorHandler } from '@/app/_shell/hydration-error-handler'
 import { QueryProvider } from '@/app/_shell/providers/query-provider'
 import { SessionProvider } from '@/app/_shell/providers/session-provider'
@@ -35,6 +35,13 @@ export default function RootLayout({ children }: { children: React.ReactNode })
   return (
     <html lang='en' suppressHydrationWarning>
       <head>
+        {isReactScanEnabled && (
+          <Script
+            src='https://unpkg.com/react-scan/dist/auto.global.js'
+            crossOrigin='anonymous'
+            strategy='beforeInteractive'
+          />
+        )}
         {isReactGrabEnabled && (
           <Script
             src='https://unpkg.com/react-grab/dist/index.global.js'

View File

@@ -1,10 +1,13 @@
 'use client'
 import { Suspense, useEffect, useState } from 'react'
-import { CheckCircle, Heart, Info, Loader2, XCircle } from 'lucide-react'
+import { Loader2 } from 'lucide-react'
 import { useSearchParams } from 'next/navigation'
-import { Button, Card, CardContent, CardDescription, CardHeader, CardTitle } from '@/components/ui'
-import { useBrandConfig } from '@/lib/branding/branding'
+import { inter } from '@/app/_styles/fonts/inter/inter'
+import { soehne } from '@/app/_styles/fonts/soehne/soehne'
+import { BrandedButton } from '@/app/(auth)/components/branded-button'
+import { SupportFooter } from '@/app/(auth)/components/support-footer'
+import { InviteLayout } from '@/app/invite/components'
 interface UnsubscribeData {
   success: boolean
@@ -27,7 +30,6 @@
   const [error, setError] = useState<string | null>(null)
   const [processing, setProcessing] = useState(false)
   const [unsubscribed, setUnsubscribed] = useState(false)
-  const brand = useBrandConfig()
   const email = searchParams.get('email')
   const token = searchParams.get('token')
@@ -109,7 +111,7 @@
       } else {
         setError(result.error || 'Failed to unsubscribe')
       }
-    } catch (error) {
+    } catch {
       setError('Failed to process unsubscribe request')
     } finally {
       setProcessing(false)
@@ -118,272 +120,171 @@
   if (loading) {
     return (
-      <div className='before:-z-50 relative flex min-h-screen items-center justify-center before:pointer-events-none before:fixed before:inset-0 before:bg-white'>
-        <Card className='w-full max-w-md border shadow-sm'>
-          <CardContent className='flex items-center justify-center p-8'>
-            <Loader2 className='h-8 w-8 animate-spin text-muted-foreground' />
-          </CardContent>
-        </Card>
-      </div>
+      <InviteLayout>
+        <div className='space-y-1 text-center'>
+          <h1 className={`${soehne.className} font-medium text-[32px] text-black tracking-tight`}>
+            Loading
+          </h1>
+          <p className={`${inter.className} font-[380] text-[16px] text-muted-foreground`}>
+            Validating your unsubscribe link...
+          </p>
+        </div>
+        <div className={`${inter.className} mt-8 flex w-full items-center justify-center py-8`}>
+          <Loader2 className='h-8 w-8 animate-spin text-muted-foreground' />
+        </div>
+        <SupportFooter position='absolute' />
+      </InviteLayout>
     )
   }
   if (error) {
     return (
-      <div className='before:-z-50 relative flex min-h-screen items-center justify-center p-4 before:pointer-events-none before:fixed before:inset-0 before:bg-white'>
-        <Card className='w-full max-w-md border shadow-sm'>
-          <CardHeader className='text-center'>
-            <XCircle className='mx-auto mb-2 h-12 w-12 text-red-500' />
-            <CardTitle className='text-foreground'>Invalid Unsubscribe Link</CardTitle>
-            <CardDescription className='text-muted-foreground'>
-              This unsubscribe link is invalid or has expired
-            </CardDescription>
-          </CardHeader>
-          <CardContent className='space-y-4'>
-            <div className='rounded-lg border bg-red-50 p-4'>
-              <p className='text-red-800 text-sm'>
-                <strong>Error:</strong> {error}
-              </p>
-            </div>
-            <div className='space-y-3'>
-              <p className='text-muted-foreground text-sm'>This could happen if:</p>
-              <ul className='ml-4 list-inside list-disc space-y-1 text-muted-foreground text-sm'>
-                <li>The link is missing required parameters</li>
-                <li>The link has expired or been used already</li>
-                <li>The link was copied incorrectly</li>
-              </ul>
-            </div>
-            <div className='mt-6 flex flex-col gap-3'>
-              <Button
-                onClick={() =>
-                  window.open(
-                    `mailto:${brand.supportEmail}?subject=Unsubscribe%20Help&body=Hi%2C%20I%20need%20help%20unsubscribing%20from%20emails.%20My%20unsubscribe%20link%20is%20not%20working.`,
-                    '_blank'
-                  )
-                }
-                className='w-full bg-[var(--brand-primary-hex)] font-medium text-white shadow-sm transition-colors duration-200 hover:bg-[var(--brand-primary-hover-hex)]'
-              >
-                Contact Support
-              </Button>
-              <Button onClick={() => window.history.back()} variant='outline' className='w-full'>
-                Go Back
-              </Button>
-            </div>
-            <div className='mt-4 text-center'>
-              <p className='text-muted-foreground text-xs'>
-                Need immediate help? Email us at{' '}
-                <a
-                  href={`mailto:${brand.supportEmail}`}
-                  className='text-muted-foreground hover:underline'
-                >
-                  {brand.supportEmail}
-                </a>
-              </p>
-            </div>
-          </CardContent>
-        </Card>
-      </div>
+      <InviteLayout>
+        <div className='space-y-1 text-center'>
+          <h1 className={`${soehne.className} font-medium text-[32px] text-black tracking-tight`}>
+            Invalid Unsubscribe Link
+          </h1>
+          <p className={`${inter.className} font-[380] text-[16px] text-muted-foreground`}>
+            {error}
+          </p>
+        </div>
+        <div className={`${inter.className} mt-8 w-full max-w-[410px] space-y-3`}>
+          <BrandedButton onClick={() => window.history.back()}>Go Back</BrandedButton>
+        </div>
+        <SupportFooter position='absolute' />
+      </InviteLayout>
     )
   }
   if (data?.isTransactional) {
     return (
-      <div className='before:-z-50 relative flex min-h-screen items-center justify-center p-4 before:pointer-events-none before:fixed before:inset-0 before:bg-white'>
-        <Card className='w-full max-w-md border shadow-sm'>
-          <CardHeader className='text-center'>
-            <Info className='mx-auto mb-2 h-12 w-12 text-blue-500' />
-            <CardTitle className='text-foreground'>Important Account Emails</CardTitle>
-            <CardDescription className='text-muted-foreground'>
-              This email contains important information about your account
-            </CardDescription>
-          </CardHeader>
-          <CardContent className='space-y-4'>
-            <div className='rounded-lg border bg-blue-50 p-4'>
-              <p className='text-blue-800 text-sm'>
-                <strong>Transactional emails</strong> like password resets, account confirmations,
-                and security alerts cannot be unsubscribed from as they contain essential
-                information for your account security and functionality.
-              </p>
-            </div>
-            <div className='space-y-3'>
-              <p className='text-foreground text-sm'>
-                If you no longer wish to receive these emails, you can:
-              </p>
-              <ul className='ml-4 list-inside list-disc space-y-1 text-muted-foreground text-sm'>
-                <li>Close your account entirely</li>
-                <li>Contact our support team for assistance</li>
-              </ul>
-            </div>
-            <div className='mt-6 flex flex-col gap-3'>
-              <Button
-                onClick={() =>
-                  window.open(
-                    `mailto:${brand.supportEmail}?subject=Account%20Help&body=Hi%2C%20I%20need%20help%20with%20my%20account%20emails.`,
-                    '_blank'
-                  )
-                }
-                className='w-full bg-blue-600 text-white hover:bg-blue-700'
-              >
-                Contact Support
-              </Button>
-              <Button onClick={() => window.close()} variant='outline' className='w-full'>
-                Close
-              </Button>
-            </div>
-          </CardContent>
-        </Card>
-      </div>
+      <InviteLayout>
+        <div className='space-y-1 text-center'>
+          <h1 className={`${soehne.className} font-medium text-[32px] text-black tracking-tight`}>
+            Important Account Emails
+          </h1>
+          <p className={`${inter.className} font-[380] text-[16px] text-muted-foreground`}>
+            Transactional emails like password resets, account confirmations, and security alerts
+            cannot be unsubscribed from as they contain essential information for your account.
+          </p>
+        </div>
+        <div className={`${inter.className} mt-8 w-full max-w-[410px] space-y-3`}>
+          <BrandedButton onClick={() => window.close()}>Close</BrandedButton>
+        </div>
+        <SupportFooter position='absolute' />
+      </InviteLayout>
    )
   }
   if (unsubscribed) {
     return (
-      <div className='before:-z-50 relative flex min-h-screen items-center justify-center before:pointer-events-none before:fixed before:inset-0 before:bg-white'>
-        <Card className='w-full max-w-md border shadow-sm'>
-          <CardHeader className='text-center'>
-            <CheckCircle className='mx-auto mb-2 h-12 w-12 text-green-500' />
-            <CardTitle className='text-foreground'>Successfully Unsubscribed</CardTitle>
-            <CardDescription className='text-muted-foreground'>
-              You have been unsubscribed from our emails. You will stop receiving emails within 48
-              hours.
-            </CardDescription>
-          </CardHeader>
-          <CardContent className='text-center'>
-            <p className='text-muted-foreground text-sm'>
-              If you change your mind, you can always update your email preferences in your account
-              settings or contact us at{' '}
-              <a
-                href={`mailto:${brand.supportEmail}`}
-                className='text-muted-foreground hover:underline'
-              >
-                {brand.supportEmail}
-              </a>
-            </p>
-          </CardContent>
-        </Card>
-      </div>
+      <InviteLayout>
+        <div className='space-y-1 text-center'>
+          <h1 className={`${soehne.className} font-medium text-[32px] text-black tracking-tight`}>
+            Successfully Unsubscribed
+          </h1>
+          <p className={`${inter.className} font-[380] text-[16px] text-muted-foreground`}>
+            You have been unsubscribed from our emails. You will stop receiving emails within 48
+            hours.
+          </p>
+        </div>
+        <div className={`${inter.className} mt-8 w-full max-w-[410px] space-y-3`}>
+          <BrandedButton onClick={() => window.close()}>Close</BrandedButton>
+        </div>
+        <SupportFooter position='absolute' />
+      </InviteLayout>
     )
   }
+  const isAlreadyUnsubscribedFromAll = data?.currentPreferences.unsubscribeAll
   return (
-    <div className='before:-z-50 relative flex min-h-screen items-center justify-center p-4 before:pointer-events-none before:fixed before:inset-0 before:bg-white'>
-      <Card className='w-full max-w-md border shadow-sm'>
-        <CardHeader className='text-center'>
-          <Heart className='mx-auto mb-2 h-12 w-12 text-red-500' />
-          <CardTitle className='text-foreground'>We&apos;re sorry to see you go!</CardTitle>
-          <CardDescription className='text-muted-foreground'>
-            We understand email preferences are personal. Choose which emails you&apos;d like to
-            stop receiving from Sim.
-          </CardDescription>
-          <div className='mt-2 rounded-lg border bg-muted/50 p-3'>
-            <p className='text-muted-foreground text-xs'>
-              Email: <span className='font-medium text-foreground'>{data?.email}</span>
-            </p>
-          </div>
-        </CardHeader>
-        <CardContent className='space-y-4'>
-          <div className='space-y-3'>
-            <Button
-              onClick={() => handleUnsubscribe('all')}
-              disabled={processing || data?.currentPreferences.unsubscribeAll}
-              variant='destructive'
-              className='w-full'
-            >
-              {data?.currentPreferences.unsubscribeAll ? (
-                <CheckCircle className='mr-2 h-4 w-4' />
-              ) : null}
-              {processing
-                ? 'Unsubscribing...'
-                : data?.currentPreferences.unsubscribeAll
-                  ? 'Unsubscribed from All Emails'
-                  : 'Unsubscribe from All Marketing Emails'}
-            </Button>
-            <div className='text-center text-muted-foreground text-sm'>
-              or choose specific types:
-            </div>
-            <Button
-              onClick={() => handleUnsubscribe('marketing')}
-              disabled={
-                processing ||
-                data?.currentPreferences.unsubscribeAll ||
-                data?.currentPreferences.unsubscribeMarketing
-              }
-              variant='outline'
-              className='w-full'
-            >
-              {data?.currentPreferences.unsubscribeMarketing ? (
-                <CheckCircle className='mr-2 h-4 w-4' />
-              ) : null}
-              {data?.currentPreferences.unsubscribeMarketing
-                ? 'Unsubscribed from Marketing'
-                : 'Unsubscribe from Marketing Emails'}
-            </Button>
-            <Button
-              onClick={() => handleUnsubscribe('updates')}
-              disabled={
-                processing ||
-                data?.currentPreferences.unsubscribeAll ||
-                data?.currentPreferences.unsubscribeUpdates
-              }
-              variant='outline'
-              className='w-full'
-            >
-              {data?.currentPreferences.unsubscribeUpdates ? (
-                <CheckCircle className='mr-2 h-4 w-4' />
-              ) : null}
-              {data?.currentPreferences.unsubscribeUpdates
-                ? 'Unsubscribed from Updates'
-                : 'Unsubscribe from Product Updates'}
-            </Button>
-            <Button
-              onClick={() => handleUnsubscribe('notifications')}
-              disabled={
-                processing ||
-                data?.currentPreferences.unsubscribeAll ||
-                data?.currentPreferences.unsubscribeNotifications
-              }
-              variant='outline'
-              className='w-full'
-            >
-              {data?.currentPreferences.unsubscribeNotifications ? (
-                <CheckCircle className='mr-2 h-4 w-4' />
-              ) : null}
-              {data?.currentPreferences.unsubscribeNotifications
-                ? 'Unsubscribed from Notifications'
-                : 'Unsubscribe from Notifications'}
-            </Button>
-          </div>
-          <div className='mt-6 space-y-3'>
-            <div className='rounded-lg border bg-muted/50 p-3'>
-              <p className='text-center text-muted-foreground text-xs'>
-                <strong>Note:</strong> You&apos;ll continue receiving important account emails like
-                password resets and security alerts.
-              </p>
-            </div>
-            <p className='text-center text-muted-foreground text-xs'>
-              Questions? Contact us at{' '}
-              <a
-                href={`mailto:${brand.supportEmail}`}
-                className='text-muted-foreground hover:underline'
-              >
-                {brand.supportEmail}
-              </a>
-            </p>
-          </div>
-        </CardContent>
-      </Card>
-    </div>
+    <InviteLayout>
+      <div className='space-y-1 text-center'>
+        <h1 className={`${soehne.className} font-medium text-[32px] text-black tracking-tight`}>
+          Email Preferences
+        </h1>
+        <p className={`${inter.className} font-[380] text-[16px] text-muted-foreground`}>
+          Choose which emails you'd like to stop receiving.
+        </p>
+        <p className={`${inter.className} mt-2 font-[380] text-[14px] text-muted-foreground`}>
+          {data?.email}
+        </p>
+      </div>
+      <div className={`${inter.className} mt-8 w-full max-w-[410px] space-y-3`}>
+        <BrandedButton
+          onClick={() => handleUnsubscribe('all')}
+          disabled={processing || isAlreadyUnsubscribedFromAll}
+          loading={processing}
+          loadingText='Unsubscribing'
+        >
+          {isAlreadyUnsubscribedFromAll
+            ? 'Unsubscribed from All Emails'
+            : 'Unsubscribe from All Marketing Emails'}
+        </BrandedButton>
+        <div className='py-2 text-center'>
+          <span className={`${inter.className} font-[380] text-[14px] text-muted-foreground`}>
+            or choose specific types
+          </span>
+        </div>
+        <BrandedButton
+          onClick={() => handleUnsubscribe('marketing')}
+          disabled={
+            processing ||
+            isAlreadyUnsubscribedFromAll ||
+            data?.currentPreferences.unsubscribeMarketing
+          }
+        >
+          {data?.currentPreferences.unsubscribeMarketing
+            ? 'Unsubscribed from Marketing'
+            : 'Unsubscribe from Marketing Emails'}
+        </BrandedButton>
+        <BrandedButton
+          onClick={() => handleUnsubscribe('updates')}
+          disabled={
+            processing ||
+            isAlreadyUnsubscribedFromAll ||
+            data?.currentPreferences.unsubscribeUpdates
+          }
+        >
+          {data?.currentPreferences.unsubscribeUpdates
+            ? 'Unsubscribed from Updates'
+            : 'Unsubscribe from Product Updates'}
+        </BrandedButton>
+        <BrandedButton
+          onClick={() => handleUnsubscribe('notifications')}
+          disabled={
+            processing ||
+            isAlreadyUnsubscribedFromAll ||
+            data?.currentPreferences.unsubscribeNotifications
+          }
+        >
+          {data?.currentPreferences.unsubscribeNotifications
+            ? 'Unsubscribed from Notifications'
+            : 'Unsubscribe from Notifications'}
+        </BrandedButton>
+      </div>
+      <div className={`${inter.className} mt-6 max-w-[410px] text-center`}>
+        <p className='font-[380] text-[13px] text-muted-foreground'>
+          You'll continue receiving important account emails like password resets and security
+          alerts.
+        </p>
+      </div>
+      <SupportFooter position='absolute' />
+    </InviteLayout>
   )
 }
@@ -391,13 +292,20 @@ export default function Unsubscribe() {
   return (
     <Suspense
       fallback={
-        <div className='before:-z-50 relative flex min-h-screen items-center justify-center before:pointer-events-none before:fixed before:inset-0 before:bg-white'>
-          <Card className='w-full max-w-md border shadow-sm'>
-            <CardContent className='flex items-center justify-center p-8'>
-              <Loader2 className='h-8 w-8 animate-spin text-muted-foreground' />
-            </CardContent>
-          </Card>
-        </div>
+        <InviteLayout>
+          <div className='space-y-1 text-center'>
+            <h1 className={`${soehne.className} font-medium text-[32px] text-black tracking-tight`}>
+              Loading
+            </h1>
+            <p className={`${inter.className} font-[380] text-[16px] text-muted-foreground`}>
+              Validating your unsubscribe link...
+            </p>
+          </div>
+          <div className={`${inter.className} mt-8 flex w-full items-center justify-center py-8`}>
+            <Loader2 className='h-8 w-8 animate-spin text-muted-foreground' />
+          </div>
+          <SupportFooter position='absolute' />
+        </InviteLayout>
       }
     >
       <UnsubscribeContent />

View File

@@ -2,7 +2,6 @@
 import { useRef, useState } from 'react'
 import { createLogger } from '@sim/logger'
-import { useQueryClient } from '@tanstack/react-query'
 import {
   Button,
   Label,
@@ -14,7 +13,7 @@
   Textarea,
 } from '@/components/emcn'
 import type { DocumentData } from '@/lib/knowledge/types'
-import { knowledgeKeys } from '@/hooks/queries/knowledge'
+import { useCreateChunk } from '@/hooks/queries/knowledge'
 const logger = createLogger('CreateChunkModal')
@@ -31,16 +30,20 @@
   document,
   knowledgeBaseId,
 }: CreateChunkModalProps) {
-  const queryClient = useQueryClient()
+  const {
+    mutate: createChunk,
+    isPending: isCreating,
+    error: mutationError,
+    reset: resetMutation,
+  } = useCreateChunk()
   const [content, setContent] = useState('')
-  const [isCreating, setIsCreating] = useState(false)
-  const [error, setError] = useState<string | null>(null)
   const [showUnsavedChangesAlert, setShowUnsavedChangesAlert] = useState(false)
   const isProcessingRef = useRef(false)
+  const error = mutationError?.message ?? null
   const hasUnsavedChanges = content.trim().length > 0
-  const handleCreateChunk = async () => {
+  const handleCreateChunk = () => {
     if (!document || content.trim().length === 0 || isProcessingRef.current) {
       if (isProcessingRef.current) {
         logger.warn('Chunk creation already in progress, ignoring duplicate request')
@@ -48,57 +51,32 @@
       return
     }
-    try {
-      isProcessingRef.current = true
-      setIsCreating(true)
-      setError(null)
-      const response = await fetch(
-        `/api/knowledge/${knowledgeBaseId}/documents/${document.id}/chunks`,
-        {
-          method: 'POST',
-          headers: {
-            'Content-Type': 'application/json',
-          },
-          body: JSON.stringify({
-            content: content.trim(),
-            enabled: true,
-          }),
-        }
-      )
-      if (!response.ok) {
-        const result = await response.json()
-        throw new Error(result.error || 'Failed to create chunk')
-      }
-      const result = await response.json()
-      if (result.success && result.data) {
-        logger.info('Chunk created successfully:', result.data.id)
-        await queryClient.invalidateQueries({
-          queryKey: knowledgeKeys.detail(knowledgeBaseId),
-        })
-        onClose()
-      } else {
-        throw new Error(result.error || 'Failed to create chunk')
-      }
-    } catch (err) {
-      logger.error('Error creating chunk:', err)
-      setError(err instanceof Error ? err.message : 'An error occurred')
-    } finally {
-      isProcessingRef.current = false
-      setIsCreating(false)
-    }
+    isProcessingRef.current = true
+    createChunk(
+      {
+        knowledgeBaseId,
+        documentId: document.id,
+        content: content.trim(),
+        enabled: true,
+      },
+      {
+        onSuccess: () => {
+          isProcessingRef.current = false
+          onClose()
+        },
+        onError: () => {
+          isProcessingRef.current = false
+        },
+      }
+    )
   }
   const onClose = () => {
     onOpenChange(false)
     setContent('')
-    setError(null)
     setShowUnsavedChangesAlert(false)
+    resetMutation()
   }
   const handleCloseAttempt = () => {

View File

@@ -1,13 +1,8 @@
 'use client'
-import { useState } from 'react'
-import { createLogger } from '@sim/logger'
-import { useQueryClient } from '@tanstack/react-query'
 import { Button, Modal, ModalBody, ModalContent, ModalFooter, ModalHeader } from '@/components/emcn'
 import type { ChunkData } from '@/lib/knowledge/types'
-import { knowledgeKeys } from '@/hooks/queries/knowledge'
+import { useDeleteChunk } from '@/hooks/queries/knowledge'
-const logger = createLogger('DeleteChunkModal')
 interface DeleteChunkModalProps {
   chunk: ChunkData | null
@@ -24,44 +19,12 @@
   isOpen,
   onClose,
 }: DeleteChunkModalProps) {
-  const queryClient = useQueryClient()
-  const [isDeleting, setIsDeleting] = useState(false)
+  const { mutate: deleteChunk, isPending: isDeleting } = useDeleteChunk()
-  const handleDeleteChunk = async () => {
+  const handleDeleteChunk = () => {
     if (!chunk || isDeleting) return
-    try {
-      setIsDeleting(true)
-      const response = await fetch(
-        `/api/knowledge/${knowledgeBaseId}/documents/${documentId}/chunks/${chunk.id}`,
-        {
-          method: 'DELETE',
-        }
-      )
-      if (!response.ok) {
-        throw new Error('Failed to delete chunk')
-      }
-      const result = await response.json()
-      if (result.success) {
-        logger.info('Chunk deleted successfully:', chunk.id)
-        await queryClient.invalidateQueries({
-          queryKey: knowledgeKeys.detail(knowledgeBaseId),
-        })
-        onClose()
-      } else {
-        throw new Error(result.error || 'Failed to delete chunk')
-      }
-    } catch (err) {
-      logger.error('Error deleting chunk:', err)
-    } finally {
-      setIsDeleting(false)
-    }
+    deleteChunk({ knowledgeBaseId, documentId, chunkId: chunk.id }, { onSuccess: onClose })
   }
   if (!chunk) return null

View File

@@ -25,6 +25,7 @@ import {
 } from '@/hooks/kb/use-knowledge-base-tag-definitions'
 import { useNextAvailableSlot } from '@/hooks/kb/use-next-available-slot'
 import { type TagDefinitionInput, useTagDefinitions } from '@/hooks/kb/use-tag-definitions'
+import { useUpdateDocumentTags } from '@/hooks/queries/knowledge'
 const logger = createLogger('DocumentTagsModal')
@@ -58,8 +59,6 @@ function formatValueForDisplay(value: string, fieldType: string): string {
   try {
     const date = new Date(value)
     if (Number.isNaN(date.getTime())) return value
-    // For UTC dates, display the UTC date to prevent timezone shifts
-    // e.g., 2002-05-16T00:00:00.000Z should show as "May 16, 2002" not "May 15, 2002"
     if (typeof value === 'string' && (value.endsWith('Z') || /[+-]\d{2}:\d{2}$/.test(value))) {
       return new Date(
         date.getUTCFullYear(),
@@ -96,6 +95,7 @@ export function DocumentTagsModal({
   const documentTagHook = useTagDefinitions(knowledgeBaseId, documentId)
   const kbTagHook = useKnowledgeBaseTagDefinitions(knowledgeBaseId)
   const { getNextAvailableSlot: getServerNextSlot } = useNextAvailableSlot(knowledgeBaseId)
+  const { mutateAsync: updateDocumentTags } = useUpdateDocumentTags()
   const { saveTagDefinitions, tagDefinitions, fetchTagDefinitions } = documentTagHook
   const { tagDefinitions: kbTagDefinitions, fetchTagDefinitions: refreshTagDefinitions } = kbTagHook
@@ -118,7 +118,6 @@
       const definition = definitions.find((def) => def.tagSlot === slot)
       if (rawValue !== null && rawValue !== undefined && definition) {
-        // Convert value to string for storage
         const stringValue = String(rawValue).trim()
         if (stringValue) {
           tags.push({
@@ -142,41 +141,34 @@
     async (tagsToSave: DocumentTag[]) => {
       if (!documentData) return
-      try {
-        const tagData: Record<string, string> = {}
-        // Only include tags that have values (omit empty ones)
-        // Use empty string for slots that should be cleared
-        ALL_TAG_SLOTS.forEach((slot) => {
-          const tag = tagsToSave.find((t) => t.slot === slot)
-          if (tag?.value.trim()) {
-            tagData[slot] = tag.value.trim()
-          } else {
-            // Use empty string to clear a tag (API schema expects string, not null)
-            tagData[slot] = ''
-          }
-        })
-        const response = await fetch(`/api/knowledge/${knowledgeBaseId}/documents/${documentId}`, {
-          method: 'PUT',
-          headers: {
-            'Content-Type': 'application/json',
-          },
-          body: JSON.stringify(tagData),
-        })
-        if (!response.ok) {
-          throw new Error('Failed to update document tags')
-        }
-        onDocumentUpdate?.(tagData as Record<string, string>)
-        await fetchTagDefinitions()
-      } catch (error) {
-        logger.error('Error updating document tags:', error)
-        throw error
-      }
+      const tagData: Record<string, string> = {}
+      ALL_TAG_SLOTS.forEach((slot) => {
+        const tag = tagsToSave.find((t) => t.slot === slot)
+        if (tag?.value.trim()) {
+          tagData[slot] = tag.value.trim()
+        } else {
+          tagData[slot] = ''
+        }
+      })
+      await updateDocumentTags({
+        knowledgeBaseId,
+        documentId,
+        tags: tagData,
+      })
+      onDocumentUpdate?.(tagData)
+      await fetchTagDefinitions()
     },
-    [documentData, knowledgeBaseId, documentId, fetchTagDefinitions, onDocumentUpdate]
+    [
+      documentData,
+      knowledgeBaseId,
+      documentId,
+      updateDocumentTags,
+      fetchTagDefinitions,
+      onDocumentUpdate,
+    ]
   )
   const handleRemoveTag = async (index: number) => {

View File

@@ -2,7 +2,6 @@
 import { useEffect, useMemo, useRef, useState } from 'react'
 import { createLogger } from '@sim/logger'
-import { useQueryClient } from '@tanstack/react-query'
 import { ChevronDown, ChevronUp } from 'lucide-react'
 import {
   Button,
@@ -19,7 +18,7 @@
 import type { ChunkData, DocumentData } from '@/lib/knowledge/types'
 import { getAccurateTokenCount, getTokenStrings } from '@/lib/tokenization/estimators'
 import { useUserPermissionsContext } from '@/app/workspace/[workspaceId]/providers/workspace-permissions-provider'
-import { knowledgeKeys } from '@/hooks/queries/knowledge'
+import { useUpdateChunk } from '@/hooks/queries/knowledge'
 const logger = createLogger('EditChunkModal')
@@ -50,17 +49,22 @@
   onNavigateToPage,
   maxChunkSize,
 }: EditChunkModalProps) {
-  const queryClient = useQueryClient()
   const userPermissions = useUserPermissionsContext()
+  const {
+    mutate: updateChunk,
+    isPending: isSaving,
+    error: mutationError,
+    reset: resetMutation,
+  } = useUpdateChunk()
   const [editedContent, setEditedContent] = useState(chunk?.content || '')
-  const [isSaving, setIsSaving] = useState(false)
   const [isNavigating, setIsNavigating] = useState(false)
-  const [error, setError] = useState<string | null>(null)
   const [showUnsavedChangesAlert, setShowUnsavedChangesAlert] = useState(false)
   const [pendingNavigation, setPendingNavigation] = useState<(() => void) | null>(null)
   const [tokenizerOn, setTokenizerOn] = useState(false)
   const textareaRef = useRef<HTMLTextAreaElement>(null)
+  const error = mutationError?.message ?? null
   const hasUnsavedChanges = editedContent !== (chunk?.content || '')
   const tokenStrings = useMemo(() => {
@@ -102,44 +106,15 @@
   const canNavigatePrev = currentChunkIndex > 0 || currentPage > 1
   const canNavigateNext = currentChunkIndex < allChunks.length - 1 || currentPage < totalPages
-  const handleSaveContent = async () => {
+  const handleSaveContent = () => {
     if (!chunk || !document) return
-    try {
-      setIsSaving(true)
-      setError(null)
-      const response = await fetch(
-        `/api/knowledge/${knowledgeBaseId}/documents/${document.id}/chunks/${chunk.id}`,
-        {
-          method: 'PUT',
-          headers: {
-            'Content-Type': 'application/json',
-          },
-          body: JSON.stringify({
-            content: editedContent,
-          }),
-        }
-      )
-      if (!response.ok) {
-        const result = await response.json()
-        throw new Error(result.error || 'Failed to update chunk')
-      }
-      const result = await response.json()
-      if (result.success) {
-        await queryClient.invalidateQueries({
-          queryKey: knowledgeKeys.detail(knowledgeBaseId),
-        })
-      }
-    } catch (err) {
-      logger.error('Error updating chunk:', err)
-      setError(err instanceof Error ? err.message : 'An error occurred')
-    } finally {
-      setIsSaving(false)
-    }
+    updateChunk({
+      knowledgeBaseId,
+      documentId: document.id,
+      chunkId: chunk.id,
+      content: editedContent,
+    })
   }
   const navigateToChunk = async (direction: 'prev' | 'next') => {
@@ -165,7 +140,6 @@
       }
     } catch (err) {
       logger.error(`Error navigating ${direction}:`, err)
-      setError(`Failed to navigate to ${direction === 'prev' ? 'previous' : 'next'} chunk`)
     } finally {
       setIsNavigating(false)
     }
@@ -185,6 +159,7 @@
       setPendingNavigation(null)
       setShowUnsavedChangesAlert(true)
     } else {
+      resetMutation()
       onClose()
     }
   }
@@ -195,6 +170,7 @@
       void pendingNavigation()
       setPendingNavigation(null)
     } else {
+      resetMutation()
       onClose()
     }
   }
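
For context, a plausible shape for the `useUpdateChunk` hook these modals now consume — a sketch, not the actual implementation in `@/hooks/queries/knowledge`. The endpoint, method, and invalidated query key are taken from the fetch logic this diff removes; the TanStack Query wiring is the assumption.

// Sketch of useUpdateChunk, reconstructed from the removed inline fetch code.
import { useMutation, useQueryClient } from '@tanstack/react-query'
import { knowledgeKeys } from '@/hooks/queries/knowledge' // assumed export location

interface UpdateChunkVars {
  knowledgeBaseId: string
  documentId: string
  chunkId: string
  content?: string
  enabled?: boolean
}

export function useUpdateChunk() {
  const queryClient = useQueryClient()
  return useMutation({
    mutationFn: async ({ knowledgeBaseId, documentId, chunkId, ...body }: UpdateChunkVars) => {
      const res = await fetch(
        `/api/knowledge/${knowledgeBaseId}/documents/${documentId}/chunks/${chunkId}`,
        {
          method: 'PUT',
          headers: { 'Content-Type': 'application/json' },
          body: JSON.stringify(body),
        }
      )
      const json = await res.json()
      if (!res.ok || !json.success) throw new Error(json.error || 'Failed to update chunk')
      return json.data
    },
    // Matches the invalidation the components previously performed by hand.
    onSuccess: (_data, vars) =>
      queryClient.invalidateQueries({ queryKey: knowledgeKeys.detail(vars.knowledgeBaseId) }),
  })
}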

View File

@@ -48,7 +48,13 @@ import { ActionBar } from '@/app/workspace/[workspaceId]/knowledge/[id]/componen
 import { useUserPermissionsContext } from '@/app/workspace/[workspaceId]/providers/workspace-permissions-provider'
 import { useContextMenu } from '@/app/workspace/[workspaceId]/w/components/sidebar/hooks'
 import { useDocument, useDocumentChunks, useKnowledgeBase } from '@/hooks/kb/use-knowledge'
-import { knowledgeKeys, useDocumentChunkSearchQuery } from '@/hooks/queries/knowledge'
+import {
+  knowledgeKeys,
+  useBulkChunkOperation,
+  useDeleteDocument,
+  useDocumentChunkSearchQuery,
+  useUpdateChunk,
+} from '@/hooks/queries/knowledge'

 const logger = createLogger('Document')

@@ -403,11 +409,13 @@ export function Document({
   const [isCreateChunkModalOpen, setIsCreateChunkModalOpen] = useState(false)
   const [chunkToDelete, setChunkToDelete] = useState<ChunkData | null>(null)
   const [isDeleteModalOpen, setIsDeleteModalOpen] = useState(false)
-  const [isBulkOperating, setIsBulkOperating] = useState(false)
   const [showDeleteDocumentDialog, setShowDeleteDocumentDialog] = useState(false)
-  const [isDeletingDocument, setIsDeletingDocument] = useState(false)
   const [contextMenuChunk, setContextMenuChunk] = useState<ChunkData | null>(null)
+  const { mutate: updateChunkMutation } = useUpdateChunk()
+  const { mutate: deleteDocumentMutation, isPending: isDeletingDocument } = useDeleteDocument()
+  const { mutate: bulkChunkMutation, isPending: isBulkOperating } = useBulkChunkOperation()
   const {
     isOpen: isContextMenuOpen,
     position: contextMenuPosition,
@@ -440,36 +448,23 @@ export function Document({
     setSelectedChunk(null)
   }

-  const handleToggleEnabled = async (chunkId: string) => {
+  const handleToggleEnabled = (chunkId: string) => {
     const chunk = displayChunks.find((c) => c.id === chunkId)
     if (!chunk) return

-    try {
-      const response = await fetch(
-        `/api/knowledge/${knowledgeBaseId}/documents/${documentId}/chunks/${chunkId}`,
-        {
-          method: 'PUT',
-          headers: {
-            'Content-Type': 'application/json',
-          },
-          body: JSON.stringify({
-            enabled: !chunk.enabled,
-          }),
-        }
-      )
-
-      if (!response.ok) {
-        throw new Error('Failed to update chunk')
-      }
-
-      const result = await response.json()
-      if (result.success) {
-        updateChunk(chunkId, { enabled: !chunk.enabled })
-      }
-    } catch (err) {
-      logger.error('Error updating chunk:', err)
-    }
+    updateChunkMutation(
+      {
+        knowledgeBaseId,
+        documentId,
+        chunkId,
+        enabled: !chunk.enabled,
+      },
+      {
+        onSuccess: () => {
+          updateChunk(chunkId, { enabled: !chunk.enabled })
+        },
+      }
+    )
   }

   const handleDeleteChunk = (chunkId: string) => {
@@ -515,107 +510,65 @@ export function Document({
   /**
    * Handles deleting the document
    */
-  const handleDeleteDocument = async () => {
+  const handleDeleteDocument = () => {
     if (!documentData) return

-    try {
-      setIsDeletingDocument(true)
-
-      const response = await fetch(`/api/knowledge/${knowledgeBaseId}/documents/${documentId}`, {
-        method: 'DELETE',
-      })
-
-      if (!response.ok) {
-        throw new Error('Failed to delete document')
-      }
-
-      const result = await response.json()
-      if (result.success) {
-        await queryClient.invalidateQueries({
-          queryKey: knowledgeKeys.detail(knowledgeBaseId),
-        })
-        router.push(`/workspace/${workspaceId}/knowledge/${knowledgeBaseId}`)
-      } else {
-        throw new Error(result.error || 'Failed to delete document')
-      }
-    } catch (err) {
-      logger.error('Error deleting document:', err)
-      setIsDeletingDocument(false)
-    }
+    deleteDocumentMutation(
+      { knowledgeBaseId, documentId },
+      {
+        onSuccess: () => {
+          router.push(`/workspace/${workspaceId}/knowledge/${knowledgeBaseId}`)
+        },
+      }
+    )
   }

-  const performBulkChunkOperation = async (
+  const performBulkChunkOperation = (
     operation: 'enable' | 'disable' | 'delete',
     chunks: ChunkData[]
   ) => {
     if (chunks.length === 0) return

-    try {
-      setIsBulkOperating(true)
-
-      const response = await fetch(
-        `/api/knowledge/${knowledgeBaseId}/documents/${documentId}/chunks`,
-        {
-          method: 'PATCH',
-          headers: {
-            'Content-Type': 'application/json',
-          },
-          body: JSON.stringify({
-            operation,
-            chunkIds: chunks.map((chunk) => chunk.id),
-          }),
-        }
-      )
-
-      if (!response.ok) {
-        throw new Error(`Failed to ${operation} chunks`)
-      }
-
-      const result = await response.json()
-      if (result.success) {
-        if (operation === 'delete') {
-          await refreshChunks()
-        } else {
-          result.data.results.forEach((opResult: any) => {
-            if (opResult.operation === operation) {
-              opResult.chunkIds.forEach((chunkId: string) => {
-                updateChunk(chunkId, { enabled: operation === 'enable' })
-              })
-            }
-          })
-        }
-        logger.info(`Successfully ${operation}d ${result.data.successCount} chunks`)
-      }
-
-      setSelectedChunks(new Set())
-    } catch (err) {
-      logger.error(`Error ${operation}ing chunks:`, err)
-    } finally {
-      setIsBulkOperating(false)
-    }
+    bulkChunkMutation(
+      {
+        knowledgeBaseId,
+        documentId,
+        operation,
+        chunkIds: chunks.map((chunk) => chunk.id),
+      },
+      {
+        onSuccess: (result) => {
+          if (operation === 'delete' || result.errorCount > 0) {
+            refreshChunks()
+          } else {
+            chunks.forEach((chunk) => {
+              updateChunk(chunk.id, { enabled: operation === 'enable' })
+            })
+          }
+          logger.info(`Successfully ${operation}d ${result.successCount} chunks`)
+          setSelectedChunks(new Set())
+        },
+      }
+    )
   }

-  const handleBulkEnable = async () => {
+  const handleBulkEnable = () => {
     const chunksToEnable = displayChunks.filter(
       (chunk) => selectedChunks.has(chunk.id) && !chunk.enabled
     )
-    await performBulkChunkOperation('enable', chunksToEnable)
+    performBulkChunkOperation('enable', chunksToEnable)
   }

-  const handleBulkDisable = async () => {
+  const handleBulkDisable = () => {
     const chunksToDisable = displayChunks.filter(
       (chunk) => selectedChunks.has(chunk.id) && chunk.enabled
     )
-    await performBulkChunkOperation('disable', chunksToDisable)
+    performBulkChunkOperation('disable', chunksToDisable)
   }

-  const handleBulkDelete = async () => {
+  const handleBulkDelete = () => {
     const chunksToDelete = displayChunks.filter((chunk) => selectedChunks.has(chunk.id))
-    await performBulkChunkOperation('delete', chunksToDelete)
+    performBulkChunkOperation('delete', chunksToDelete)
   }

   const selectedChunksList = displayChunks.filter((chunk) => selectedChunks.has(chunk.id))
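
Note: the `useUpdateChunk`, `useDeleteDocument`, and `useBulkChunkOperation` hooks consumed above are not shown in this diff. A minimal sketch of what `useUpdateChunk` might look like, assuming TanStack Query v5 and the same PUT endpoint the removed inline fetch used; the hook body and the exact keys it invalidates are assumptions, not the actual implementation:

```ts
import { useMutation, useQueryClient } from '@tanstack/react-query'

interface UpdateChunkParams {
  knowledgeBaseId: string
  documentId: string
  chunkId: string
  enabled?: boolean
  content?: string
}

export function useUpdateChunk() {
  const queryClient = useQueryClient()
  return useMutation({
    // Same endpoint and { success, data } envelope the component previously inlined
    mutationFn: async ({ knowledgeBaseId, documentId, chunkId, ...updates }: UpdateChunkParams) => {
      const response = await fetch(
        `/api/knowledge/${knowledgeBaseId}/documents/${documentId}/chunks/${chunkId}`,
        {
          method: 'PUT',
          headers: { 'Content-Type': 'application/json' },
          body: JSON.stringify(updates),
        }
      )
      if (!response.ok) throw new Error('Failed to update chunk')
      const result = await response.json()
      if (!result.success) throw new Error(result.error || 'Failed to update chunk')
      return result.data
    },
    // Assumed: refresh the cached document after a successful write.
    // knowledgeKeys is the query-key factory exported from this same module.
    onSuccess: (_data, { knowledgeBaseId, documentId }) => {
      queryClient.invalidateQueries({
        queryKey: knowledgeKeys.document(knowledgeBaseId, documentId),
      })
    },
  })
}
```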

View File

@@ -2,7 +2,6 @@
 import { useCallback, useEffect, useRef, useState } from 'react'
 import { createLogger } from '@sim/logger'
-import { useQueryClient } from '@tanstack/react-query'
 import { format } from 'date-fns'
 import {
   AlertCircle,
@@ -62,7 +61,12 @@ import {
   type TagDefinition,
   useKnowledgeBaseTagDefinitions,
 } from '@/hooks/kb/use-knowledge-base-tag-definitions'
-import { knowledgeKeys } from '@/hooks/queries/knowledge'
+import {
+  useBulkDocumentOperation,
+  useDeleteDocument,
+  useDeleteKnowledgeBase,
+  useUpdateDocument,
+} from '@/hooks/queries/knowledge'

 const logger = createLogger('KnowledgeBase')

@@ -407,12 +411,17 @@ export function KnowledgeBase({
   id,
   knowledgeBaseName: passedKnowledgeBaseName,
 }: KnowledgeBaseProps) {
-  const queryClient = useQueryClient()
   const params = useParams()
   const workspaceId = params.workspaceId as string
   const { removeKnowledgeBase } = useKnowledgeBasesList(workspaceId, { enabled: false })
   const userPermissions = useUserPermissionsContext()
+  const { mutate: updateDocumentMutation } = useUpdateDocument()
+  const { mutate: deleteDocumentMutation } = useDeleteDocument()
+  const { mutate: deleteKnowledgeBaseMutation, isPending: isDeleting } =
+    useDeleteKnowledgeBase(workspaceId)
+  const { mutate: bulkDocumentMutation, isPending: isBulkOperating } = useBulkDocumentOperation()

   const [searchQuery, setSearchQuery] = useState('')
   const [showTagsModal, setShowTagsModal] = useState(false)
@@ -427,8 +436,6 @@ export function KnowledgeBase({
   const [selectedDocuments, setSelectedDocuments] = useState<Set<string>>(new Set())
   const [showDeleteDialog, setShowDeleteDialog] = useState(false)
   const [showAddDocumentsModal, setShowAddDocumentsModal] = useState(false)
-  const [isDeleting, setIsDeleting] = useState(false)
-  const [isBulkOperating, setIsBulkOperating] = useState(false)
   const [showDeleteDocumentModal, setShowDeleteDocumentModal] = useState(false)
   const [documentToDelete, setDocumentToDelete] = useState<string | null>(null)
   const [showBulkDeleteModal, setShowBulkDeleteModal] = useState(false)
@@ -550,7 +557,7 @@ export function KnowledgeBase({
   /**
    * Checks for documents with stale processing states and marks them as failed
    */
-  const checkForDeadProcesses = async () => {
+  const checkForDeadProcesses = () => {
     const now = new Date()
     const DEAD_PROCESS_THRESHOLD_MS = 600 * 1000 // 10 minutes
@@ -567,116 +574,79 @@ export function KnowledgeBase({
     logger.warn(`Found ${staleDocuments.length} documents with dead processes`)

-    const markFailedPromises = staleDocuments.map(async (doc) => {
-      try {
-        const response = await fetch(`/api/knowledge/${id}/documents/${doc.id}`, {
-          method: 'PUT',
-          headers: {
-            'Content-Type': 'application/json',
-          },
-          body: JSON.stringify({
-            markFailedDueToTimeout: true,
-          }),
-        })
-
-        if (!response.ok) {
-          const errorData = await response.json().catch(() => ({ error: 'Unknown error' }))
-          logger.error(`Failed to mark document ${doc.id} as failed: ${errorData.error}`)
-          return
-        }
-
-        const result = await response.json()
-        if (result.success) {
-          logger.info(`Successfully marked dead process as failed for document: ${doc.filename}`)
-        }
-      } catch (error) {
-        logger.error(`Error marking document ${doc.id} as failed:`, error)
-      }
-    })
-
-    await Promise.allSettled(markFailedPromises)
+    staleDocuments.forEach((doc) => {
+      updateDocumentMutation(
+        {
+          knowledgeBaseId: id,
+          documentId: doc.id,
+          updates: { markFailedDueToTimeout: true },
+        },
+        {
+          onSuccess: () => {
+            logger.info(`Successfully marked dead process as failed for document: ${doc.filename}`)
+          },
+        }
+      )
+    })
   }

-  const handleToggleEnabled = async (docId: string) => {
+  const handleToggleEnabled = (docId: string) => {
     const document = documents.find((doc) => doc.id === docId)
     if (!document) return

     const newEnabled = !document.enabled

+    // Optimistic update
     updateDocument(docId, { enabled: newEnabled })

-    try {
-      const response = await fetch(`/api/knowledge/${id}/documents/${docId}`, {
-        method: 'PUT',
-        headers: {
-          'Content-Type': 'application/json',
-        },
-        body: JSON.stringify({
-          enabled: newEnabled,
-        }),
-      })
-
-      if (!response.ok) {
-        throw new Error('Failed to update document')
-      }
-
-      const result = await response.json()
-      if (!result.success) {
-        updateDocument(docId, { enabled: !newEnabled })
-      }
-    } catch (err) {
-      updateDocument(docId, { enabled: !newEnabled })
-      logger.error('Error updating document:', err)
-    }
+    updateDocumentMutation(
+      {
+        knowledgeBaseId: id,
+        documentId: docId,
+        updates: { enabled: newEnabled },
+      },
+      {
+        onError: () => {
+          // Rollback on error
+          updateDocument(docId, { enabled: !newEnabled })
+        },
+      }
+    )
   }

   /**
    * Handles retrying a failed document processing
    */
-  const handleRetryDocument = async (docId: string) => {
-    try {
-      updateDocument(docId, {
-        processingStatus: 'pending',
-        processingError: null,
-        processingStartedAt: null,
-        processingCompletedAt: null,
-      })
-
-      const response = await fetch(`/api/knowledge/${id}/documents/${docId}`, {
-        method: 'PUT',
-        headers: {
-          'Content-Type': 'application/json',
-        },
-        body: JSON.stringify({
-          retryProcessing: true,
-        }),
-      })
-
-      if (!response.ok) {
-        throw new Error('Failed to retry document processing')
-      }
-
-      const result = await response.json()
-      if (!result.success) {
-        throw new Error(result.error || 'Failed to retry document processing')
-      }
-
-      await refreshDocuments()
-      logger.info(`Document retry initiated successfully for: ${docId}`)
-    } catch (err) {
-      logger.error('Error retrying document:', err)
-      const currentDoc = documents.find((doc) => doc.id === docId)
-      if (currentDoc) {
-        updateDocument(docId, {
-          processingStatus: 'failed',
-          processingError:
-            err instanceof Error ? err.message : 'Failed to retry document processing',
-        })
-      }
-    }
+  const handleRetryDocument = (docId: string) => {
+    // Optimistic update
+    updateDocument(docId, {
+      processingStatus: 'pending',
+      processingError: null,
+      processingStartedAt: null,
+      processingCompletedAt: null,
+    })
+
+    updateDocumentMutation(
+      {
+        knowledgeBaseId: id,
+        documentId: docId,
+        updates: { retryProcessing: true },
+      },
+      {
+        onSuccess: () => {
+          refreshDocuments()
+          logger.info(`Document retry initiated successfully for: ${docId}`)
+        },
+        onError: (err) => {
+          logger.error('Error retrying document:', err)
+          updateDocument(docId, {
+            processingStatus: 'failed',
+            processingError:
+              err instanceof Error ? err.message : 'Failed to retry document processing',
          })
        },
      }
    )
  }

   /**
@@ -694,43 +664,32 @@ export function KnowledgeBase({
     const currentDoc = documents.find((doc) => doc.id === documentId)
     const previousName = currentDoc?.filename

+    // Optimistic update
     updateDocument(documentId, { filename: newName })
-    queryClient.setQueryData<DocumentData>(knowledgeKeys.document(id, documentId), (previous) =>
-      previous ? { ...previous, filename: newName } : previous
-    )

-    try {
-      const response = await fetch(`/api/knowledge/${id}/documents/${documentId}`, {
-        method: 'PUT',
-        headers: {
-          'Content-Type': 'application/json',
-        },
-        body: JSON.stringify({ filename: newName }),
-      })
-
-      if (!response.ok) {
-        const result = await response.json()
-        throw new Error(result.error || 'Failed to rename document')
-      }
-
-      const result = await response.json()
-
-      if (!result.success) {
-        throw new Error(result.error || 'Failed to rename document')
-      }
-
-      logger.info(`Document renamed: ${documentId}`)
-    } catch (err) {
-      if (previousName !== undefined) {
-        updateDocument(documentId, { filename: previousName })
-        queryClient.setQueryData<DocumentData>(
-          knowledgeKeys.document(id, documentId),
-          (previous) => (previous ? { ...previous, filename: previousName } : previous)
-        )
-      }
-      logger.error('Error renaming document:', err)
-      throw err
-    }
+    return new Promise<void>((resolve, reject) => {
+      updateDocumentMutation(
+        {
+          knowledgeBaseId: id,
+          documentId,
+          updates: { filename: newName },
+        },
+        {
+          onSuccess: () => {
+            logger.info(`Document renamed: ${documentId}`)
+            resolve()
+          },
+          onError: (err) => {
+            // Rollback on error
+            if (previousName !== undefined) {
+              updateDocument(documentId, { filename: previousName })
+            }
+            logger.error('Error renaming document:', err)
+            reject(err)
+          },
+        }
+      )
+    })
   }

   /**
@@ -744,35 +703,26 @@ export function KnowledgeBase({
   /**
    * Confirms and executes the deletion of a single document
    */
-  const confirmDeleteDocument = async () => {
+  const confirmDeleteDocument = () => {
     if (!documentToDelete) return

-    try {
-      const response = await fetch(`/api/knowledge/${id}/documents/${documentToDelete}`, {
-        method: 'DELETE',
-      })
-
-      if (!response.ok) {
-        throw new Error('Failed to delete document')
-      }
-
-      const result = await response.json()
-      if (result.success) {
-        refreshDocuments()
-        setSelectedDocuments((prev) => {
-          const newSet = new Set(prev)
-          newSet.delete(documentToDelete)
-          return newSet
-        })
-      }
-    } catch (err) {
-      logger.error('Error deleting document:', err)
-    } finally {
-      setShowDeleteDocumentModal(false)
-      setDocumentToDelete(null)
-    }
+    deleteDocumentMutation(
+      { knowledgeBaseId: id, documentId: documentToDelete },
+      {
+        onSuccess: () => {
+          refreshDocuments()
+          setSelectedDocuments((prev) => {
+            const newSet = new Set(prev)
+            newSet.delete(documentToDelete)
+            return newSet
+          })
+        },
+        onSettled: () => {
+          setShowDeleteDocumentModal(false)
+          setDocumentToDelete(null)
+        },
+      }
+    )
   }

   /**
@@ -818,32 +768,18 @@ export function KnowledgeBase({
   /**
    * Handles deleting the entire knowledge base
    */
-  const handleDeleteKnowledgeBase = async () => {
+  const handleDeleteKnowledgeBase = () => {
     if (!knowledgeBase) return

-    try {
-      setIsDeleting(true)
-
-      const response = await fetch(`/api/knowledge/${id}`, {
-        method: 'DELETE',
-      })
-
-      if (!response.ok) {
-        throw new Error('Failed to delete knowledge base')
-      }
-
-      const result = await response.json()
-      if (result.success) {
-        removeKnowledgeBase(id)
-        router.push(`/workspace/${workspaceId}/knowledge`)
-      } else {
-        throw new Error(result.error || 'Failed to delete knowledge base')
-      }
-    } catch (err) {
-      logger.error('Error deleting knowledge base:', err)
-      setIsDeleting(false)
-    }
+    deleteKnowledgeBaseMutation(
+      { knowledgeBaseId: id },
+      {
+        onSuccess: () => {
+          removeKnowledgeBase(id)
+          router.push(`/workspace/${workspaceId}/knowledge`)
+        },
+      }
+    )
   }

   /**
@@ -856,93 +792,57 @@ export function KnowledgeBase({
   /**
    * Handles bulk enabling of selected documents
    */
-  const handleBulkEnable = async () => {
+  const handleBulkEnable = () => {
     const documentsToEnable = documents.filter(
       (doc) => selectedDocuments.has(doc.id) && !doc.enabled
     )

     if (documentsToEnable.length === 0) return

-    try {
-      setIsBulkOperating(true)
-
-      const response = await fetch(`/api/knowledge/${id}/documents`, {
-        method: 'PATCH',
-        headers: {
-          'Content-Type': 'application/json',
-        },
-        body: JSON.stringify({
-          operation: 'enable',
-          documentIds: documentsToEnable.map((doc) => doc.id),
-        }),
-      })
-
-      if (!response.ok) {
-        throw new Error('Failed to enable documents')
-      }
-
-      const result = await response.json()
-      if (result.success) {
-        result.data.updatedDocuments.forEach((updatedDoc: { id: string; enabled: boolean }) => {
-          updateDocument(updatedDoc.id, { enabled: updatedDoc.enabled })
-        })
-        logger.info(`Successfully enabled ${result.data.successCount} documents`)
-      }
-
-      setSelectedDocuments(new Set())
-    } catch (err) {
-      logger.error('Error enabling documents:', err)
-    } finally {
-      setIsBulkOperating(false)
-    }
+    bulkDocumentMutation(
+      {
+        knowledgeBaseId: id,
+        operation: 'enable',
+        documentIds: documentsToEnable.map((doc) => doc.id),
+      },
+      {
+        onSuccess: (result) => {
+          result.updatedDocuments?.forEach((updatedDoc) => {
+            updateDocument(updatedDoc.id, { enabled: updatedDoc.enabled })
+          })
+          logger.info(`Successfully enabled ${result.successCount} documents`)
+          setSelectedDocuments(new Set())
+        },
+      }
+    )
   }

   /**
    * Handles bulk disabling of selected documents
    */
-  const handleBulkDisable = async () => {
+  const handleBulkDisable = () => {
     const documentsToDisable = documents.filter(
       (doc) => selectedDocuments.has(doc.id) && doc.enabled
     )

     if (documentsToDisable.length === 0) return

-    try {
-      setIsBulkOperating(true)
-
-      const response = await fetch(`/api/knowledge/${id}/documents`, {
-        method: 'PATCH',
-        headers: {
-          'Content-Type': 'application/json',
-        },
-        body: JSON.stringify({
-          operation: 'disable',
-          documentIds: documentsToDisable.map((doc) => doc.id),
-        }),
-      })
-
-      if (!response.ok) {
-        throw new Error('Failed to disable documents')
-      }
-
-      const result = await response.json()
-      if (result.success) {
-        result.data.updatedDocuments.forEach((updatedDoc: { id: string; enabled: boolean }) => {
-          updateDocument(updatedDoc.id, { enabled: updatedDoc.enabled })
-        })
-        logger.info(`Successfully disabled ${result.data.successCount} documents`)
-      }
-
-      setSelectedDocuments(new Set())
-    } catch (err) {
-      logger.error('Error disabling documents:', err)
-    } finally {
-      setIsBulkOperating(false)
-    }
+    bulkDocumentMutation(
+      {
+        knowledgeBaseId: id,
+        operation: 'disable',
+        documentIds: documentsToDisable.map((doc) => doc.id),
+      },
+      {
+        onSuccess: (result) => {
+          result.updatedDocuments?.forEach((updatedDoc) => {
+            updateDocument(updatedDoc.id, { enabled: updatedDoc.enabled })
+          })
+          logger.info(`Successfully disabled ${result.successCount} documents`)
+          setSelectedDocuments(new Set())
+        },
+      }
+    )
   }

   /**
@@ -956,44 +856,28 @@ export function KnowledgeBase({
   /**
    * Confirms and executes the bulk deletion of selected documents
    */
-  const confirmBulkDelete = async () => {
+  const confirmBulkDelete = () => {
     const documentsToDelete = documents.filter((doc) => selectedDocuments.has(doc.id))
     if (documentsToDelete.length === 0) return

-    try {
-      setIsBulkOperating(true)
-
-      const response = await fetch(`/api/knowledge/${id}/documents`, {
-        method: 'PATCH',
-        headers: {
-          'Content-Type': 'application/json',
-        },
-        body: JSON.stringify({
-          operation: 'delete',
-          documentIds: documentsToDelete.map((doc) => doc.id),
-        }),
-      })
-
-      if (!response.ok) {
-        throw new Error('Failed to delete documents')
-      }
-
-      const result = await response.json()
-      if (result.success) {
-        logger.info(`Successfully deleted ${result.data.successCount} documents`)
-      }
-
-      await refreshDocuments()
-      setSelectedDocuments(new Set())
-    } catch (err) {
-      logger.error('Error deleting documents:', err)
-    } finally {
-      setIsBulkOperating(false)
-      setShowBulkDeleteModal(false)
-    }
+    bulkDocumentMutation(
+      {
+        knowledgeBaseId: id,
+        operation: 'delete',
+        documentIds: documentsToDelete.map((doc) => doc.id),
+      },
+      {
+        onSuccess: (result) => {
+          logger.info(`Successfully deleted ${result.successCount} documents`)
+          refreshDocuments()
+          setSelectedDocuments(new Set())
+        },
+        onSettled: () => {
+          setShowBulkDeleteModal(false)
+        },
+      }
+    )
   }

   const selectedDocumentsList = documents.filter((doc) => selectedDocuments.has(doc.id))
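
Note: the bulk handlers now read `result.successCount` and `result.updatedDocuments` directly, where the removed fetch code read `result.data.successCount`, so `useBulkDocumentOperation` presumably unwraps the API's `{ success, data }` envelope. A sketch under that assumption, with names inferred from the call sites above:

```ts
import { useMutation } from '@tanstack/react-query'

interface BulkDocumentParams {
  knowledgeBaseId: string
  operation: 'enable' | 'disable' | 'delete'
  documentIds: string[]
}

interface BulkDocumentResult {
  successCount: number
  updatedDocuments?: { id: string; enabled: boolean }[]
}

export function useBulkDocumentOperation() {
  return useMutation<BulkDocumentResult, Error, BulkDocumentParams>({
    // Same PATCH endpoint and body shape the removed inline fetch used
    mutationFn: async ({ knowledgeBaseId, operation, documentIds }) => {
      const response = await fetch(`/api/knowledge/${knowledgeBaseId}/documents`, {
        method: 'PATCH',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({ operation, documentIds }),
      })
      if (!response.ok) throw new Error(`Failed to ${operation} documents`)
      const result = await response.json()
      if (!result.success) throw new Error(result.error || `Failed to ${operation} documents`)
      // Unwrap the envelope so callers see successCount/updatedDocuments directly
      return result.data as BulkDocumentResult
    },
  })
}
```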

View File

@@ -22,10 +22,10 @@ import {
   type TagDefinition,
   useKnowledgeBaseTagDefinitions,
 } from '@/hooks/kb/use-knowledge-base-tag-definitions'
+import { useCreateTagDefinition, useDeleteTagDefinition } from '@/hooks/queries/knowledge'

 const logger = createLogger('BaseTagsModal')

-/** Field type display labels */
 const FIELD_TYPE_LABELS: Record<string, string> = {
   text: 'Text',
   number: 'Number',
@@ -45,7 +45,6 @@ interface DocumentListProps {
   totalCount: number
 }

-/** Displays a list of documents affected by tag operations */
 function DocumentList({ documents, totalCount }: DocumentListProps) {
   const displayLimit = 5
   const hasMore = totalCount > displayLimit
@@ -95,13 +94,14 @@ export function BaseTagsModal({ open, onOpenChange, knowledgeBaseId }: BaseTagsM
   const { tagDefinitions: kbTagDefinitions, fetchTagDefinitions: refreshTagDefinitions } =
     useKnowledgeBaseTagDefinitions(knowledgeBaseId)
+  const createTagMutation = useCreateTagDefinition()
+  const deleteTagMutation = useDeleteTagDefinition()

   const [deleteTagDialogOpen, setDeleteTagDialogOpen] = useState(false)
   const [selectedTag, setSelectedTag] = useState<TagDefinition | null>(null)
   const [viewDocumentsDialogOpen, setViewDocumentsDialogOpen] = useState(false)
-  const [isDeletingTag, setIsDeletingTag] = useState(false)
   const [tagUsageData, setTagUsageData] = useState<TagUsageData[]>([])
   const [isCreatingTag, setIsCreatingTag] = useState(false)
-  const [isSavingTag, setIsSavingTag] = useState(false)
   const [createTagForm, setCreateTagForm] = useState({
     displayName: '',
     fieldType: 'text',
@@ -177,13 +177,12 @@ export function BaseTagsModal({ open, onOpenChange, knowledgeBaseId }: BaseTagsM
   }

   const tagNameConflict =
-    isCreatingTag && !isSavingTag && hasTagNameConflict(createTagForm.displayName)
+    isCreatingTag && !createTagMutation.isPending && hasTagNameConflict(createTagForm.displayName)

   const canSaveTag = () => {
     return createTagForm.displayName.trim() && !hasTagNameConflict(createTagForm.displayName)
   }

-  /** Get slot usage counts per field type */
   const getSlotUsageByFieldType = (fieldType: string): { used: number; max: number } => {
     const config = TAG_SLOT_CONFIG[fieldType as keyof typeof TAG_SLOT_CONFIG]
     if (!config) return { used: 0, max: 0 }
@@ -191,13 +190,11 @@ export function BaseTagsModal({ open, onOpenChange, knowledgeBaseId }: BaseTagsM
     return { used, max: config.maxSlots }
   }

-  /** Check if a field type has available slots */
   const hasAvailableSlots = (fieldType: string): boolean => {
     const { used, max } = getSlotUsageByFieldType(fieldType)
     return used < max
   }

-  /** Field type options for Combobox */
   const fieldTypeOptions: ComboboxOption[] = useMemo(() => {
     return SUPPORTED_FIELD_TYPES.filter((type) => hasAvailableSlots(type)).map((type) => {
       const { used, max } = getSlotUsageByFieldType(type)
@@ -211,43 +208,17 @@ export function BaseTagsModal({ open, onOpenChange, knowledgeBaseId }: BaseTagsM
   const saveTagDefinition = async () => {
     if (!canSaveTag()) return

-    setIsSavingTag(true)
     try {
-      // Check if selected field type has available slots
       if (!hasAvailableSlots(createTagForm.fieldType)) {
         throw new Error(`No available slots for ${createTagForm.fieldType} type`)
       }

-      // Get the next available slot from the API
-      const slotResponse = await fetch(
-        `/api/knowledge/${knowledgeBaseId}/next-available-slot?fieldType=${createTagForm.fieldType}`
-      )
-
-      if (!slotResponse.ok) {
-        throw new Error('Failed to get available slot')
-      }
-
-      const slotResult = await slotResponse.json()
-      if (!slotResult.success || !slotResult.data?.nextAvailableSlot) {
-        throw new Error('No available tag slots for this field type')
-      }
-
-      const newTagDefinition = {
-        tagSlot: slotResult.data.nextAvailableSlot,
+      await createTagMutation.mutateAsync({
+        knowledgeBaseId,
         displayName: createTagForm.displayName.trim(),
         fieldType: createTagForm.fieldType,
-      }
-
-      const response = await fetch(`/api/knowledge/${knowledgeBaseId}/tag-definitions`, {
-        method: 'POST',
-        headers: {
-          'Content-Type': 'application/json',
-        },
-        body: JSON.stringify(newTagDefinition),
-      })
-
-      if (!response.ok) {
-        throw new Error('Failed to create tag definition')
-      }
+      })

       await Promise.all([refreshTagDefinitions(), fetchTagUsage()])

       setCreateTagForm({
@@ -257,27 +228,17 @@ export function BaseTagsModal({ open, onOpenChange, knowledgeBaseId }: BaseTagsM
       setIsCreatingTag(false)
     } catch (error) {
       logger.error('Error creating tag definition:', error)
-    } finally {
-      setIsSavingTag(false)
     }
   }

   const confirmDeleteTag = async () => {
     if (!selectedTag) return

-    setIsDeletingTag(true)
     try {
-      const response = await fetch(
-        `/api/knowledge/${knowledgeBaseId}/tag-definitions/${selectedTag.id}`,
-        {
-          method: 'DELETE',
-        }
-      )
-
-      if (!response.ok) {
-        const errorText = await response.text()
-        throw new Error(`Failed to delete tag definition: ${response.status} ${errorText}`)
-      }
+      await deleteTagMutation.mutateAsync({
+        knowledgeBaseId,
+        tagDefinitionId: selectedTag.id,
+      })

       await Promise.all([refreshTagDefinitions(), fetchTagUsage()])

@@ -285,8 +246,6 @@ export function BaseTagsModal({ open, onOpenChange, knowledgeBaseId }: BaseTagsM
       setSelectedTag(null)
     } catch (error) {
       logger.error('Error deleting tag definition:', error)
-    } finally {
-      setIsDeletingTag(false)
     }
   }

@@ -433,11 +392,11 @@ export function BaseTagsModal({ open, onOpenChange, knowledgeBaseId }: BaseTagsM
                 className='flex-1'
                 disabled={
                   !canSaveTag() ||
-                  isSavingTag ||
+                  createTagMutation.isPending ||
                   !hasAvailableSlots(createTagForm.fieldType)
                 }
               >
-                {isSavingTag ? 'Creating...' : 'Create Tag'}
+                {createTagMutation.isPending ? 'Creating...' : 'Create Tag'}
               </Button>
             </div>
           </div>
@@ -481,13 +440,17 @@ export function BaseTagsModal({ open, onOpenChange, knowledgeBaseId }: BaseTagsM
           <ModalFooter>
             <Button
               variant='default'
-              disabled={isDeletingTag}
+              disabled={deleteTagMutation.isPending}
               onClick={() => setDeleteTagDialogOpen(false)}
             >
               Cancel
             </Button>
-            <Button variant='destructive' onClick={confirmDeleteTag} disabled={isDeletingTag}>
-              {isDeletingTag ? <>Deleting...</> : 'Delete Tag'}
+            <Button
+              variant='destructive'
+              onClick={confirmDeleteTag}
+              disabled={deleteTagMutation.isPending}
+            >
+              {deleteTagMutation.isPending ? 'Deleting...' : 'Delete Tag'}
             </Button>
           </ModalFooter>
         </ModalContent>
@@ -499,7 +462,7 @@ export function BaseTagsModal({ open, onOpenChange, knowledgeBaseId }: BaseTagsM
             <ModalHeader>Documents using "{selectedTag?.displayName}"</ModalHeader>
             <ModalBody>
               <div className='space-y-[8px]'>
-                <p className='text-[12px] text-[var(--text-tertiary)]'>
+                <p className='text-[12px] text-[var(--text-secondary)]'>
                   {selectedTagUsage?.documentCount || 0} document
                   {selectedTagUsage?.documentCount !== 1 ? 's are' : ' is'} currently using this tag
                   definition.
@@ -507,7 +470,7 @@ export function BaseTagsModal({ open, onOpenChange, knowledgeBaseId }: BaseTagsM
                 {selectedTagUsage?.documentCount === 0 ? (
                   <div className='rounded-[6px] border p-[16px] text-center'>
-                    <p className='text-[12px] text-[var(--text-tertiary)]'>
+                    <p className='text-[12px] text-[var(--text-secondary)]'>
                       This tag definition is not being used by any documents. You can safely delete it
                       to free up the tag slot.
                     </p>
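
Note: the removed code resolved a slot via `next-available-slot` before POSTing the tag definition, while the new call site passes only `knowledgeBaseId`, `displayName`, and `fieldType`. Presumably that two-step flow moved into `useCreateTagDefinition` (or the server now assigns the slot). A sketch assuming the hook kept both requests; the endpoint paths come from the removed code, the rest is an assumption:

```ts
import { useMutation } from '@tanstack/react-query'

interface CreateTagParams {
  knowledgeBaseId: string
  displayName: string
  fieldType: string
}

export function useCreateTagDefinition() {
  return useMutation({
    mutationFn: async ({ knowledgeBaseId, displayName, fieldType }: CreateTagParams) => {
      // Step 1: the slot lookup the component used to do inline
      const slotResponse = await fetch(
        `/api/knowledge/${knowledgeBaseId}/next-available-slot?fieldType=${fieldType}`
      )
      if (!slotResponse.ok) throw new Error('Failed to get available slot')
      const slotResult = await slotResponse.json()
      if (!slotResult.success || !slotResult.data?.nextAvailableSlot) {
        throw new Error('No available tag slots for this field type')
      }

      // Step 2: create the definition in the resolved slot
      const response = await fetch(`/api/knowledge/${knowledgeBaseId}/tag-definitions`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({
          tagSlot: slotResult.data.nextAvailableSlot,
          displayName,
          fieldType,
        }),
      })
      if (!response.ok) throw new Error('Failed to create tag definition')
      return (await response.json()).data
    },
  })
}
```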

View File

@@ -3,7 +3,6 @@
 import { useEffect, useRef, useState } from 'react'
 import { zodResolver } from '@hookform/resolvers/zod'
 import { createLogger } from '@sim/logger'
-import { useQueryClient } from '@tanstack/react-query'
 import { Loader2, RotateCcw, X } from 'lucide-react'
 import { useParams } from 'next/navigation'
 import { useForm } from 'react-hook-form'
@@ -23,7 +22,7 @@ import { cn } from '@/lib/core/utils/cn'
 import { formatFileSize, validateKnowledgeBaseFile } from '@/lib/uploads/utils/file-utils'
 import { ACCEPT_ATTRIBUTE } from '@/lib/uploads/utils/validation'
 import { useKnowledgeUpload } from '@/app/workspace/[workspaceId]/knowledge/hooks/use-knowledge-upload'
-import { knowledgeKeys } from '@/hooks/queries/knowledge'
+import { useCreateKnowledgeBase, useDeleteKnowledgeBase } from '@/hooks/queries/knowledge'

 const logger = createLogger('CreateBaseModal')

@@ -82,10 +81,11 @@ interface SubmitStatus {
 export function CreateBaseModal({ open, onOpenChange }: CreateBaseModalProps) {
   const params = useParams()
   const workspaceId = params.workspaceId as string
-  const queryClient = useQueryClient()
+  const createKnowledgeBaseMutation = useCreateKnowledgeBase(workspaceId)
+  const deleteKnowledgeBaseMutation = useDeleteKnowledgeBase(workspaceId)
   const fileInputRef = useRef<HTMLInputElement>(null)
-  const [isSubmitting, setIsSubmitting] = useState(false)
   const [submitStatus, setSubmitStatus] = useState<SubmitStatus | null>(null)
   const [files, setFiles] = useState<FileWithPreview[]>([])
   const [fileError, setFileError] = useState<string | null>(null)
@@ -245,12 +245,14 @@ export function CreateBaseModal({ open, onOpenChange }: CreateBaseModalProps) {
     })
   }

+  const isSubmitting =
+    createKnowledgeBaseMutation.isPending || deleteKnowledgeBaseMutation.isPending || isUploading
+
   const onSubmit = async (data: FormValues) => {
-    setIsSubmitting(true)
     setSubmitStatus(null)

     try {
-      const knowledgeBasePayload = {
+      const newKnowledgeBase = await createKnowledgeBaseMutation.mutateAsync({
         name: data.name,
         description: data.description || undefined,
         workspaceId: workspaceId,
@@ -259,29 +261,8 @@ export function CreateBaseModal({ open, onOpenChange }: CreateBaseModalProps) {
           minSize: data.minChunkSize,
           overlap: data.overlapSize,
         },
-      }
-
-      const response = await fetch('/api/knowledge', {
-        method: 'POST',
-        headers: {
-          'Content-Type': 'application/json',
-        },
-        body: JSON.stringify(knowledgeBasePayload),
-      })
-
-      if (!response.ok) {
-        const errorData = await response.json()
-        throw new Error(errorData.error || 'Failed to create knowledge base')
-      }
-
-      const result = await response.json()
-      if (!result.success) {
-        throw new Error(result.error || 'Failed to create knowledge base')
-      }
-
-      const newKnowledgeBase = result.data
+      })

       if (files.length > 0) {
         try {
           const uploadedFiles = await uploadFiles(files, newKnowledgeBase.id, {
@@ -293,15 +274,11 @@ export function CreateBaseModal({ open, onOpenChange }: CreateBaseModalProps) {
           logger.info(`Successfully uploaded ${uploadedFiles.length} files`)
           logger.info(`Started processing ${uploadedFiles.length} documents in the background`)
-
-          await queryClient.invalidateQueries({
-            queryKey: knowledgeKeys.list(workspaceId),
-          })
         } catch (uploadError) {
           logger.error('File upload failed, deleting knowledge base:', uploadError)

           try {
-            await fetch(`/api/knowledge/${newKnowledgeBase.id}`, {
-              method: 'DELETE',
-            })
+            await deleteKnowledgeBaseMutation.mutateAsync({
+              knowledgeBaseId: newKnowledgeBase.id,
+            })
             logger.info(`Deleted orphaned knowledge base: ${newKnowledgeBase.id}`)
           } catch (deleteError) {
@@ -309,10 +286,6 @@ export function CreateBaseModal({ open, onOpenChange }: CreateBaseModalProps) {
           }

           throw uploadError
         }
-      } else {
-        await queryClient.invalidateQueries({
-          queryKey: knowledgeKeys.list(workspaceId),
-        })
       }

       files.forEach((file) => URL.revokeObjectURL(file.preview))
@@ -325,8 +298,6 @@ export function CreateBaseModal({ open, onOpenChange }: CreateBaseModalProps) {
         type: 'error',
         message: error instanceof Error ? error.message : 'An unknown error occurred',
       })
-    } finally {
-      setIsSubmitting(false)
     }
   }
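
Note: `isSubmitting` is now derived from the mutations' `isPending` flags plus `isUploading`, so the `finally { setIsSubmitting(false) }` bookkeeping disappears. The manual `knowledgeKeys.list(workspaceId)` invalidations were also dropped, which suggests `useCreateKnowledgeBase` invalidates the list itself. A sketch of that shape; the hook body is an assumption:

```ts
import { useMutation, useQueryClient } from '@tanstack/react-query'

export function useCreateKnowledgeBase(workspaceId: string) {
  const queryClient = useQueryClient()
  return useMutation({
    // Same POST endpoint and envelope the removed inline fetch used
    mutationFn: async (payload: Record<string, unknown>) => {
      const response = await fetch('/api/knowledge', {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify(payload),
      })
      if (!response.ok) {
        const errorData = await response.json()
        throw new Error(errorData.error || 'Failed to create knowledge base')
      }
      const result = await response.json()
      if (!result.success) throw new Error(result.error || 'Failed to create knowledge base')
      return result.data
    },
    // Replaces the invalidation each call site used to do by hand.
    // knowledgeKeys is the query-key factory defined alongside these hooks.
    onSuccess: () => {
      queryClient.invalidateQueries({ queryKey: knowledgeKeys.list(workspaceId) })
    },
  })
}
```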

View File

@@ -2,7 +2,6 @@
 import { useEffect, useState } from 'react'
 import { createLogger } from '@sim/logger'
-import { useQueryClient } from '@tanstack/react-query'
 import { AlertTriangle, ChevronDown, LibraryBig, MoreHorizontal } from 'lucide-react'
 import Link from 'next/link'
 import {
@@ -15,7 +14,7 @@ import {
 } from '@/components/emcn'
 import { Trash } from '@/components/emcn/icons/trash'
 import { filterButtonClass } from '@/app/workspace/[workspaceId]/knowledge/components/constants'
-import { knowledgeKeys } from '@/hooks/queries/knowledge'
+import { useUpdateKnowledgeBase } from '@/hooks/queries/knowledge'

 const logger = createLogger('KnowledgeHeader')

@@ -54,14 +53,13 @@ interface Workspace {
 }

 export function KnowledgeHeader({ breadcrumbs, options }: KnowledgeHeaderProps) {
-  const queryClient = useQueryClient()
   const [isActionsPopoverOpen, setIsActionsPopoverOpen] = useState(false)
   const [isWorkspacePopoverOpen, setIsWorkspacePopoverOpen] = useState(false)
   const [workspaces, setWorkspaces] = useState<Workspace[]>([])
   const [isLoadingWorkspaces, setIsLoadingWorkspaces] = useState(false)
-  const [isUpdatingWorkspace, setIsUpdatingWorkspace] = useState(false)
+  const updateKnowledgeBase = useUpdateKnowledgeBase()

-  // Fetch available workspaces
   useEffect(() => {
     if (!options?.knowledgeBaseId) return
@@ -76,7 +74,6 @@ export function KnowledgeHeader({ breadcrumbs, options }: KnowledgeHeaderProps)
       const data = await response.json()

-      // Filter workspaces where user has write/admin permissions
       const availableWorkspaces = data.workspaces
         .filter((ws: any) => ws.permissions === 'write' || ws.permissions === 'admin')
         .map((ws: any) => ({
@@ -97,47 +94,27 @@ export function KnowledgeHeader({ breadcrumbs, options }: KnowledgeHeaderProps)
   }, [options?.knowledgeBaseId])

   const handleWorkspaceChange = async (workspaceId: string | null) => {
-    if (isUpdatingWorkspace || !options?.knowledgeBaseId) return
+    if (updateKnowledgeBase.isPending || !options?.knowledgeBaseId) return

-    try {
-      setIsUpdatingWorkspace(true)
-      setIsWorkspacePopoverOpen(false)
+    setIsWorkspacePopoverOpen(false)

-      const response = await fetch(`/api/knowledge/${options.knowledgeBaseId}`, {
-        method: 'PUT',
-        headers: {
-          'Content-Type': 'application/json',
-        },
-        body: JSON.stringify({
-          workspaceId,
-        }),
-      })
-
-      if (!response.ok) {
-        const result = await response.json()
-        throw new Error(result.error || 'Failed to update workspace')
-      }
-
-      const result = await response.json()
-      if (result.success) {
-        logger.info(
-          `Knowledge base workspace updated: ${options.knowledgeBaseId} -> ${workspaceId}`
-        )
-        await queryClient.invalidateQueries({
-          queryKey: knowledgeKeys.detail(options.knowledgeBaseId),
-        })
-        await options.onWorkspaceChange?.(workspaceId)
-      } else {
-        throw new Error(result.error || 'Failed to update workspace')
-      }
-    } catch (err) {
-      logger.error('Error updating workspace:', err)
-    } finally {
-      setIsUpdatingWorkspace(false)
-    }
+    updateKnowledgeBase.mutate(
+      {
+        knowledgeBaseId: options.knowledgeBaseId,
+        updates: { workspaceId },
+      },
+      {
+        onSuccess: () => {
+          logger.info(
+            `Knowledge base workspace updated: ${options.knowledgeBaseId} -> ${workspaceId}`
+          )
+          options.onWorkspaceChange?.(workspaceId)
+        },
+        onError: (err) => {
+          logger.error('Error updating workspace:', err)
+        },
+      }
+    )
   }

   const currentWorkspace = workspaces.find((ws) => ws.id === options?.currentWorkspaceId)
@@ -147,7 +124,6 @@ export function KnowledgeHeader({ breadcrumbs, options }: KnowledgeHeaderProps)
     <div className={HEADER_STYLES.container}>
       <div className={HEADER_STYLES.breadcrumbs}>
         {breadcrumbs.map((breadcrumb, index) => {
-          // Use unique identifier when available, fallback to content-based key
           const key = breadcrumb.id || `${breadcrumb.label}-${breadcrumb.href || index}`

           return (
@@ -189,13 +165,13 @@ export function KnowledgeHeader({ breadcrumbs, options }: KnowledgeHeaderProps)
             <PopoverTrigger asChild>
               <Button
                 variant='outline'
-                disabled={isLoadingWorkspaces || isUpdatingWorkspace}
+                disabled={isLoadingWorkspaces || updateKnowledgeBase.isPending}
                 className={filterButtonClass}
               >
                 <span className='truncate'>
                   {isLoadingWorkspaces
                     ? 'Loading...'
-                    : isUpdatingWorkspace
+                    : updateKnowledgeBase.isPending
                       ? 'Updating...'
                       : currentWorkspace?.name || 'No workspace'}
                 </span>

View File

@@ -32,6 +32,7 @@ import {
 import { useUserPermissionsContext } from '@/app/workspace/[workspaceId]/providers/workspace-permissions-provider'
 import { useContextMenu } from '@/app/workspace/[workspaceId]/w/components/sidebar/hooks'
 import { useKnowledgeBasesList } from '@/hooks/kb/use-knowledge'
+import { useDeleteKnowledgeBase, useUpdateKnowledgeBase } from '@/hooks/queries/knowledge'
 import { useDebounce } from '@/hooks/use-debounce'

 const logger = createLogger('Knowledge')

@@ -51,10 +52,12 @@ export function Knowledge() {
   const params = useParams()
   const workspaceId = params.workspaceId as string
-  const { knowledgeBases, isLoading, error, removeKnowledgeBase, updateKnowledgeBase } =
-    useKnowledgeBasesList(workspaceId)
+  const { knowledgeBases, isLoading, error } = useKnowledgeBasesList(workspaceId)
   const userPermissions = useUserPermissionsContext()
+  const { mutateAsync: updateKnowledgeBaseMutation } = useUpdateKnowledgeBase(workspaceId)
+  const { mutateAsync: deleteKnowledgeBaseMutation } = useDeleteKnowledgeBase(workspaceId)

   const [searchQuery, setSearchQuery] = useState('')
   const debouncedSearchQuery = useDebounce(searchQuery, 300)
   const [isCreateModalOpen, setIsCreateModalOpen] = useState(false)
@@ -112,29 +115,13 @@ export function Knowledge() {
    */
   const handleUpdateKnowledgeBase = useCallback(
     async (id: string, name: string, description: string) => {
-      const response = await fetch(`/api/knowledge/${id}`, {
-        method: 'PUT',
-        headers: {
-          'Content-Type': 'application/json',
-        },
-        body: JSON.stringify({ name, description }),
-      })
-
-      if (!response.ok) {
-        const result = await response.json()
-        throw new Error(result.error || 'Failed to update knowledge base')
-      }
-
-      const result = await response.json()
-      if (result.success) {
-        logger.info(`Knowledge base updated: ${id}`)
-        updateKnowledgeBase(id, { name, description })
-      } else {
-        throw new Error(result.error || 'Failed to update knowledge base')
-      }
+      await updateKnowledgeBaseMutation({
+        knowledgeBaseId: id,
+        updates: { name, description },
+      })
+      logger.info(`Knowledge base updated: ${id}`)
     },
-    [updateKnowledgeBase]
+    [updateKnowledgeBaseMutation]
   )

   /**
@@ -142,25 +129,10 @@ export function Knowledge() {
    */
   const handleDeleteKnowledgeBase = useCallback(
     async (id: string) => {
-      const response = await fetch(`/api/knowledge/${id}`, {
-        method: 'DELETE',
-      })
-
-      if (!response.ok) {
-        const result = await response.json()
-        throw new Error(result.error || 'Failed to delete knowledge base')
-      }
-
-      const result = await response.json()
-      if (result.success) {
-        logger.info(`Knowledge base deleted: ${id}`)
-        removeKnowledgeBase(id)
-      } else {
-        throw new Error(result.error || 'Failed to delete knowledge base')
-      }
+      await deleteKnowledgeBaseMutation({ knowledgeBaseId: id })
+      logger.info(`Knowledge base deleted: ${id}`)
     },
-    [removeKnowledgeBase]
+    [deleteKnowledgeBaseMutation]
   )

   /**
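
Note: these handlers switch to `mutateAsync`, which returns a promise that rejects on failure, so errors still propagate to the modals that call `handleUpdateKnowledgeBase`/`handleDeleteKnowledgeBase`, matching the old code's `throw new Error(...)` behavior; the dropped `removeKnowledgeBase`/`updateKnowledgeBase` list updates presumably happen via cache invalidation inside the hooks. A small illustration of the difference, using a hypothetical helper not taken from the diff:

```ts
// Hypothetical helper: with mutateAsync the caller can try/catch; with
// mutate, failures surface only through the onError callback.
async function deleteWithResult(
  deleteKb: (vars: { knowledgeBaseId: string }) => Promise<unknown>,
  id: string
): Promise<string | null> {
  try {
    await deleteKb({ knowledgeBaseId: id }) // rejects if the mutation fails
    return null // no error
  } catch (err) {
    return err instanceof Error ? err.message : 'Failed to delete knowledge base'
  }
}
```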

View File

@@ -4,13 +4,13 @@ import type React from 'react'
 import { createContext, useContext, useEffect, useMemo, useState } from 'react'
 import { createLogger } from '@sim/logger'
 import { useParams } from 'next/navigation'
-import { useCollaborativeWorkflow } from '@/hooks/use-collaborative-workflow'
 import { useUserPermissions, type WorkspaceUserPermissions } from '@/hooks/use-user-permissions'
 import {
   useWorkspacePermissions,
   type WorkspacePermissions,
 } from '@/hooks/use-workspace-permissions'
 import { useNotificationStore } from '@/stores/notifications'
+import { useOperationQueueStore } from '@/stores/operation-queue/store'

 const logger = createLogger('WorkspacePermissionsProvider')

@@ -64,8 +64,8 @@ export function WorkspacePermissionsProvider({ children }: WorkspacePermissionsP
   // Track whether we've already surfaced an offline notification to avoid duplicates
   const [hasShownOfflineNotification, setHasShownOfflineNotification] = useState(false)

-  // Get operation error state from collaborative workflow
-  const { hasOperationError } = useCollaborativeWorkflow()
+  // Get operation error state directly from the store (avoid full useCollaborativeWorkflow subscription)
+  const hasOperationError = useOperationQueueStore((state) => state.hasOperationError)

   const addNotification = useNotificationStore((state) => state.addNotification)
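
Note: subscribing through a narrow selector means this provider re-renders only when `hasOperationError` flips, rather than on every state change the composite `useCollaborativeWorkflow` hook observes. A standalone illustration of the pattern, using a demo zustand store rather than the real one:

```tsx
import { create } from 'zustand'

// Demo store standing in for useOperationQueueStore (assumed shape)
const useDemoQueueStore = create<{ queue: string[]; hasOperationError: boolean }>(() => ({
  queue: [],
  hasOperationError: false,
}))

export function ErrorBanner() {
  // Re-renders only when hasOperationError changes, not when `queue` does
  const hasError = useDemoQueueStore((state) => state.hasOperationError)
  return hasError ? <div>Some operations failed</div> : null
}
```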

View File

@@ -48,17 +48,17 @@ export const ActionBar = memo(
       collaborativeBatchToggleBlockEnabled,
       collaborativeBatchToggleBlockHandles,
     } = useCollaborativeWorkflow()
-    const { activeWorkflowId } = useWorkflowRegistry()
-    const blocks = useWorkflowStore((state) => state.blocks)
-    const subBlockStore = useSubBlockStore()
+    const { activeWorkflowId, setPendingSelection } = useWorkflowRegistry()

     const handleDuplicateBlock = useCallback(() => {
+      const blocks = useWorkflowStore.getState().blocks
       const sourceBlock = blocks[blockId]
       if (!sourceBlock) return

       const newId = crypto.randomUUID()
       const newName = getUniqueBlockName(sourceBlock.name, blocks)
-      const subBlockValues = subBlockStore.workflowValues[activeWorkflowId || '']?.[blockId] || {}
+      const subBlockValues =
+        useSubBlockStore.getState().workflowValues[activeWorkflowId || '']?.[blockId] || {}

       const { block, subBlockValues: filteredValues } = prepareDuplicateBlockState({
         sourceBlock,
@@ -68,18 +68,10 @@ export const ActionBar = memo(
         subBlockValues,
       })

+      setPendingSelection([newId])
       collaborativeBatchAddBlocks([block], [], {}, {}, { [newId]: filteredValues })
-    }, [
-      blockId,
-      blocks,
-      activeWorkflowId,
-      subBlockStore.workflowValues,
-      collaborativeBatchAddBlocks,
-    ])
+    }, [blockId, activeWorkflowId, collaborativeBatchAddBlocks, setPendingSelection])

-    /**
-     * Optimized single store subscription for all block data
-     */
     const { isEnabled, horizontalHandles, parentId, parentType } = useWorkflowStore(
       useCallback(
         (state) => {
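
Note: `handleDuplicateBlock` now reads blocks and sub-block values via `getState()` at call time instead of hook subscriptions, so the ActionBar no longer re-renders on every block or value change and the `useCallback` dependency list shrinks to stable references. A standalone illustration of the pattern with a demo store (not the real workflow store):

```tsx
import { useCallback } from 'react'
import { create } from 'zustand'

const useCounterStore = create<{ count: number }>(() => ({ count: 0 }))

export function LogCountButton({ log }: { log: (n: number) => void }) {
  const handleClick = useCallback(() => {
    // Fresh value at click time; no subscription, so no re-render on changes
    // and no `count` entry in the dependency array.
    log(useCounterStore.getState().count)
  }, [log])
  return <button onClick={handleClick}>Log count</button>
}
```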

View File

@@ -129,10 +129,6 @@ export function OutputSelect({
           ? baselineWorkflow.blocks?.[block.id]?.subBlocks?.responseFormat?.value
           : subBlockValues?.[block.id]?.responseFormat
       const responseFormat = parseResponseFormatSafely(responseFormatValue, block.id)
-      const operationValue =
-        shouldUseBaseline && baselineWorkflow
-          ? baselineWorkflow.blocks?.[block.id]?.subBlocks?.operation?.value
-          : subBlockValues?.[block.id]?.operation

       let outputsToProcess: Record<string, unknown> = {}

@@ -146,10 +142,20 @@ export function OutputSelect({
           outputsToProcess = blockConfig?.outputs || {}
         }
       } else {
-        const toolOutputs =
-          blockConfig && typeof operationValue === 'string'
-            ? getToolOutputs(blockConfig, operationValue)
-            : {}
+        // Build subBlocks object for tool selector
+        const rawSubBlockValues =
+          shouldUseBaseline && baselineWorkflow
+            ? baselineWorkflow.blocks?.[block.id]?.subBlocks
+            : subBlockValues?.[block.id]
+        const subBlocks: Record<string, { value: unknown }> = {}
+        if (rawSubBlockValues && typeof rawSubBlockValues === 'object') {
+          for (const [key, val] of Object.entries(rawSubBlockValues)) {
+            // Handle both { value: ... } and raw value formats
+            subBlocks[key] = val && typeof val === 'object' && 'value' in val ? val : { value: val }
+          }
+        }
+        const toolOutputs = blockConfig ? getToolOutputs(blockConfig, subBlocks) : {}

         outputsToProcess =
           Object.keys(toolOutputs).length > 0 ? toolOutputs : blockConfig?.outputs || {}
       }
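
Note: `getToolOutputs` apparently now takes the whole sub-block map rather than a single operation string, letting tool resolution consider fields beyond `operation` (consistent with the "parse non-operation fields" change elsewhere in this branch). The normalization guards against the two storage shapes; for example, with hypothetical values:

```ts
// Hypothetical inputs: depending on the source, values may arrive wrapped in
// { value: ... } or raw. Both shapes normalize to the wrapped form.
const wrapped = { operation: { value: 'search' } }
const raw = { operation: 'search' }

function normalizeSubBlocks(input: Record<string, unknown>) {
  const subBlocks: Record<string, { value: unknown }> = {}
  for (const [key, val] of Object.entries(input)) {
    subBlocks[key] =
      val && typeof val === 'object' && 'value' in val
        ? (val as { value: unknown })
        : { value: val }
  }
  return subBlocks
}

// Both produce { operation: { value: 'search' } }
normalizeSubBlocks(wrapped)
normalizeSubBlocks(raw)
```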

View File

@@ -3,13 +3,11 @@ import ReactMarkdown from 'react-markdown'
 import type { NodeProps } from 'reactflow'
 import remarkGfm from 'remark-gfm'
 import { cn } from '@/lib/core/utils/cn'
+import { BLOCK_DIMENSIONS } from '@/lib/workflows/blocks/block-dimensions'
 import { useUserPermissionsContext } from '@/app/workspace/[workspaceId]/providers/workspace-permissions-provider'
 import { ActionBar } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/action-bar/action-bar'
 import { useBlockVisual } from '@/app/workspace/[workspaceId]/w/[workflowId]/hooks'
-import {
-  BLOCK_DIMENSIONS,
-  useBlockDimensions,
-} from '@/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-block-dimensions'
+import { useBlockDimensions } from '@/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-block-dimensions'
 import { useSubBlockStore } from '@/stores/workflows/subblock/store'
 import type { WorkflowBlockProps } from '../workflow-block/types'

@@ -168,12 +166,17 @@ const NoteMarkdown = memo(function NoteMarkdown({ content }: { content: string }
   )
 })

-export const NoteBlock = memo(function NoteBlock({ id, data }: NodeProps<NoteBlockNodeData>) {
+export const NoteBlock = memo(function NoteBlock({
+  id,
+  data,
+  selected,
+}: NodeProps<NoteBlockNodeData>) {
   const { type, config, name } = data

   const { activeWorkflowId, isEnabled, handleClick, hasRing, ringStyles } = useBlockVisual({
     blockId: id,
     data,
+    isSelected: selected,
   })

   const storedValues = useSubBlockStore(
     useCallback(

View File

@@ -0,0 +1,22 @@
import { PopoverSection } from '@/components/emcn'
/**
* Skeleton loading component for chat history dropdown
* Displays placeholder content while chats are being loaded
*/
export function ChatHistorySkeleton() {
return (
<>
<PopoverSection>
<div className='h-3 w-12 animate-pulse rounded bg-muted/40' />
</PopoverSection>
<div className='flex flex-col gap-0.5'>
{[1, 2, 3].map((i) => (
<div key={i} className='flex h-[25px] items-center px-[6px]'>
<div className='h-3 w-full animate-pulse rounded bg-muted/40' />
</div>
))}
</div>
</>
)
}

View File

@@ -0,0 +1,79 @@
import { Button } from '@/components/emcn'
type CheckpointConfirmationVariant = 'restore' | 'discard'
interface CheckpointConfirmationProps {
/** Confirmation variant - 'restore' for reverting, 'discard' for edit with checkpoint options */
variant: CheckpointConfirmationVariant
/** Whether an action is currently processing */
isProcessing: boolean
/** Callback when cancel is clicked */
onCancel: () => void
/** Callback when revert is clicked */
onRevert: () => void
/** Callback when continue is clicked (only for 'discard' variant) */
onContinue?: () => void
}
/**
* Inline confirmation for checkpoint operations
* Supports two variants:
* - 'restore': Simple revert confirmation with warning
* - 'discard': Edit with checkpoint options (revert or continue without revert)
*/
export function CheckpointConfirmation({
variant,
isProcessing,
onCancel,
onRevert,
onContinue,
}: CheckpointConfirmationProps) {
const isRestoreVariant = variant === 'restore'
return (
<div className='mt-[8px] rounded-[4px] border border-[var(--border)] bg-[var(--surface-4)] p-[10px]'>
<p className='mb-[8px] text-[12px] text-[var(--text-primary)]'>
{isRestoreVariant ? (
<>
Revert to checkpoint? This will restore your workflow to the state saved at this
checkpoint.{' '}
<span className='text-[var(--text-error)]'>This action cannot be undone.</span>
</>
) : (
'Continue from a previous message?'
)}
</p>
<div className='flex gap-[8px]'>
<Button
onClick={onCancel}
variant='active'
size='sm'
className='flex-1'
disabled={isProcessing}
>
Cancel
</Button>
<Button
onClick={onRevert}
variant='destructive'
size='sm'
className='flex-1'
disabled={isProcessing}
>
{isProcessing ? 'Reverting...' : 'Revert'}
</Button>
{!isRestoreVariant && onContinue && (
<Button
onClick={onContinue}
variant='tertiary'
size='sm'
className='flex-1'
disabled={isProcessing}
>
Continue
</Button>
)}
</div>
</div>
)
}

View File

@@ -1,5 +1,6 @@
export * from './checkpoint-confirmation'
export * from './file-display' export * from './file-display'
export { default as CopilotMarkdownRenderer } from './markdown-renderer' export { CopilotMarkdownRenderer } from './markdown-renderer'
export * from './smooth-streaming' export * from './smooth-streaming'
export * from './thinking-block' export * from './thinking-block'
export * from './usage-limit-actions' export * from './usage-limit-actions'

View File

@@ -0,0 +1 @@
export { default as CopilotMarkdownRenderer } from './markdown-renderer'

View File

@@ -6,6 +6,8 @@ import ReactMarkdown from 'react-markdown'
import remarkGfm from 'remark-gfm' import remarkGfm from 'remark-gfm'
import { Code, Tooltip } from '@/components/emcn' import { Code, Tooltip } from '@/components/emcn'
const REMARK_PLUGINS = [remarkGfm]
/** /**
* Recursively extracts text content from React elements * Recursively extracts text content from React elements
* @param element - React node to extract text from * @param element - React node to extract text from
@@ -149,14 +151,12 @@ interface CopilotMarkdownRendererProps {
* Tighter spacing compared to traditional prose for better chat UX * Tighter spacing compared to traditional prose for better chat UX
*/ */
const markdownComponents = { const markdownComponents = {
// Paragraphs - tight spacing, no margin on last
p: ({ children }: React.HTMLAttributes<HTMLParagraphElement>) => ( p: ({ children }: React.HTMLAttributes<HTMLParagraphElement>) => (
<p className='mb-1.5 font-base font-season text-[var(--text-primary)] text-sm leading-[1.4] last:mb-0 dark:font-[470]'> <p className='mb-1.5 font-base font-season text-[var(--text-primary)] text-sm leading-[1.4] last:mb-0 dark:font-[470]'>
{children} {children}
</p> </p>
), ),
// Headings - minimal margins for chat context
h1: ({ children }: React.HTMLAttributes<HTMLHeadingElement>) => ( h1: ({ children }: React.HTMLAttributes<HTMLHeadingElement>) => (
<h1 className='mt-2 mb-1 font-season font-semibold text-[var(--text-primary)] text-base first:mt-0'> <h1 className='mt-2 mb-1 font-season font-semibold text-[var(--text-primary)] text-base first:mt-0'>
{children} {children}
@@ -178,7 +178,6 @@ const markdownComponents = {
</h4> </h4>
), ),
// Lists - compact spacing
ul: ({ children }: React.HTMLAttributes<HTMLUListElement>) => ( ul: ({ children }: React.HTMLAttributes<HTMLUListElement>) => (
<ul <ul
className='my-1 space-y-0.5 pl-5 font-base font-season text-[var(--text-primary)] dark:font-[470]' className='my-1 space-y-0.5 pl-5 font-base font-season text-[var(--text-primary)] dark:font-[470]'
@@ -204,7 +203,6 @@ const markdownComponents = {
</li> </li>
), ),
// Code blocks - handled by CodeBlock component
pre: ({ children }: React.HTMLAttributes<HTMLPreElement>) => { pre: ({ children }: React.HTMLAttributes<HTMLPreElement>) => {
let codeContent: React.ReactNode = children let codeContent: React.ReactNode = children
let language = 'code' let language = 'code'
@@ -243,7 +241,6 @@ const markdownComponents = {
return <CodeBlock code={actualCodeText} language={language} /> return <CodeBlock code={actualCodeText} language={language} />
}, },
// Inline code
code: ({ code: ({
className, className,
children, children,
@@ -257,7 +254,6 @@ const markdownComponents = {
</code> </code>
), ),
// Text formatting
strong: ({ children }: React.HTMLAttributes<HTMLElement>) => ( strong: ({ children }: React.HTMLAttributes<HTMLElement>) => (
<strong className='font-semibold text-[var(--text-primary)]'>{children}</strong> <strong className='font-semibold text-[var(--text-primary)]'>{children}</strong>
), ),
@@ -271,22 +267,18 @@ const markdownComponents = {
<i className='text-[var(--text-primary)] italic'>{children}</i> <i className='text-[var(--text-primary)] italic'>{children}</i>
), ),
// Blockquote - compact
blockquote: ({ children }: React.HTMLAttributes<HTMLQuoteElement>) => ( blockquote: ({ children }: React.HTMLAttributes<HTMLQuoteElement>) => (
<blockquote className='my-1.5 border-[var(--border-1)] border-l-2 py-0.5 pl-3 font-season text-[var(--text-secondary)] text-sm italic'> <blockquote className='my-1.5 border-[var(--border-1)] border-l-2 py-0.5 pl-3 font-season text-[var(--text-secondary)] text-sm italic'>
{children} {children}
</blockquote> </blockquote>
), ),
// Horizontal rule
hr: () => <hr className='my-3 border-[var(--divider)] border-t' />, hr: () => <hr className='my-3 border-[var(--divider)] border-t' />,
// Links
a: ({ href, children }: React.AnchorHTMLAttributes<HTMLAnchorElement>) => ( a: ({ href, children }: React.AnchorHTMLAttributes<HTMLAnchorElement>) => (
<LinkWithPreview href={href || '#'}>{children}</LinkWithPreview> <LinkWithPreview href={href || '#'}>{children}</LinkWithPreview>
), ),
// Tables - compact
table: ({ children }: React.TableHTMLAttributes<HTMLTableElement>) => ( table: ({ children }: React.TableHTMLAttributes<HTMLTableElement>) => (
<div className='my-2 max-w-full overflow-x-auto'> <div className='my-2 max-w-full overflow-x-auto'>
<table className='min-w-full table-auto border border-[var(--border-1)] font-season text-xs'> <table className='min-w-full table-auto border border-[var(--border-1)] font-season text-xs'>
@@ -314,7 +306,6 @@ const markdownComponents = {
</td> </td>
), ),
// Images
img: ({ src, alt, ...props }: React.ImgHTMLAttributes<HTMLImageElement>) => ( img: ({ src, alt, ...props }: React.ImgHTMLAttributes<HTMLImageElement>) => (
<img src={src} alt={alt || 'Image'} className='my-2 h-auto max-w-full rounded-md' {...props} /> <img src={src} alt={alt || 'Image'} className='my-2 h-auto max-w-full rounded-md' {...props} />
), ),
@@ -330,7 +321,7 @@ const markdownComponents = {
function CopilotMarkdownRenderer({ content }: CopilotMarkdownRendererProps) { function CopilotMarkdownRenderer({ content }: CopilotMarkdownRendererProps) {
return ( return (
<div className='max-w-full break-words font-base font-season text-[var(--text-primary)] text-sm leading-[1.4] dark:font-[470] [&_*]:max-w-full [&_a]:break-all [&_code:not(pre_code)]:break-words [&_li]:break-words [&_p]:break-words'> <div className='max-w-full break-words font-base font-season text-[var(--text-primary)] text-sm leading-[1.4] dark:font-[470] [&_*]:max-w-full [&_a]:break-all [&_code:not(pre_code)]:break-words [&_li]:break-words [&_p]:break-words'>
<ReactMarkdown remarkPlugins={[remarkGfm]} components={markdownComponents}> <ReactMarkdown remarkPlugins={REMARK_PLUGINS} components={markdownComponents}>
{content} {content}
</ReactMarkdown> </ReactMarkdown>
</div> </div>

View File

@@ -1,27 +1,17 @@
import { memo, useEffect, useRef, useState } from 'react' import { memo, useEffect, useRef, useState } from 'react'
import { cn } from '@/lib/core/utils/cn' import { cn } from '@/lib/core/utils/cn'
import CopilotMarkdownRenderer from '@/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/copilot-message/components/markdown-renderer' import { CopilotMarkdownRenderer } from '../markdown-renderer'
/** /** Character animation delay in milliseconds */
* Character animation delay in milliseconds
*/
const CHARACTER_DELAY = 3 const CHARACTER_DELAY = 3
/** /** Props for the StreamingIndicator component */
* Props for the StreamingIndicator component
*/
interface StreamingIndicatorProps { interface StreamingIndicatorProps {
/** Optional class name for layout adjustments */ /** Optional class name for layout adjustments */
className?: string className?: string
} }
/** /** Shows animated dots during message streaming when no content has arrived */
* StreamingIndicator shows animated dots during message streaming
* Used as a standalone indicator when no content has arrived yet
*
* @param props - Component props
* @returns Animated loading indicator
*/
export const StreamingIndicator = memo(({ className }: StreamingIndicatorProps) => ( export const StreamingIndicator = memo(({ className }: StreamingIndicatorProps) => (
<div className={cn('flex h-[1.25rem] items-center text-muted-foreground', className)}> <div className={cn('flex h-[1.25rem] items-center text-muted-foreground', className)}>
<div className='flex space-x-0.5'> <div className='flex space-x-0.5'>
@@ -34,9 +24,7 @@ export const StreamingIndicator = memo(({ className }: StreamingIndicatorProps)
StreamingIndicator.displayName = 'StreamingIndicator' StreamingIndicator.displayName = 'StreamingIndicator'
/** /** Props for the SmoothStreamingText component */
* Props for the SmoothStreamingText component
*/
interface SmoothStreamingTextProps { interface SmoothStreamingTextProps {
/** Content to display with streaming animation */ /** Content to display with streaming animation */
content: string content: string
@@ -44,20 +32,12 @@ interface SmoothStreamingTextProps {
isStreaming: boolean isStreaming: boolean
} }
/** /** Displays text with character-by-character animation for smooth streaming */
* SmoothStreamingText component displays text with character-by-character animation
* Creates a smooth streaming effect for AI responses
*
* @param props - Component props
* @returns Streaming text with smooth animation
*/
export const SmoothStreamingText = memo( export const SmoothStreamingText = memo(
({ content, isStreaming }: SmoothStreamingTextProps) => { ({ content, isStreaming }: SmoothStreamingTextProps) => {
// Initialize with full content when not streaming to avoid flash on page load
const [displayedContent, setDisplayedContent] = useState(() => (isStreaming ? '' : content)) const [displayedContent, setDisplayedContent] = useState(() => (isStreaming ? '' : content))
const contentRef = useRef(content) const contentRef = useRef(content)
const timeoutRef = useRef<NodeJS.Timeout | null>(null) const timeoutRef = useRef<NodeJS.Timeout | null>(null)
// Initialize index based on streaming state
const indexRef = useRef(isStreaming ? 0 : content.length) const indexRef = useRef(isStreaming ? 0 : content.length)
const isAnimatingRef = useRef(false) const isAnimatingRef = useRef(false)
@@ -95,7 +75,6 @@ export const SmoothStreamingText = memo(
} }
} }
} else { } else {
// Streaming ended - show full content immediately
if (timeoutRef.current) { if (timeoutRef.current) {
clearTimeout(timeoutRef.current) clearTimeout(timeoutRef.current)
} }
@@ -119,7 +98,6 @@ export const SmoothStreamingText = memo(
) )
}, },
(prevProps, nextProps) => { (prevProps, nextProps) => {
// Prevent re-renders during streaming unless content actually changed
return ( return (
prevProps.content === nextProps.content && prevProps.isStreaming === nextProps.isStreaming prevProps.content === nextProps.content && prevProps.isStreaming === nextProps.isStreaming
) )

View File

@@ -3,66 +3,45 @@
import { memo, useEffect, useMemo, useRef, useState } from 'react' import { memo, useEffect, useMemo, useRef, useState } from 'react'
import clsx from 'clsx' import clsx from 'clsx'
import { ChevronUp } from 'lucide-react' import { ChevronUp } from 'lucide-react'
import CopilotMarkdownRenderer from './markdown-renderer' import { CopilotMarkdownRenderer } from '../markdown-renderer'
/** /** Removes thinking tags (raw or escaped) and special tags from streamed content */
* Removes thinking tags (raw or escaped) from streamed content.
*/
function stripThinkingTags(text: string): string { function stripThinkingTags(text: string): string {
return text return text
.replace(/<\/?thinking[^>]*>/gi, '') .replace(/<\/?thinking[^>]*>/gi, '')
.replace(/&lt;\/?thinking[^&]*&gt;/gi, '') .replace(/&lt;\/?thinking[^&]*&gt;/gi, '')
.replace(/<options>[\s\S]*?<\/options>/gi, '')
.replace(/<options>[\s\S]*$/gi, '')
.replace(/<plan>[\s\S]*?<\/plan>/gi, '')
.replace(/<plan>[\s\S]*$/gi, '')
.trim() .trim()
} }
/** /** Interval for auto-scroll during streaming (ms) */
* Max height for thinking content before internal scrolling kicks in
*/
const THINKING_MAX_HEIGHT = 150
/**
* Height threshold before gradient fade kicks in
*/
const GRADIENT_THRESHOLD = 100
/**
* Interval for auto-scroll during streaming (ms)
*/
const SCROLL_INTERVAL = 50 const SCROLL_INTERVAL = 50
/** /** Timer update interval in milliseconds */
* Timer update interval in milliseconds
*/
const TIMER_UPDATE_INTERVAL = 100 const TIMER_UPDATE_INTERVAL = 100
/** /** Thinking text streaming delay - faster than main text */
* Thinking text streaming - much faster than main text
* Essentially instant with minimal delay
*/
const THINKING_DELAY = 0.5 const THINKING_DELAY = 0.5
const THINKING_CHARS_PER_FRAME = 3 const THINKING_CHARS_PER_FRAME = 3
/** /** Props for the SmoothThinkingText component */
* Props for the SmoothThinkingText component
*/
interface SmoothThinkingTextProps { interface SmoothThinkingTextProps {
content: string content: string
isStreaming: boolean isStreaming: boolean
} }
/** /**
* SmoothThinkingText renders thinking content with fast streaming animation * Renders thinking content with fast streaming animation.
* Uses gradient fade at top when content is tall enough
*/ */
const SmoothThinkingText = memo( const SmoothThinkingText = memo(
({ content, isStreaming }: SmoothThinkingTextProps) => { ({ content, isStreaming }: SmoothThinkingTextProps) => {
// Initialize with full content when not streaming to avoid flash on page load
const [displayedContent, setDisplayedContent] = useState(() => (isStreaming ? '' : content)) const [displayedContent, setDisplayedContent] = useState(() => (isStreaming ? '' : content))
const [showGradient, setShowGradient] = useState(false)
const contentRef = useRef(content) const contentRef = useRef(content)
const textRef = useRef<HTMLDivElement>(null) const textRef = useRef<HTMLDivElement>(null)
const rafRef = useRef<number | null>(null) const rafRef = useRef<number | null>(null)
// Initialize index based on streaming state
const indexRef = useRef(isStreaming ? 0 : content.length) const indexRef = useRef(isStreaming ? 0 : content.length)
const lastFrameTimeRef = useRef<number>(0) const lastFrameTimeRef = useRef<number>(0)
const isAnimatingRef = useRef(false) const isAnimatingRef = useRef(false)
@@ -88,7 +67,6 @@ const SmoothThinkingText = memo(
if (elapsed >= THINKING_DELAY) { if (elapsed >= THINKING_DELAY) {
if (currentIndex < currentContent.length) { if (currentIndex < currentContent.length) {
// Reveal multiple characters per frame for faster streaming
const newIndex = Math.min( const newIndex = Math.min(
currentIndex + THINKING_CHARS_PER_FRAME, currentIndex + THINKING_CHARS_PER_FRAME,
currentContent.length currentContent.length
@@ -110,7 +88,6 @@ const SmoothThinkingText = memo(
rafRef.current = requestAnimationFrame(animateText) rafRef.current = requestAnimationFrame(animateText)
} }
} else { } else {
// Streaming ended - show full content immediately
if (rafRef.current) { if (rafRef.current) {
cancelAnimationFrame(rafRef.current) cancelAnimationFrame(rafRef.current)
} }
@@ -127,30 +104,10 @@ const SmoothThinkingText = memo(
} }
}, [content, isStreaming]) }, [content, isStreaming])
// Check if content height exceeds threshold for gradient
useEffect(() => {
if (textRef.current && isStreaming) {
const height = textRef.current.scrollHeight
setShowGradient(height > GRADIENT_THRESHOLD)
} else {
setShowGradient(false)
}
}, [displayedContent, isStreaming])
// Apply vertical gradient fade at the top only when content is tall enough
const gradientStyle =
isStreaming && showGradient
? {
maskImage: 'linear-gradient(to bottom, transparent 0%, black 30%, black 100%)',
WebkitMaskImage: 'linear-gradient(to bottom, transparent 0%, black 30%, black 100%)',
}
: undefined
return ( return (
<div <div
ref={textRef} ref={textRef}
className='[&_*]:!text-[var(--text-muted)] [&_*]:!text-[12px] [&_*]:!leading-[1.4] [&_p]:!m-0 [&_p]:!mb-1 [&_h1]:!text-[12px] [&_h1]:!font-semibold [&_h1]:!m-0 [&_h1]:!mb-1 [&_h2]:!text-[12px] [&_h2]:!font-semibold [&_h2]:!m-0 [&_h2]:!mb-1 [&_h3]:!text-[12px] [&_h3]:!font-semibold [&_h3]:!m-0 [&_h3]:!mb-1 [&_code]:!text-[11px] [&_ul]:!pl-5 [&_ul]:!my-1 [&_ol]:!pl-6 [&_ol]:!my-1 [&_li]:!my-0.5 [&_li]:!py-0 font-season text-[12px] text-[var(--text-muted)]' className='[&_*]:!text-[var(--text-muted)] [&_*]:!text-[12px] [&_*]:!leading-[1.4] [&_p]:!m-0 [&_p]:!mb-1 [&_h1]:!text-[12px] [&_h1]:!font-semibold [&_h1]:!m-0 [&_h1]:!mb-1 [&_h2]:!text-[12px] [&_h2]:!font-semibold [&_h2]:!m-0 [&_h2]:!mb-1 [&_h3]:!text-[12px] [&_h3]:!font-semibold [&_h3]:!m-0 [&_h3]:!mb-1 [&_code]:!text-[11px] [&_ul]:!pl-5 [&_ul]:!my-1 [&_ol]:!pl-6 [&_ol]:!my-1 [&_li]:!my-0.5 [&_li]:!py-0 font-season text-[12px] text-[var(--text-muted)]'
style={gradientStyle}
> >
<CopilotMarkdownRenderer content={displayedContent} /> <CopilotMarkdownRenderer content={displayedContent} />
</div> </div>
@@ -165,9 +122,7 @@ const SmoothThinkingText = memo(
SmoothThinkingText.displayName = 'SmoothThinkingText' SmoothThinkingText.displayName = 'SmoothThinkingText'
/** /** Props for the ThinkingBlock component */
* Props for the ThinkingBlock component
*/
interface ThinkingBlockProps { interface ThinkingBlockProps {
/** Content of the thinking block */ /** Content of the thinking block */
content: string content: string
@@ -182,13 +137,8 @@ interface ThinkingBlockProps {
} }
/** /**
* ThinkingBlock component displays AI reasoning/thinking process * Displays AI reasoning/thinking process with collapsible content and duration timer.
* Shows collapsible content with duration timer * Auto-expands during streaming and collapses when complete.
* Auto-expands during streaming and collapses when complete
* Auto-collapses when a tool call or other content comes in after it
*
* @param props - Component props
* @returns Thinking block with expandable content and timer
*/ */
export function ThinkingBlock({ export function ThinkingBlock({
content, content,
@@ -197,7 +147,6 @@ export function ThinkingBlock({
label = 'Thought', label = 'Thought',
hasSpecialTags = false, hasSpecialTags = false,
}: ThinkingBlockProps) { }: ThinkingBlockProps) {
// Strip thinking tags from content on render to handle persisted messages
const cleanContent = useMemo(() => stripThinkingTags(content || ''), [content]) const cleanContent = useMemo(() => stripThinkingTags(content || ''), [content])
const [isExpanded, setIsExpanded] = useState(false) const [isExpanded, setIsExpanded] = useState(false)
@@ -209,12 +158,8 @@ export function ThinkingBlock({
const lastScrollTopRef = useRef(0) const lastScrollTopRef = useRef(0)
const programmaticScrollRef = useRef(false) const programmaticScrollRef = useRef(false)
/** /** Auto-expands during streaming, auto-collapses when streaming ends or following content arrives */
* Auto-expands block when streaming with content
* Auto-collapses when streaming ends OR when following content arrives
*/
useEffect(() => { useEffect(() => {
// Collapse if streaming ended, there's following content, or special tags arrived
if (!isStreaming || hasFollowingContent || hasSpecialTags) { if (!isStreaming || hasFollowingContent || hasSpecialTags) {
setIsExpanded(false) setIsExpanded(false)
userCollapsedRef.current = false userCollapsedRef.current = false
@@ -227,7 +172,6 @@ export function ThinkingBlock({
} }
}, [isStreaming, cleanContent, hasFollowingContent, hasSpecialTags]) }, [isStreaming, cleanContent, hasFollowingContent, hasSpecialTags])
// Reset start time when streaming begins
useEffect(() => { useEffect(() => {
if (isStreaming && !hasFollowingContent) { if (isStreaming && !hasFollowingContent) {
startTimeRef.current = Date.now() startTimeRef.current = Date.now()
@@ -236,9 +180,7 @@ export function ThinkingBlock({
} }
}, [isStreaming, hasFollowingContent]) }, [isStreaming, hasFollowingContent])
// Update duration timer during streaming (stop when following content arrives)
useEffect(() => { useEffect(() => {
// Stop timer if not streaming or if there's following content (thinking is done)
if (!isStreaming || hasFollowingContent) return if (!isStreaming || hasFollowingContent) return
const interval = setInterval(() => { const interval = setInterval(() => {
@@ -248,7 +190,6 @@ export function ThinkingBlock({
return () => clearInterval(interval) return () => clearInterval(interval)
}, [isStreaming, hasFollowingContent]) }, [isStreaming, hasFollowingContent])
// Handle scroll events to detect user scrolling away
useEffect(() => { useEffect(() => {
const container = scrollContainerRef.current const container = scrollContainerRef.current
if (!container || !isExpanded) return if (!container || !isExpanded) return
@@ -267,7 +208,6 @@ export function ThinkingBlock({
setUserHasScrolledAway(true) setUserHasScrolledAway(true)
} }
// Re-stick if user scrolls back to bottom with intent
if (userHasScrolledAway && isNearBottom && delta > 10) { if (userHasScrolledAway && isNearBottom && delta > 10) {
setUserHasScrolledAway(false) setUserHasScrolledAway(false)
} }
@@ -281,7 +221,6 @@ export function ThinkingBlock({
return () => container.removeEventListener('scroll', handleScroll) return () => container.removeEventListener('scroll', handleScroll)
}, [isExpanded, userHasScrolledAway]) }, [isExpanded, userHasScrolledAway])
// Smart auto-scroll: always scroll to bottom while streaming unless user scrolled away
useEffect(() => { useEffect(() => {
if (!isStreaming || !isExpanded || userHasScrolledAway) return if (!isStreaming || !isExpanded || userHasScrolledAway) return
@@ -302,20 +241,16 @@ export function ThinkingBlock({
return () => window.clearInterval(intervalId) return () => window.clearInterval(intervalId)
}, [isStreaming, isExpanded, userHasScrolledAway]) }, [isStreaming, isExpanded, userHasScrolledAway])
/** /** Formats duration in milliseconds to seconds (minimum 1s) */
* Formats duration in milliseconds to seconds
* Always shows seconds, rounded to nearest whole second, minimum 1s
*/
const formatDuration = (ms: number) => { const formatDuration = (ms: number) => {
const seconds = Math.max(1, Math.round(ms / 1000)) const seconds = Math.max(1, Math.round(ms / 1000))
return `${seconds}s` return `${seconds}s`
} }
const hasContent = cleanContent.length > 0 const hasContent = cleanContent.length > 0
// Thinking is "done" when streaming ends OR when there's following content (like a tool call) OR when special tags appear
const isThinkingDone = !isStreaming || hasFollowingContent || hasSpecialTags const isThinkingDone = !isStreaming || hasFollowingContent || hasSpecialTags
const durationText = `${label} for ${formatDuration(duration)}` const durationText = `${label} for ${formatDuration(duration)}`
// Convert past tense label to present tense for streaming (e.g., "Thought" → "Thinking")
const getStreamingLabel = (lbl: string) => { const getStreamingLabel = (lbl: string) => {
if (lbl === 'Thought') return 'Thinking' if (lbl === 'Thought') return 'Thinking'
if (lbl.endsWith('ed')) return `${lbl.slice(0, -2)}ing` if (lbl.endsWith('ed')) return `${lbl.slice(0, -2)}ing`
@@ -323,11 +258,9 @@ export function ThinkingBlock({
} }
const streamingLabel = getStreamingLabel(label) const streamingLabel = getStreamingLabel(label)
// During streaming: show header with shimmer effect + expanded content
if (!isThinkingDone) { if (!isThinkingDone) {
return ( return (
<div> <div>
{/* Define shimmer keyframes */}
<style>{` <style>{`
@keyframes thinking-shimmer { @keyframes thinking-shimmer {
0% { background-position: 150% 0; } 0% { background-position: 150% 0; }
@@ -396,7 +329,6 @@ export function ThinkingBlock({
) )
} }
// After done: show collapsible header with duration
return ( return (
<div> <div>
<button <button
@@ -426,7 +358,6 @@ export function ThinkingBlock({
isExpanded ? 'mt-1.5 max-h-[150px] opacity-100' : 'max-h-0 opacity-0' isExpanded ? 'mt-1.5 max-h-[150px] opacity-100' : 'max-h-0 opacity-0'
)} )}
> >
{/* Completed thinking text - dimmed with markdown */}
<div className='[&_*]:!text-[var(--text-muted)] [&_*]:!text-[12px] [&_*]:!leading-[1.4] [&_p]:!m-0 [&_p]:!mb-1 [&_h1]:!text-[12px] [&_h1]:!font-semibold [&_h1]:!m-0 [&_h1]:!mb-1 [&_h2]:!text-[12px] [&_h2]:!font-semibold [&_h2]:!m-0 [&_h2]:!mb-1 [&_h3]:!text-[12px] [&_h3]:!font-semibold [&_h3]:!m-0 [&_h3]:!mb-1 [&_code]:!text-[11px] [&_ul]:!pl-5 [&_ul]:!my-1 [&_ol]:!pl-6 [&_ol]:!my-1 [&_li]:!my-0.5 [&_li]:!py-0 font-season text-[12px] text-[var(--text-muted)]'> <div className='[&_*]:!text-[var(--text-muted)] [&_*]:!text-[12px] [&_*]:!leading-[1.4] [&_p]:!m-0 [&_p]:!mb-1 [&_h1]:!text-[12px] [&_h1]:!font-semibold [&_h1]:!m-0 [&_h1]:!mb-1 [&_h2]:!text-[12px] [&_h2]:!font-semibold [&_h2]:!m-0 [&_h2]:!mb-1 [&_h3]:!text-[12px] [&_h3]:!font-semibold [&_h3]:!m-0 [&_h3]:!mb-1 [&_code]:!text-[11px] [&_ul]:!pl-5 [&_ul]:!my-1 [&_ol]:!pl-6 [&_ol]:!my-1 [&_li]:!my-0.5 [&_li]:!py-0 font-season text-[12px] text-[var(--text-muted)]'>
<CopilotMarkdownRenderer content={cleanContent} /> <CopilotMarkdownRenderer content={cleanContent} />
</div> </div>

View File

@@ -9,18 +9,20 @@ import {
ToolCall, ToolCall,
} from '@/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components' } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components'
import { import {
CheckpointConfirmation,
FileAttachmentDisplay, FileAttachmentDisplay,
SmoothStreamingText, SmoothStreamingText,
StreamingIndicator, StreamingIndicator,
ThinkingBlock, ThinkingBlock,
UsageLimitActions, UsageLimitActions,
} from '@/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/copilot-message/components' } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/copilot-message/components'
import CopilotMarkdownRenderer from '@/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/copilot-message/components/markdown-renderer' import { CopilotMarkdownRenderer } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/copilot-message/components/markdown-renderer'
import { import {
useCheckpointManagement, useCheckpointManagement,
useMessageEditing, useMessageEditing,
} from '@/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/copilot-message/hooks' } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/copilot-message/hooks'
import { UserInput } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/user-input/user-input' import { UserInput } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/user-input/user-input'
import { buildMentionHighlightNodes } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/user-input/utils'
import type { CopilotMessage as CopilotMessageType } from '@/stores/panel' import type { CopilotMessage as CopilotMessageType } from '@/stores/panel'
import { useCopilotStore } from '@/stores/panel' import { useCopilotStore } from '@/stores/panel'
@@ -68,7 +70,6 @@ const CopilotMessage: FC<CopilotMessageProps> = memo(
const isUser = message.role === 'user' const isUser = message.role === 'user'
const isAssistant = message.role === 'assistant' const isAssistant = message.role === 'assistant'
// Store state
const { const {
messageCheckpoints: allMessageCheckpoints, messageCheckpoints: allMessageCheckpoints,
messages, messages,
@@ -79,23 +80,18 @@ const CopilotMessage: FC<CopilotMessageProps> = memo(
isAborting, isAborting,
} = useCopilotStore() } = useCopilotStore()
// Get checkpoints for this message if it's a user message
const messageCheckpoints = isUser ? allMessageCheckpoints[message.id] || [] : [] const messageCheckpoints = isUser ? allMessageCheckpoints[message.id] || [] : []
const hasCheckpoints = messageCheckpoints.length > 0 && messageCheckpoints.some((cp) => cp?.id) const hasCheckpoints = messageCheckpoints.length > 0 && messageCheckpoints.some((cp) => cp?.id)
// Check if this is the last user message (for showing abort button)
const isLastUserMessage = useMemo(() => { const isLastUserMessage = useMemo(() => {
if (!isUser) return false if (!isUser) return false
const userMessages = messages.filter((m) => m.role === 'user') const userMessages = messages.filter((m) => m.role === 'user')
return userMessages.length > 0 && userMessages[userMessages.length - 1]?.id === message.id return userMessages.length > 0 && userMessages[userMessages.length - 1]?.id === message.id
}, [isUser, messages, message.id]) }, [isUser, messages, message.id])
// UI state
const [isHoveringMessage, setIsHoveringMessage] = useState(false) const [isHoveringMessage, setIsHoveringMessage] = useState(false)
const cancelEditRef = useRef<(() => void) | null>(null) const cancelEditRef = useRef<(() => void) | null>(null)
// Checkpoint management hook
const { const {
showRestoreConfirmation, showRestoreConfirmation,
showCheckpointDiscardModal, showCheckpointDiscardModal,
@@ -118,7 +114,6 @@ const CopilotMessage: FC<CopilotMessageProps> = memo(
() => cancelEditRef.current?.() () => cancelEditRef.current?.()
) )
// Message editing hook
const { const {
isEditMode, isEditMode,
isExpanded, isExpanded,
@@ -147,27 +142,20 @@ const CopilotMessage: FC<CopilotMessageProps> = memo(
cancelEditRef.current = handleCancelEdit cancelEditRef.current = handleCancelEdit
// Get clean text content with double newline parsing
const cleanTextContent = useMemo(() => { const cleanTextContent = useMemo(() => {
if (!message.content) return '' if (!message.content) return ''
// Parse out excessive newlines (more than 2 consecutive newlines)
return message.content.replace(/\n{3,}/g, '\n\n') return message.content.replace(/\n{3,}/g, '\n\n')
}, [message.content]) }, [message.content])
// Parse special tags from message content (options, plan)
// Parse during streaming to show options/plan as they stream in
const parsedTags = useMemo(() => { const parsedTags = useMemo(() => {
if (isUser) return null if (isUser) return null
// Try message.content first
if (message.content) { if (message.content) {
const parsed = parseSpecialTags(message.content) const parsed = parseSpecialTags(message.content)
if (parsed.options || parsed.plan) return parsed if (parsed.options || parsed.plan) return parsed
} }
// During streaming, check content blocks for options/plan if (message.contentBlocks && message.contentBlocks.length > 0) {
if (isStreaming && message.contentBlocks && message.contentBlocks.length > 0) {
for (const block of message.contentBlocks) { for (const block of message.contentBlocks) {
if (block.type === 'text' && block.content) { if (block.type === 'text' && block.content) {
const parsed = parseSpecialTags(block.content) const parsed = parseSpecialTags(block.content)
@@ -176,23 +164,42 @@ const CopilotMessage: FC<CopilotMessageProps> = memo(
} }
} }
return message.content ? parseSpecialTags(message.content) : null return null
}, [message.content, message.contentBlocks, isUser, isStreaming]) }, [message.content, message.contentBlocks, isUser])
const selectedOptionKey = useMemo(() => {
if (!parsedTags?.options || isStreaming) return null
const currentIndex = messages.findIndex((m) => m.id === message.id)
if (currentIndex === -1 || currentIndex >= messages.length - 1) return null
const nextMessage = messages[currentIndex + 1]
if (!nextMessage || nextMessage.role !== 'user') return null
const nextContent = nextMessage.content?.trim()
if (!nextContent) return null
for (const [key, option] of Object.entries(parsedTags.options)) {
const optionTitle = typeof option === 'string' ? option : option.title
if (nextContent === optionTitle) {
return key
}
}
return null
}, [parsedTags?.options, messages, message.id, isStreaming])
// Get sendMessage from store for continuation actions
const sendMessage = useCopilotStore((s) => s.sendMessage) const sendMessage = useCopilotStore((s) => s.sendMessage)
// Handler for option selection
const handleOptionSelect = useCallback( const handleOptionSelect = useCallback(
(_optionKey: string, optionText: string) => { (_optionKey: string, optionText: string) => {
// Send the option text as a message
sendMessage(optionText) sendMessage(optionText)
}, },
[sendMessage] [sendMessage]
) )
// Memoize content blocks to avoid re-rendering unchanged blocks const isActivelyStreaming = isLastMessage && isStreaming
// No entrance animations to prevent layout shift
const memoizedContentBlocks = useMemo(() => { const memoizedContentBlocks = useMemo(() => {
if (!message.contentBlocks || message.contentBlocks.length === 0) { if (!message.contentBlocks || message.contentBlocks.length === 0) {
return null return null
@@ -202,21 +209,21 @@ const CopilotMessage: FC<CopilotMessageProps> = memo(
if (block.type === 'text') { if (block.type === 'text') {
const isLastTextBlock = const isLastTextBlock =
index === message.contentBlocks!.length - 1 && block.type === 'text' index === message.contentBlocks!.length - 1 && block.type === 'text'
// Always strip special tags from display (they're rendered separately as options/plan)
const parsed = parseSpecialTags(block.content) const parsed = parseSpecialTags(block.content)
const cleanBlockContent = parsed.cleanContent.replace(/\n{3,}/g, '\n\n') const cleanBlockContent = parsed.cleanContent.replace(/\n{3,}/g, '\n\n')
// Skip if no content after stripping tags
if (!cleanBlockContent.trim()) return null if (!cleanBlockContent.trim()) return null
// Use smooth streaming for the last text block if we're streaming const shouldUseSmoothing = isActivelyStreaming && isLastTextBlock
const shouldUseSmoothing = isStreaming && isLastTextBlock
const blockKey = `text-${index}-${block.timestamp || index}` const blockKey = `text-${index}-${block.timestamp || index}`
return ( return (
<div key={blockKey} className='w-full max-w-full'> <div key={blockKey} className='w-full max-w-full'>
{shouldUseSmoothing ? ( {shouldUseSmoothing ? (
<SmoothStreamingText content={cleanBlockContent} isStreaming={isStreaming} /> <SmoothStreamingText
content={cleanBlockContent}
isStreaming={isActivelyStreaming}
/>
) : ( ) : (
<CopilotMarkdownRenderer content={cleanBlockContent} /> <CopilotMarkdownRenderer content={cleanBlockContent} />
)} )}
@@ -224,9 +231,7 @@ const CopilotMessage: FC<CopilotMessageProps> = memo(
) )
} }
if (block.type === 'thinking') { if (block.type === 'thinking') {
// Check if there are any blocks after this one (tool calls, text, etc.)
const hasFollowingContent = index < message.contentBlocks!.length - 1 const hasFollowingContent = index < message.contentBlocks!.length - 1
// Check if special tags (options, plan) are present - should also close thinking
const hasSpecialTags = !!(parsedTags?.options || parsedTags?.plan) const hasSpecialTags = !!(parsedTags?.options || parsedTags?.plan)
const blockKey = `thinking-${index}-${block.timestamp || index}` const blockKey = `thinking-${index}-${block.timestamp || index}`
@@ -234,7 +239,7 @@ const CopilotMessage: FC<CopilotMessageProps> = memo(
<div key={blockKey} className='w-full'> <div key={blockKey} className='w-full'>
<ThinkingBlock <ThinkingBlock
content={block.content} content={block.content}
isStreaming={isStreaming} isStreaming={isActivelyStreaming}
hasFollowingContent={hasFollowingContent} hasFollowingContent={hasFollowingContent}
hasSpecialTags={hasSpecialTags} hasSpecialTags={hasSpecialTags}
/> />
@@ -246,18 +251,22 @@ const CopilotMessage: FC<CopilotMessageProps> = memo(
return ( return (
<div key={blockKey}> <div key={blockKey}>
<ToolCall toolCallId={block.toolCall.id} toolCall={block.toolCall} /> <ToolCall
toolCallId={block.toolCall.id}
toolCall={block.toolCall}
isCurrentMessage={isLastMessage}
/>
</div> </div>
) )
} }
return null return null
}) })
}, [message.contentBlocks, isStreaming, parsedTags]) }, [message.contentBlocks, isActivelyStreaming, parsedTags, isLastMessage])
if (isUser) { if (isUser) {
return ( return (
<div <div
className={`w-full max-w-full overflow-hidden transition-opacity duration-200 [max-width:var(--panel-max-width)] ${isDimmed ? 'opacity-40' : 'opacity-100'}`} className={`w-full max-w-full flex-none overflow-hidden transition-opacity duration-200 [max-width:var(--panel-max-width)] ${isDimmed ? 'opacity-40' : 'opacity-100'}`}
style={{ '--panel-max-width': `${panelWidth - 16}px` } as React.CSSProperties} style={{ '--panel-max-width': `${panelWidth - 16}px` } as React.CSSProperties}
> >
{isEditMode ? ( {isEditMode ? (
@@ -288,42 +297,15 @@ const CopilotMessage: FC<CopilotMessageProps> = memo(
initialContexts={message.contexts} initialContexts={message.contexts}
/> />
{/* Inline Checkpoint Discard Confirmation - shown below input in edit mode */} {/* Inline checkpoint confirmation - shown below input in edit mode */}
{showCheckpointDiscardModal && ( {showCheckpointDiscardModal && (
<div className='mt-[8px] rounded-[4px] border border-[var(--border)] bg-[var(--surface-4)] p-[10px]'> <CheckpointConfirmation
<p className='mb-[8px] text-[12px] text-[var(--text-primary)]'> variant='discard'
Continue from a previous message? isProcessing={isProcessingDiscard}
</p> onCancel={handleCancelCheckpointDiscard}
<div className='flex gap-[8px]'> onRevert={handleContinueAndRevert}
<Button onContinue={handleContinueWithoutRevert}
onClick={handleCancelCheckpointDiscard} />
variant='active'
size='sm'
className='flex-1'
disabled={isProcessingDiscard}
>
Cancel
</Button>
<Button
onClick={handleContinueAndRevert}
variant='destructive'
size='sm'
className='flex-1'
disabled={isProcessingDiscard}
>
{isProcessingDiscard ? 'Reverting...' : 'Revert'}
</Button>
<Button
onClick={handleContinueWithoutRevert}
variant='tertiary'
size='sm'
className='flex-1'
disabled={isProcessingDiscard}
>
Continue
</Button>
</div>
</div>
)} )}
</div> </div>
) : ( ) : (
@@ -348,46 +330,15 @@ const CopilotMessage: FC<CopilotMessageProps> = memo(
ref={messageContentRef} ref={messageContentRef}
className={`relative whitespace-pre-wrap break-words px-[2px] py-1 font-medium font-sans text-[var(--text-primary)] text-sm leading-[1.25rem] ${isSendingMessage && isLastUserMessage && isHoveringMessage ? 'pr-7' : ''} ${!isExpanded && needsExpansion ? 'max-h-[60px] overflow-hidden' : 'overflow-visible'}`} className={`relative whitespace-pre-wrap break-words px-[2px] py-1 font-medium font-sans text-[var(--text-primary)] text-sm leading-[1.25rem] ${isSendingMessage && isLastUserMessage && isHoveringMessage ? 'pr-7' : ''} ${!isExpanded && needsExpansion ? 'max-h-[60px] overflow-hidden' : 'overflow-visible'}`}
> >
{(() => { {buildMentionHighlightNodes(
const text = message.content || '' message.content || '',
const contexts: any[] = Array.isArray((message as any).contexts) message.contexts || [],
? ((message as any).contexts as any[]) (token, key) => (
: [] <span key={key} className='rounded-[4px] bg-[rgba(50,189,126,0.65)] py-[1px]'>
{token}
// Build tokens with their prefixes (@ for mentions, / for commands) </span>
const tokens = contexts )
.filter((c) => c?.kind !== 'current_workflow' && c?.label) )}
.map((c) => {
const prefix = c?.kind === 'slash_command' ? '/' : '@'
return `${prefix}${c.label}`
})
if (!tokens.length) return text
const escapeRegex = (s: string) => s.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')
const pattern = new RegExp(`(${tokens.map(escapeRegex).join('|')})`, 'g')
const nodes: React.ReactNode[] = []
let lastIndex = 0
let match: RegExpExecArray | null
while ((match = pattern.exec(text)) !== null) {
const i = match.index
const before = text.slice(lastIndex, i)
if (before) nodes.push(before)
const mention = match[0]
nodes.push(
<span
key={`mention-${i}-${lastIndex}`}
className='rounded-[4px] bg-[rgba(50,189,126,0.65)] py-[1px]'
>
{mention}
</span>
)
lastIndex = i + mention.length
}
const tail = text.slice(lastIndex)
if (tail) nodes.push(tail)
return nodes
})()}
</div> </div>
{/* Gradient fade when truncated - applies to entire message box */} {/* Gradient fade when truncated - applies to entire message box */}
@@ -437,65 +388,30 @@ const CopilotMessage: FC<CopilotMessageProps> = memo(
</div> </div>
)} )}
{/* Inline Restore Checkpoint Confirmation */} {/* Inline restore checkpoint confirmation */}
{showRestoreConfirmation && ( {showRestoreConfirmation && (
<div className='mt-[8px] rounded-[4px] border border-[var(--border)] bg-[var(--surface-4)] p-[10px]'> <CheckpointConfirmation
<p className='mb-[8px] text-[12px] text-[var(--text-primary)]'> variant='restore'
Revert to checkpoint? This will restore your workflow to the state saved at this isProcessing={isReverting}
checkpoint.{' '} onCancel={handleCancelRevert}
<span className='text-[var(--text-error)]'>This action cannot be undone.</span> onRevert={handleConfirmRevert}
</p> />
<div className='flex gap-[8px]'>
<Button
onClick={handleCancelRevert}
variant='active'
size='sm'
className='flex-1'
disabled={isReverting}
>
Cancel
</Button>
<Button
onClick={handleConfirmRevert}
variant='destructive'
size='sm'
className='flex-1'
disabled={isReverting}
>
{isReverting ? 'Reverting...' : 'Revert'}
</Button>
</div>
</div>
)} )}
</div> </div>
) )
} }
// Check if there's any visible content in the blocks
const hasVisibleContent = useMemo(() => {
if (!message.contentBlocks || message.contentBlocks.length === 0) return false
return message.contentBlocks.some((block) => {
if (block.type === 'text') {
const parsed = parseSpecialTags(block.content)
return parsed.cleanContent.trim().length > 0
}
return block.type === 'thinking' || block.type === 'tool_call'
})
}, [message.contentBlocks])
if (isAssistant) { if (isAssistant) {
return ( return (
<div <div
className={`w-full max-w-full overflow-hidden [max-width:var(--panel-max-width)] ${isDimmed ? 'opacity-40' : 'opacity-100'}`} className={`w-full max-w-full flex-none overflow-hidden [max-width:var(--panel-max-width)] ${isDimmed ? 'opacity-40' : 'opacity-100'}`}
style={{ '--panel-max-width': `${panelWidth - 16}px` } as React.CSSProperties} style={{ '--panel-max-width': `${panelWidth - 16}px` } as React.CSSProperties}
> >
<div className='max-w-full space-y-1 px-[2px]'> <div className='max-w-full space-y-[4px] px-[2px] pb-[4px]'>
{/* Content blocks in chronological order */} {/* Content blocks in chronological order */}
{memoizedContentBlocks} {memoizedContentBlocks || (isStreaming && <div className='min-h-0' />)}
{isStreaming && ( {isStreaming && <StreamingIndicator />}
<StreamingIndicator className={!hasVisibleContent ? 'mt-1' : undefined} />
)}
{message.errorType === 'usage_limit' && ( {message.errorType === 'usage_limit' && (
<div className='flex gap-1.5'> <div className='flex gap-1.5'>
@@ -534,6 +450,7 @@ const CopilotMessage: FC<CopilotMessageProps> = memo(
isLastMessage && !isStreaming && parsedTags.optionsComplete === true isLastMessage && !isStreaming && parsedTags.optionsComplete === true
} }
streaming={isStreaming || !parsedTags.optionsComplete} streaming={isStreaming || !parsedTags.optionsComplete}
selectedOptionKey={selectedOptionKey}
/> />
)} )}
</div> </div>
@@ -544,50 +461,22 @@ const CopilotMessage: FC<CopilotMessageProps> = memo(
return null return null
}, },
(prevProps, nextProps) => { (prevProps, nextProps) => {
// Custom comparison function for better streaming performance
const prevMessage = prevProps.message const prevMessage = prevProps.message
const nextMessage = nextProps.message const nextMessage = nextProps.message
// If message IDs are different, always re-render if (prevMessage.id !== nextMessage.id) return false
if (prevMessage.id !== nextMessage.id) { if (prevProps.isStreaming !== nextProps.isStreaming) return false
return false if (prevProps.isDimmed !== nextProps.isDimmed) return false
} if (prevProps.panelWidth !== nextProps.panelWidth) return false
if (prevProps.checkpointCount !== nextProps.checkpointCount) return false
if (prevProps.isLastMessage !== nextProps.isLastMessage) return false
// If streaming state changed, re-render
if (prevProps.isStreaming !== nextProps.isStreaming) {
return false
}
// If dimmed state changed, re-render
if (prevProps.isDimmed !== nextProps.isDimmed) {
return false
}
// If panel width changed, re-render
if (prevProps.panelWidth !== nextProps.panelWidth) {
return false
}
// If checkpoint count changed, re-render
if (prevProps.checkpointCount !== nextProps.checkpointCount) {
return false
}
// If isLastMessage changed, re-render (for options visibility)
if (prevProps.isLastMessage !== nextProps.isLastMessage) {
return false
}
// For streaming messages, check if content actually changed
if (nextProps.isStreaming) { if (nextProps.isStreaming) {
const prevBlocks = prevMessage.contentBlocks || [] const prevBlocks = prevMessage.contentBlocks || []
const nextBlocks = nextMessage.contentBlocks || [] const nextBlocks = nextMessage.contentBlocks || []
if (prevBlocks.length !== nextBlocks.length) { if (prevBlocks.length !== nextBlocks.length) return false
return false // Content blocks changed
}
// Helper: get last block content by type
const getLastBlockContent = (blocks: any[], type: 'text' | 'thinking'): string | null => { const getLastBlockContent = (blocks: any[], type: 'text' | 'thinking'): string | null => {
for (let i = blocks.length - 1; i >= 0; i--) { for (let i = blocks.length - 1; i >= 0; i--) {
const block = blocks[i] const block = blocks[i]
@@ -598,7 +487,6 @@ const CopilotMessage: FC<CopilotMessageProps> = memo(
return null return null
} }
// Re-render if the last text block content changed
const prevLastTextContent = getLastBlockContent(prevBlocks as any[], 'text') const prevLastTextContent = getLastBlockContent(prevBlocks as any[], 'text')
const nextLastTextContent = getLastBlockContent(nextBlocks as any[], 'text') const nextLastTextContent = getLastBlockContent(nextBlocks as any[], 'text')
if ( if (
@@ -609,7 +497,6 @@ const CopilotMessage: FC<CopilotMessageProps> = memo(
return false return false
} }
// Re-render if the last thinking block content changed
const prevLastThinkingContent = getLastBlockContent(prevBlocks as any[], 'thinking') const prevLastThinkingContent = getLastBlockContent(prevBlocks as any[], 'thinking')
const nextLastThinkingContent = getLastBlockContent(nextBlocks as any[], 'thinking') const nextLastThinkingContent = getLastBlockContent(nextBlocks as any[], 'thinking')
if ( if (
@@ -620,24 +507,18 @@ const CopilotMessage: FC<CopilotMessageProps> = memo(
return false return false
} }
// Check if tool calls changed
const prevToolCalls = prevMessage.toolCalls || [] const prevToolCalls = prevMessage.toolCalls || []
const nextToolCalls = nextMessage.toolCalls || [] const nextToolCalls = nextMessage.toolCalls || []
if (prevToolCalls.length !== nextToolCalls.length) { if (prevToolCalls.length !== nextToolCalls.length) return false
return false // Tool calls count changed
}
for (let i = 0; i < nextToolCalls.length; i++) { for (let i = 0; i < nextToolCalls.length; i++) {
if (prevToolCalls[i]?.state !== nextToolCalls[i]?.state) { if (prevToolCalls[i]?.state !== nextToolCalls[i]?.state) return false
return false // Tool call state changed
}
} }
return true return true
} }
// For non-streaming messages, do a deeper comparison including tool call states
if ( if (
prevMessage.content !== nextMessage.content || prevMessage.content !== nextMessage.content ||
prevMessage.role !== nextMessage.role || prevMessage.role !== nextMessage.role ||
@@ -647,16 +528,12 @@ const CopilotMessage: FC<CopilotMessageProps> = memo(
return false return false
} }
// Check tool call states for non-streaming messages too
const prevToolCalls = prevMessage.toolCalls || [] const prevToolCalls = prevMessage.toolCalls || []
const nextToolCalls = nextMessage.toolCalls || [] const nextToolCalls = nextMessage.toolCalls || []
for (let i = 0; i < nextToolCalls.length; i++) { for (let i = 0; i < nextToolCalls.length; i++) {
if (prevToolCalls[i]?.state !== nextToolCalls[i]?.state) { if (prevToolCalls[i]?.state !== nextToolCalls[i]?.state) return false
return false // Tool call state changed
}
} }
// Check contentBlocks tool call states
const prevContentBlocks = prevMessage.contentBlocks || [] const prevContentBlocks = prevMessage.contentBlocks || []
const nextContentBlocks = nextMessage.contentBlocks || [] const nextContentBlocks = nextMessage.contentBlocks || []
for (let i = 0; i < nextContentBlocks.length; i++) { for (let i = 0; i < nextContentBlocks.length; i++) {
@@ -667,7 +544,7 @@ const CopilotMessage: FC<CopilotMessageProps> = memo(
nextBlock?.type === 'tool_call' && nextBlock?.type === 'tool_call' &&
prevBlock.toolCall?.state !== nextBlock.toolCall?.state prevBlock.toolCall?.state !== nextBlock.toolCall?.state
) { ) {
return false // ContentBlock tool call state changed return false
} }
} }

View File

@@ -1,4 +1,2 @@
export { useCheckpointManagement } from './use-checkpoint-management' export { useCheckpointManagement } from './use-checkpoint-management'
export { useMessageEditing } from './use-message-editing' export { useMessageEditing } from './use-message-editing'
export { useMessageFeedback } from './use-message-feedback'
export { useSuccessTimers } from './use-success-timers'

View File

@@ -15,6 +15,7 @@ const logger = createLogger('useCheckpointManagement')
* @param messageCheckpoints - Checkpoints for this message * @param messageCheckpoints - Checkpoints for this message
* @param onRevertModeChange - Callback for revert mode changes * @param onRevertModeChange - Callback for revert mode changes
* @param onEditModeChange - Callback for edit mode changes * @param onEditModeChange - Callback for edit mode changes
* @param onCancelEdit - Callback when edit is cancelled
* @returns Checkpoint management utilities * @returns Checkpoint management utilities
*/ */
export function useCheckpointManagement( export function useCheckpointManagement(
@@ -37,17 +38,13 @@ export function useCheckpointManagement(
const { revertToCheckpoint, currentChat } = useCopilotStore() const { revertToCheckpoint, currentChat } = useCopilotStore()
/** /** Initiates checkpoint revert confirmation */
* Handles initiating checkpoint revert
*/
const handleRevertToCheckpoint = useCallback(() => { const handleRevertToCheckpoint = useCallback(() => {
setShowRestoreConfirmation(true) setShowRestoreConfirmation(true)
onRevertModeChange?.(true) onRevertModeChange?.(true)
}, [onRevertModeChange]) }, [onRevertModeChange])
/** /** Confirms and executes checkpoint revert */
* Confirms checkpoint revert and updates state
*/
const handleConfirmRevert = useCallback(async () => { const handleConfirmRevert = useCallback(async () => {
if (messageCheckpoints.length > 0) { if (messageCheckpoints.length > 0) {
const latestCheckpoint = messageCheckpoints[0] const latestCheckpoint = messageCheckpoints[0]
@@ -116,18 +113,13 @@ export function useCheckpointManagement(
onRevertModeChange, onRevertModeChange,
]) ])
/** /** Cancels checkpoint revert */
* Cancels checkpoint revert
*/
const handleCancelRevert = useCallback(() => { const handleCancelRevert = useCallback(() => {
setShowRestoreConfirmation(false) setShowRestoreConfirmation(false)
onRevertModeChange?.(false) onRevertModeChange?.(false)
}, [onRevertModeChange]) }, [onRevertModeChange])
/** /** Reverts to checkpoint then proceeds with pending edit */
* Handles "Continue and revert" action for checkpoint discard modal
* Reverts to checkpoint then proceeds with pending edit
*/
const handleContinueAndRevert = useCallback(async () => { const handleContinueAndRevert = useCallback(async () => {
setIsProcessingDiscard(true) setIsProcessingDiscard(true)
try { try {
@@ -184,9 +176,7 @@ export function useCheckpointManagement(
} }
}, [messageCheckpoints, revertToCheckpoint, message, messages, onEditModeChange, onCancelEdit]) }, [messageCheckpoints, revertToCheckpoint, message, messages, onEditModeChange, onCancelEdit])
/** /** Cancels checkpoint discard and clears pending edit */
* Cancels checkpoint discard and clears pending edit
*/
const handleCancelCheckpointDiscard = useCallback(() => { const handleCancelCheckpointDiscard = useCallback(() => {
setShowCheckpointDiscardModal(false) setShowCheckpointDiscardModal(false)
onEditModeChange?.(false) onEditModeChange?.(false)
@@ -194,11 +184,11 @@ export function useCheckpointManagement(
pendingEditRef.current = null pendingEditRef.current = null
}, [onEditModeChange, onCancelEdit]) }, [onEditModeChange, onCancelEdit])
/** /** Continues with edit without reverting checkpoint */
* Continues with edit WITHOUT reverting checkpoint
*/
const handleContinueWithoutRevert = useCallback(async () => { const handleContinueWithoutRevert = useCallback(async () => {
setShowCheckpointDiscardModal(false) setShowCheckpointDiscardModal(false)
onEditModeChange?.(false)
onCancelEdit?.()
if (pendingEditRef.current) { if (pendingEditRef.current) {
const { message: msg, fileAttachments, contexts } = pendingEditRef.current const { message: msg, fileAttachments, contexts } = pendingEditRef.current
@@ -225,43 +215,34 @@ export function useCheckpointManagement(
} }
}, [message, messages, onEditModeChange, onCancelEdit]) }, [message, messages, onEditModeChange, onCancelEdit])
/** /** Handles keyboard events for confirmation dialogs */
* Handles keyboard events for restore confirmation (Escape/Enter)
*/
useEffect(() => { useEffect(() => {
if (!showRestoreConfirmation) return const isActive = showRestoreConfirmation || showCheckpointDiscardModal
if (!isActive) return
const handleKeyDown = (event: KeyboardEvent) => { const handleKeyDown = (event: KeyboardEvent) => {
if (event.defaultPrevented) return
if (event.key === 'Escape') { if (event.key === 'Escape') {
handleCancelRevert() if (showRestoreConfirmation) handleCancelRevert()
else handleCancelCheckpointDiscard()
} else if (event.key === 'Enter') { } else if (event.key === 'Enter') {
event.preventDefault() event.preventDefault()
handleConfirmRevert() if (showRestoreConfirmation) handleConfirmRevert()
else handleContinueAndRevert()
} }
} }
document.addEventListener('keydown', handleKeyDown) document.addEventListener('keydown', handleKeyDown)
return () => document.removeEventListener('keydown', handleKeyDown) return () => document.removeEventListener('keydown', handleKeyDown)
}, [showRestoreConfirmation, handleCancelRevert, handleConfirmRevert]) }, [
showRestoreConfirmation,
/** showCheckpointDiscardModal,
* Handles keyboard events for checkpoint discard modal (Escape/Enter) handleCancelRevert,
*/ handleConfirmRevert,
useEffect(() => { handleCancelCheckpointDiscard,
if (!showCheckpointDiscardModal) return handleContinueAndRevert,
])
const handleCheckpointDiscardKeyDown = async (event: KeyboardEvent) => {
if (event.key === 'Escape') {
handleCancelCheckpointDiscard()
} else if (event.key === 'Enter') {
event.preventDefault()
await handleContinueAndRevert()
}
}
document.addEventListener('keydown', handleCheckpointDiscardKeyDown)
return () => document.removeEventListener('keydown', handleCheckpointDiscardKeyDown)
}, [showCheckpointDiscardModal, handleCancelCheckpointDiscard, handleContinueAndRevert])
return { return {
// State // State

Some files were not shown because too many files have changed in this diff Show More