Compare commits

..

36 Commits

Author SHA1 Message Date
waleed
e6c7bd3534 feat(kb): added tags information to kb docs table 2025-12-26 02:06:50 -08:00
Waleed
b7f6bab282 feat(tests): added testing package, overhauled tests (#2586)
* feat(tests): added testing package, overhauled tests

* fix build
2025-12-25 16:06:47 -08:00
Waleed
61e7213425 feat(i18n): update translations (#2585)
Co-authored-by: waleedlatif1 <waleedlatif1@users.noreply.github.com>
2025-12-25 13:33:14 -08:00
Waleed
3201abab56 improvement(schedules): use tanstack query to fetch schedule data, cleanup ui on schedule info component (#2584)
* improvement(schedules): use tanstack query to fetch schedule data, cleanup ui on schedule info component

* update trigger-save UI, increase auto disable to 100 consecutive from 10

* updated docs

* consolidate consts
2025-12-25 12:09:58 -08:00
Waleed
d79696beae feat(docs): added vector search (#2583)
* feat(docs): added vector search

* ack comments
2025-12-25 11:00:57 -08:00
Waleed
f604ca39a5 feat(chat-otp): added db fallback for chat otp (#2582)
* feat(chat-otp): added db fallback for chat otp

* ack PR comments
2025-12-25 09:37:20 -08:00
Waleed
26ec12599f improvement(byok): updated styling for byok page (#2581) 2025-12-25 08:36:55 -08:00
Waleed
97372533ec feat(i18n): update translations (#2578) 2025-12-24 23:37:35 -08:00
Vikhyath Mondreti
66766a9d81 improvement(byok): remove web search block exa (#2579)
* remove exa from byok

* improvement(byok): remove web search block exa

* fix autolayout

* fix type
2025-12-24 19:26:48 -08:00
Vikhyath Mondreti
47a259b428 feat(byok): byok for hosted model capabilities (#2574)
* feat(byok): byok for hosted model capabilities

* fix type

* add ignore lint

* accidentally added feature flags

* centralize byok fetch for LLM calls

* remove feature flags ts

* fix tests

* update docs
2025-12-24 18:20:54 -08:00
Waleed
40a6bf5c8c improvement(variables): update workflows to use deployed variables, not local ones to align with the rest of the canvas components (#2577)
* improvement(variables): update workflows to use deployed variables, not local ones to align with the rest of the canvas components

* update change detection to ignore trigger id since it is runtime metadata and not actually required to be redeployed
2025-12-24 17:40:23 -08:00
Waleed
da7eca9590 fix(change-detection): move change detection logic to client-side to prevent unnecessary API calls, consolidate utils (#2576)
* fix(change-detection): move change detection logic to client-side to prevent unnecessary API calls, consolidate utils

* added tests

* ack PR comments

* added isPublished to API response
2025-12-24 17:16:35 -08:00
Waleed
92b2e34d25 feat(autolayout): add fitToView on autolayout and reduce horizontal spacing between blocks (#2575)
* feat(autolayout): add fitToView on autolayout and reduce horizontal spacing between blocks

* remove additional yaml code
2025-12-24 16:19:29 -08:00
Vikhyath Mondreti
77521a3a57 fix(cancel-workflow-exec): move cancellation tracking for multi-task envs to redis (#2573)
* fix(cancel-workflow-exec): move cancellation tracking for multi-task envs to redis

* cleanup cancellation keys after execution
2025-12-24 11:51:09 -08:00
Waleed
cb8b9c547a fix(router): update router to handle azure creds the same way the agent block does (#2572)
* fix(router): update router to handle azure creds the same way the agent block does

* cleanup
2025-12-24 10:22:47 -08:00
Vikhyath Mondreti
b1cd8d151d fix(executor): workflow abort has to send abort signal to route for correct state update (#2571) 2025-12-24 02:50:58 -08:00
Waleed
1145f5c043 fix(shortcut): fixed global keyboard commands provider to follow latest ref pattern (#2569)
* fix(shortcut): fixed global commands provider to follow best practices

* cleanup

* ack PR comment
2025-12-24 00:25:15 -08:00
Waleed
3a50ce4d99 feat(i18n): update translations (#2568)
Co-authored-by: waleedlatif1 <waleedlatif1@users.noreply.github.com>
2025-12-23 19:03:43 -08:00
Waleed
810d2089cf feat(schedules): remove save button for schedules, couple schedule deployment with workflow deployment (#2566)
* feat(schedules): remove save button for schedules, couple schedule deployment with workflow deployment

* added tests

* ack PR comments

* update turborepo

* cleanup, edge cases

* ack PR comment
2025-12-23 18:53:40 -08:00
Vikhyath Mondreti
8c89507247 improvement(logs): state machine of workflow execution (#2560)
* improvement(logs): state machine of workflow execution

* cleanup more code

* fallback consistency

* fix labels

* backfill in migration correctly

* make streaming stop in chat window correctly
2025-12-23 18:27:19 -08:00
Vikhyath Mondreti
169dd4a503 fix(grafana): tool outputs (#2565)
* fix(grafana): list annotations outputs

* fix more grafana tools
2025-12-23 17:23:12 -08:00
Vikhyath Mondreti
dc4e5d3bdc fix(dropbox): access type param pass through to get refresh token (#2564) 2025-12-23 16:32:58 -08:00
Emir Karabeg
31de55cbdf feat(ux): add expandFolder to auto expand folders on nested folder creation (#2562)
Co-authored-by: Cursor Agent <cursoragent@cursor.com>
2025-12-23 16:27:00 -08:00
Waleed
eaca49037d fix(ui): remove css transition on popover and dropdown items to avoid flicker (#2563) 2025-12-23 15:46:27 -08:00
Waleed
2d26c0cb32 feat(i18n): update translations (#2561)
Co-authored-by: waleedlatif1 <waleedlatif1@users.noreply.github.com>
2025-12-23 15:42:54 -08:00
Waleed
cdf3d759b9 fix(jina): removed conditionally included outputs from jina (#2559)
* fix(jina): removed conditionally included outputs from jina

* ack PR comments
2025-12-23 15:20:10 -08:00
Vikhyath Mondreti
bf8fbebe22 improvement(code-quality): centralize regex checks, normalization (#2554)
* improvement(code-quality): centralize regex checks, normalization

* simplify resolution

* fix(copilot): don't allow duplicate name blocks

* centralize uuid check
2025-12-23 15:12:04 -08:00
Waleed
b23299dae4 feat(i18n): update translations (#2558)
Co-authored-by: waleedlatif1 <waleedlatif1@users.noreply.github.com>
2025-12-23 14:54:44 -08:00
Waleed
6c8f1a81c1 feat(tools): added grain and circleback (#2557)
* feat(tools): added grain and circleback

* oauth to pat and tool fix

* updated docs

* remove remaining oauth references

* cleanup

---------

Co-authored-by: aadamgough <adam@sim.ai>
2025-12-23 14:34:12 -08:00
Waleed
2c36926a4e fix(perplexity): remove deprecated perplexity sonar reasoning model (#2556) 2025-12-23 13:24:18 -08:00
Waleed
89c1085950 improvement(vertex): added vertex to all LLM-based blocks, fixed refresh (#2555)
* improvement(vertex): added vertex to all LLM-based blocks, fixed refresh

* fix build
2025-12-23 13:11:56 -08:00
Waleed
4e09c389e8 improvement(usage): update usage limit in realtime, standardize token output object across providers (#2553)
* improvement(usage-limit): update usage in real time, fix token output object

* updated tokenBreakdown to tokens, standardized input/output/total token object type across providers

* update remaining references

* ack PR comment

* remove singleton query client instance from hooks, leave only in zustand
2025-12-23 13:04:47 -08:00
Vikhyath Mondreti
641ac58017 fix(frozen-canvas): need to fetch the deployment version correctly (#2552) 2025-12-23 11:37:07 -08:00
Waleed
6c1e4ff7d6 improvement(oauth): remove unused scope hints (#2551)
* improvement(oauth): remove unused scope hints

* improvement(oauth): remove scopeHints and extraneous oauth provider data

* cleanup
2025-12-23 11:26:49 -08:00
Waleed
40e30a11e9 improvement(logs): update logs export route to respect filters (#2550) 2025-12-23 10:23:27 -08:00
Vikhyath Mondreti
d1ebad912e fix memory migration (#2548) 2025-12-23 01:19:29 -08:00
446 changed files with 55109 additions and 9216 deletions

View File

@@ -1,16 +1,126 @@
import { createFromSource } from 'fumadocs-core/search/server'
import { source } from '@/lib/source'
import { sql } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { db, docsEmbeddings } from '@/lib/db'
import { generateSearchEmbedding } from '@/lib/embeddings'
export const revalidate = 3600 // Revalidate every hour
export const runtime = 'nodejs'
export const revalidate = 0
export const { GET } = createFromSource(source, {
localeMap: {
en: { language: 'english' },
es: { language: 'spanish' },
fr: { language: 'french' },
de: { language: 'german' },
// ja and zh are not supported by the stemmer library, so we'll skip language config for them
ja: {},
zh: {},
},
})
/**
* Hybrid search API endpoint
* - English: Vector embeddings + keyword search
* - Other languages: Keyword search only
*/
/**
 * Hybrid documentation search endpoint.
 *
 * - English (`locale=en`): merges pgvector embedding-similarity results with
 *   PostgreSQL full-text (keyword) results, interleaving them round-robin
 *   and deduplicating by chunk id.
 * - Other locales: keyword search only, using the locale's text-search
 *   configuration (falls back to 'simple' for unsupported languages).
 *
 * Query parameters:
 * - `query` (or `q`): search string; empty/blank input yields an empty array.
 * - `locale`: UI locale, defaults to 'en'.
 * - `limit`: maximum number of results, defaults to 10, clamped to 1..100.
 *
 * Always responds with a JSON array; on failure the error is logged and an
 * empty array is returned so the search UI degrades gracefully.
 */
export async function GET(request: NextRequest) {
  try {
    const searchParams = request.nextUrl.searchParams
    const query = searchParams.get('query') || searchParams.get('q') || ''
    const locale = searchParams.get('locale') || 'en'

    // Sanitize the limit. `Number.parseInt` returns NaN for non-numeric
    // input (e.g. `?limit=abc`), which would otherwise propagate into the
    // SQL LIMIT clause and `Array.prototype.slice(0, NaN)`, silently
    // returning zero results. Clamp to a sane positive range instead.
    const parsedLimit = Number.parseInt(searchParams.get('limit') || '10', 10)
    const limit = Number.isNaN(parsedLimit) ? 10 : Math.min(Math.max(parsedLimit, 1), 100)

    if (!query || query.trim().length === 0) {
      return NextResponse.json([])
    }

    // Over-fetch candidates so the merge/dedupe step below has material
    // from both result sets before trimming to `limit`.
    const candidateLimit = limit * 3
    const similarityThreshold = 0.6

    // Map UI locales to PostgreSQL text-search configurations.
    const localeMap: Record<string, string> = {
      en: 'english',
      es: 'spanish',
      fr: 'french',
      de: 'german',
      ja: 'simple', // PostgreSQL doesn't have Japanese support, use simple
      zh: 'simple', // PostgreSQL doesn't have Chinese support, use simple
    }
    const tsConfig = localeMap[locale] || 'simple'

    // Embeddings are only queried for English content, so vector search is
    // restricted to the English locale.
    const useVectorSearch = locale === 'en'

    // Shared row shape produced by both the vector and keyword queries.
    type SearchRow = {
      chunkId: string
      chunkText: string
      sourceDocument: string
      sourceLink: string
      headerText: string
      headerLevel: number
      similarity: number
      searchType: string
    }

    let vectorResults: SearchRow[] = []

    if (useVectorSearch) {
      const queryEmbedding = await generateSearchEmbedding(query)
      // `<=>` is pgvector's cosine-distance operator; similarity = 1 - distance.
      // The query string and embedding are bound as parameters via the
      // drizzle `sql` template, so they are not interpolated into raw SQL.
      vectorResults = await db
        .select({
          chunkId: docsEmbeddings.chunkId,
          chunkText: docsEmbeddings.chunkText,
          sourceDocument: docsEmbeddings.sourceDocument,
          sourceLink: docsEmbeddings.sourceLink,
          headerText: docsEmbeddings.headerText,
          headerLevel: docsEmbeddings.headerLevel,
          similarity: sql<number>`1 - (${docsEmbeddings.embedding} <=> ${JSON.stringify(queryEmbedding)}::vector)`,
          searchType: sql<string>`'vector'`,
        })
        .from(docsEmbeddings)
        .where(
          sql`1 - (${docsEmbeddings.embedding} <=> ${JSON.stringify(queryEmbedding)}::vector) >= ${similarityThreshold}`
        )
        .orderBy(sql`${docsEmbeddings.embedding} <=> ${JSON.stringify(queryEmbedding)}::vector`)
        .limit(candidateLimit)
    }

    // Keyword search against the precomputed tsvector column, ranked by
    // `ts_rank`. Runs for every locale, including English.
    const keywordResults: SearchRow[] = await db
      .select({
        chunkId: docsEmbeddings.chunkId,
        chunkText: docsEmbeddings.chunkText,
        sourceDocument: docsEmbeddings.sourceDocument,
        sourceLink: docsEmbeddings.sourceLink,
        headerText: docsEmbeddings.headerText,
        headerLevel: docsEmbeddings.headerLevel,
        similarity: sql<number>`ts_rank(${docsEmbeddings.chunkTextTsv}, plainto_tsquery(${tsConfig}, ${query}))`,
        searchType: sql<string>`'keyword'`,
      })
      .from(docsEmbeddings)
      .where(sql`${docsEmbeddings.chunkTextTsv} @@ plainto_tsquery(${tsConfig}, ${query})`)
      .orderBy(
        sql`ts_rank(${docsEmbeddings.chunkTextTsv}, plainto_tsquery(${tsConfig}, ${query})) DESC`
      )
      .limit(candidateLimit)

    // Interleave the two ranked lists round-robin (vector first at each
    // index), skipping chunks already taken from the other list.
    const seenIds = new Set<string>()
    const mergedResults: SearchRow[] = []
    for (let i = 0; i < Math.max(vectorResults.length, keywordResults.length); i++) {
      if (i < vectorResults.length && !seenIds.has(vectorResults[i].chunkId)) {
        mergedResults.push(vectorResults[i])
        seenIds.add(vectorResults[i].chunkId)
      }
      if (i < keywordResults.length && !seenIds.has(keywordResults[i].chunkId)) {
        mergedResults.push(keywordResults[i])
        seenIds.add(keywordResults[i].chunkId)
      }
    }

    const filteredResults = mergedResults.slice(0, limit)

    // Shape rows into the fumadocs search-result format: title from the
    // chunk's header (falling back to the document name) and breadcrumbs
    // derived from the document path.
    const searchResults = filteredResults.map((result) => {
      const title = result.headerText || result.sourceDocument.replace('.mdx', '')
      const pathParts = result.sourceDocument
        .replace('.mdx', '')
        .split('/')
        .map((part) => part.charAt(0).toUpperCase() + part.slice(1))
      return {
        id: result.chunkId,
        type: 'page' as const,
        url: result.sourceLink,
        content: title,
        breadcrumbs: pathParts,
      }
    })

    return NextResponse.json(searchResults)
  } catch (error) {
    // Swallow errors deliberately: search should never break the docs UI.
    console.error('Semantic search error:', error)
    return NextResponse.json([])
  }
}

File diff suppressed because one or more lines are too long

View File

@@ -13,6 +13,7 @@ import {
BrainIcon,
BrowserUseIcon,
CalendlyIcon,
CirclebackIcon,
ClayIcon,
ConfluenceIcon,
CursorIcon,
@@ -40,6 +41,7 @@ import {
GoogleSlidesIcon,
GoogleVaultIcon,
GrafanaIcon,
GrainIcon,
HubspotIcon,
HuggingFaceIcon,
HunterIOIcon,
@@ -128,6 +130,7 @@ export const blockTypeToIconMap: Record<string, IconComponent> = {
asana: AsanaIcon,
browser_use: BrowserUseIcon,
calendly: CalendlyIcon,
circleback: CirclebackIcon,
clay: ClayIcon,
confluence: ConfluenceIcon,
cursor: CursorIcon,
@@ -154,6 +157,7 @@ export const blockTypeToIconMap: Record<string, IconComponent> = {
google_slides: GoogleSlidesIcon,
google_vault: GoogleVaultIcon,
grafana: GrafanaIcon,
grain: GrainIcon,
hubspot: HubspotIcon,
huggingface: HuggingFaceIcon,
hunter: HunterIOIcon,

View File

@@ -105,28 +105,32 @@ Die Modellaufschlüsselung zeigt:
Die angezeigten Preise entsprechen den Tarifen vom 10. September 2025. Überprüfen Sie die Dokumentation der Anbieter für aktuelle Preise.
</Callout>
## Bring Your Own Key (BYOK)
Sie können Ihre eigenen API-Schlüssel für gehostete Modelle (OpenAI, Anthropic, Google, Mistral) unter **Einstellungen → BYOK** verwenden, um Basispreise zu zahlen. Schlüssel werden verschlüsselt und gelten arbeitsbereichsweit.
## Strategien zur Kostenoptimierung
- **Modellauswahl**: Wählen Sie Modelle basierend auf der Komplexität der Aufgabe. Einfache Aufgaben können GPT-4.1-nano verwenden, während komplexes Denken möglicherweise o1 oder Claude Opus erfordert.
- **Prompt-Engineering**: Gut strukturierte, präzise Prompts reduzieren den Token-Verbrauch ohne Qualitätseinbußen.
- **Modellauswahl**: Wählen Sie Modelle basierend auf der Aufgabenkomplexität. Einfache Aufgaben können GPT-4.1-nano verwenden, während komplexes Reasoning o1 oder Claude Opus erfordern könnte.
- **Prompt Engineering**: Gut strukturierte, prägnante Prompts reduzieren den Token-Verbrauch ohne Qualitätsverlust.
- **Lokale Modelle**: Verwenden Sie Ollama oder VLLM für unkritische Aufgaben, um API-Kosten vollständig zu eliminieren.
- **Caching und Wiederverwendung**: Speichern Sie häufig verwendete Ergebnisse in Variablen oder Dateien, um wiederholte KI-Modellaufrufe zu vermeiden.
- **Batch-Verarbeitung**: Verarbeiten Sie mehrere Elemente in einer einzigen KI-Anfrage anstatt einzelne Aufrufe zu tätigen.
- **Caching und Wiederverwendung**: Speichern Sie häufig verwendete Ergebnisse in Variablen oder Dateien, um wiederholte AI-Modellaufrufe zu vermeiden.
- **Batch-Verarbeitung**: Verarbeiten Sie mehrere Elemente in einer einzigen AI-Anfrage, anstatt einzelne Aufrufe zu tätigen.
## Nutzungsüberwachung
Überwachen Sie Ihre Nutzung und Abrechnung unter Einstellungen → Abonnement:
- **Aktuelle Nutzung**: Echtzeit-Nutzung und -Kosten für den aktuellen Zeitraum
- **Nutzungslimits**: Plangrenzen mit visuellen Fortschrittsanzeigen
- **Aktuelle Nutzung**: Echtzeit-Nutzung und Kosten für den aktuellen Zeitraum
- **Nutzungslimits**: Plan-Limits mit visuellen Fortschrittsindikatoren
- **Abrechnungsdetails**: Prognostizierte Gebühren und Mindestverpflichtungen
- **Planverwaltung**: Upgrade-Optionen und Abrechnungsverlauf
- **Plan-Verwaltung**: Upgrade-Optionen und Abrechnungsverlauf
### Programmatische Nutzungsverfolgung
### Programmatisches Nutzungs-Tracking
Sie können Ihre aktuelle Nutzung und Limits programmatisch über die API abfragen:
**Endpunkt:**
**Endpoint:**
```text
GET /api/users/me/usage-limits
@@ -172,69 +176,69 @@ curl -X GET -H "X-API-Key: YOUR_API_KEY" -H "Content-Type: application/json" htt
```
**Rate-Limit-Felder:**
- `requestsPerMinute`: Dauerhafte Rate-Begrenzung (Tokens werden mit dieser Rate aufgefüllt)
- `maxBurst`: Maximale Tokens, die Sie ansammeln können (Burst-Kapazität)
- `remaining`: Aktuell verfügbare Tokens (können bis zu `maxBurst` sein)
- `requestsPerMinute`: Dauerhaftes Rate-Limit (Tokens werden mit dieser Rate aufgefüllt)
- `maxBurst`: Maximale Tokens, die Sie akkumulieren können (Burst-Kapazität)
- `remaining`: Aktuell verfügbare Tokens (kann bis zu `maxBurst` betragen)
**Antwortfelder:**
- `currentPeriodCost` spiegelt die Nutzung in der aktuellen Abrechnungsperiode wider
- `limit` wird von individuellen Limits (Free/Pro) oder gepoolten Organisationslimits (Team/Enterprise) abgeleitet
- `plan` ist der aktive Plan mit der höchsten Priorität, der mit Ihrem Benutzer verknüpft ist
- `currentPeriodCost` spiegelt die Nutzung im aktuellen Abrechnungszeitraum wider
- `limit` wird aus individuellen Limits (Free/Pro) oder gepoolten Organisationslimits (Team/Enterprise) abgeleitet
- `plan` ist der Plan mit der höchsten Priorität, der Ihrem Benutzer zugeordnet ist
## Plan-Limits
Verschiedene Abonnementpläne haben unterschiedliche Nutzungslimits:
Verschiedene Abonnement-Pläne haben unterschiedliche Nutzungslimits:
| Plan | Monatliches Nutzungslimit | Ratenlimits (pro Minute) |
|------|-------------------|-------------------------|
| **Free** | 20 $ | 5 synchron, 10 asynchron |
| **Pro** | 100 $ | 10 synchron, 50 asynchron |
| **Team** | 500 $ (gepoolt) | 50 synchron, 100 asynchron |
| **Free** | 20 $ | 5 sync, 10 async |
| **Pro** | 100 $ | 10 sync, 50 async |
| **Team** | 500 $ (gemeinsam) | 50 sync, 100 async |
| **Enterprise** | Individuell | Individuell |
## Abrechnungsmodell
Sim verwendet ein **Basisabonnement + Mehrverbrauch**-Abrechnungsmodell:
Sim verwendet ein **Basis-Abonnement + Mehrverbrauch**-Abrechnungsmodell:
### Wie es funktioniert
### So funktioniert es
**Pro-Plan ($20/Monat):**
- Monatliches Abonnement beinhaltet $20 Nutzung
- Nutzung unter $20 → Keine zusätzlichen Kosten
- Nutzung über $20 → Zahlen Sie den Mehrverbrauch am Monatsende
- Beispiel: $35 Nutzung = $20 (Abonnement) + $15 (Mehrverbrauch)
**Pro-Plan (20 $/Monat):**
- Monatsabonnement beinhaltet 20 $ Nutzung
- Nutzung unter 20 $ → Keine zusätzlichen Gebühren
- Nutzung über 20 $ → Mehrverbrauch am Monatsende zahlen
- Beispiel: 35 $ Nutzung = 20 $ (Abonnement) + 15 $ (Mehrverbrauch)
**Team-Plan ($40/Benutzer/Monat):**
- Gepoolte Nutzung für alle Teammitglieder
- Mehrverbrauch wird aus der Gesamtnutzung des Teams berechnet
**Team-Plan (40 $/Platz/Monat):**
- Gemeinsame Nutzung über alle Teammitglieder
- Mehrverbrauch wird aus der gesamten Team-Nutzung berechnet
- Organisationsinhaber erhält eine Rechnung
**Enterprise-Pläne:**
- Fester monatlicher Preis, kein Mehrverbrauch
- Fester Monatspreis, kein Mehrverbrauch
- Individuelle Nutzungslimits gemäß Vereinbarung
### Schwellenwert-Abrechnung
Wenn der nicht abgerechnete Mehrverbrauch $50 erreicht, berechnet Sim automatisch den gesamten nicht abgerechneten Betrag.
Wenn der nicht abgerechnete Mehrverbrauch 50 $ erreicht, rechnet Sim automatisch den gesamten nicht abgerechneten Betrag ab.
**Beispiel:**
- Tag 10: $70 Mehrverbrauch → Sofortige Abrechnung von $70
- Tag 15: Zusätzliche $35 Nutzung ($105 insgesamt) → Bereits abgerechnet, keine Aktion
- Tag 20: Weitere $50 Nutzung ($155 insgesamt, $85 nicht abgerechnet) → Sofortige Abrechnung von $85
- Tag 10: 70 $ Mehrverbrauch → 70 $ sofort abrechnen
- Tag 15: Zusätzliche 35 $ Nutzung (105 $ gesamt) → Bereits abgerechnet, keine Aktion
- Tag 20: Weitere 50 $ Nutzung (155 $ gesamt, 85 $ nicht abgerechnet) → 85 $ sofort abrechnen
Dies verteilt große Überziehungsgebühren über den Monat, anstatt eine große Rechnung am Ende des Abrechnungszeitraums zu erhalten.
Dies verteilt große Mehrverbrauchsgebühren über den Monat, anstatt einer großen Rechnung am Periodenende.
## Best Practices für Kostenmanagement
1. **Regelmäßig überwachen**: Überprüfen Sie Ihr Nutzungs-Dashboard häufig, um Überraschungen zu vermeiden
2. **Budgets festlegen**: Nutzen Sie Planlimits als Leitplanken für Ihre Ausgaben
2. **Budgets festlegen**: Nutzen Sie Plan-Limits als Leitplanken für Ihre Ausgaben
3. **Workflows optimieren**: Überprüfen Sie kostenintensive Ausführungen und optimieren Sie Prompts oder Modellauswahl
4. **Passende Modelle verwenden**: Passen Sie die Modellkomplexität an die Aufgabenanforderungen an
5. **Ähnliche Aufgaben bündeln**: Kombinieren Sie wenn möglich mehrere Anfragen, um den Overhead zu reduzieren
5. **Ähnliche Aufgaben bündeln**: Kombinieren Sie mehrere Anfragen, wenn möglich, um Overhead zu reduzieren
## Nächste Schritte
- Überprüfen Sie Ihre aktuelle Nutzung unter [Einstellungen → Abonnement](https://sim.ai/settings/subscription)
- Erfahren Sie mehr über [Protokollierung](/execution/logging), um Ausführungsdetails zu verfolgen
- Erkunden Sie die [Externe API](/execution/api) für programmatische Kostenüberwachung
- Entdecken Sie die [externe API](/execution/api) für programmatische Kostenüberwachung
- Sehen Sie sich [Workflow-Optimierungstechniken](/blocks) an, um Kosten zu reduzieren

View File

@@ -0,0 +1,59 @@
---
title: Circleback
description: KI-gestützte Meeting-Notizen und Aufgaben
---
import { BlockInfoCard } from "@/components/ui/block-info-card"
<BlockInfoCard
type="circleback"
color="linear-gradient(180deg, #E0F7FA 0%, #FFFFFF 100%)"
/>
{/* MANUAL-CONTENT-START:intro */}
[Circleback](https://circleback.ai/) ist eine KI-gestützte Plattform, die Meeting-Notizen, Aufgaben, Transkripte und Aufzeichnungen für Ihr Team automatisiert. Wenn ein Meeting abgeschlossen ist, verarbeitet Circleback die Konversation und liefert detaillierte Notizen und Aufgaben sowie ein Transkript und eine Aufzeichnung (sofern verfügbar). Dies hilft Teams dabei, Erkenntnisse effizient zu erfassen, Aufgaben zu verteilen und sicherzustellen, dass nichts übersehen wird – alles nahtlos in Ihre Workflows integriert.
Mit der Sim Circleback-Integration können Sie:
- **Detaillierte Meeting-Notizen und Aufgaben erhalten**: Sammeln Sie automatisch gut formatierte Meeting-Zusammenfassungen und verfolgen Sie umsetzbare Aufgaben, die während Ihrer Anrufe besprochen wurden.
- **Auf vollständige Meeting-Aufzeichnungen und Transkripte zugreifen**: Erhalten Sie die vollständige Konversation und die zugehörige Aufzeichnung, um wichtige Momente einfach zu überprüfen oder mit Kollegen zu teilen.
- **Teilnehmerinformationen und Meeting-Kontext erfassen**: Teilnehmerlisten, Meeting-Metadaten und Tags helfen dabei, Ihre Daten organisiert und umsetzbar zu halten.
- **Erkenntnisse direkt in Ihre Workflows liefern**: Lösen Sie Automatisierungen aus oder senden Sie Circleback-Daten an andere Systeme, sobald ein Meeting beendet ist, mithilfe der leistungsstarken Webhook-Trigger von Sim.
**So funktioniert es in Sim:**
Circleback verwendet Webhook-Trigger: Sobald ein Meeting verarbeitet wurde, werden die Daten automatisch an Ihren Agenten oder Ihre Automatisierung übertragen. Sie können weitere Automatisierungen basierend auf folgenden Ereignissen erstellen:
- Meeting abgeschlossen (alle verarbeiteten Daten verfügbar)
- Neue Notizen (Notizen sind verfügbar, noch bevor das Meeting vollständig verarbeitet ist)
- Raw-Webhook-Integration für erweiterte Anwendungsfälle
**Die folgenden Informationen sind in der Circleback-Meeting-Webhook-Payload verfügbar:**
| Feld | Typ | Beschreibung |
|----------------|---------|----------------------------------------------------|
| `id` | number | Circleback Meeting-ID |
| `name` | string | Meeting-Titel |
| `url` | string | Virtueller Meeting-Link (Zoom, Meet, Teams usw.) |
| `createdAt` | string | Zeitstempel der Meeting-Erstellung |
| `duration` | number | Dauer in Sekunden |
| `recordingUrl` | string | Aufzeichnungs-URL (24 Stunden gültig) |
| `tags` | json | Array von Tags |
| `icalUid` | string | Kalender-Event-ID |
| `attendees` | json | Array von Teilnehmer-Objekten |
| `notes` | string | Meeting-Notizen in Markdown |
| `actionItems` | json | Array von Aufgaben |
| `transcript` | json | Array von Transkript-Segmenten |
| `insights` | json | Vom Nutzer erstellte Insights |
| `meeting` | json | Vollständige Meeting-Daten |
Egal, ob Sie sofortige Zusammenfassungen verteilen, Aufgaben protokollieren oder benutzerdefinierte Workflows erstellen möchten, die durch neue Meeting-Daten ausgelöst werden – Circleback und Sim machen es nahtlos, alles rund um Ihre Meetings automatisch zu verwalten.
{/* MANUAL-CONTENT-END */}
## Nutzungsanleitung
Erhalten Sie Meeting-Notizen, Aufgaben, Transkripte und Aufzeichnungen, wenn Meetings verarbeitet werden. Circleback nutzt Webhooks, um Daten an Ihre Workflows zu übermitteln.
## Hinweise
- Kategorie: `triggers`
- Typ: `circleback`

View File

@@ -0,0 +1,218 @@
---
title: Grain
description: Zugriff auf Meeting-Aufzeichnungen, Transkripte und KI-Zusammenfassungen
---
import { BlockInfoCard } from "@/components/ui/block-info-card"
<BlockInfoCard
type="grain"
color="#F6FAF9"
/>
{/* MANUAL-CONTENT-START:intro */}
[Grain](https://grain.com/) ist eine moderne Plattform zum Erfassen, Speichern und Teilen von Meeting-Aufzeichnungen, Transkripten, Highlights und KI-gestützten Zusammenfassungen. Grain ermöglicht es Teams, Gespräche in umsetzbare Erkenntnisse zu verwandeln und alle über wichtige Momente aus Meetings auf dem Laufenden zu halten.
Mit Grain können Sie:
- **Auf durchsuchbare Aufzeichnungen und Transkripte zugreifen**: Finden und überprüfen Sie jedes Meeting nach Stichwort, Teilnehmer oder Thema.
- **Highlights und Clips teilen**: Erfassen Sie wichtige Momente und teilen Sie kurze Video-/Audio-Highlights in Ihrem Team oder in Workflows.
- **KI-generierte Zusammenfassungen erhalten**: Erstellen Sie automatisch Meeting-Zusammenfassungen, Aktionspunkte und wichtige Erkenntnisse mithilfe der fortschrittlichen KI von Grain.
- **Meetings nach Team oder Typ organisieren**: Taggen und kategorisieren Sie Aufzeichnungen für einfachen Zugriff und Reporting.
Die Sim-Grain-Integration ermöglicht es Ihren Agenten:
- Meeting-Aufzeichnungen und Details nach flexiblen Filtern (Datum/Uhrzeit, Teilnehmer, Team usw.) aufzulisten, zu suchen und abzurufen.
- Auf KI-Zusammenfassungen, Teilnehmer, Highlights und andere Metadaten für Meetings zuzugreifen, um Automatisierungen oder Analysen zu unterstützen.
- Workflows auszulösen, sobald neue Meetings verarbeitet, Zusammenfassungen generiert oder Highlights über Grain-Webhooks erstellt werden.
- Grain-Daten einfach in andere Tools zu überführen oder Teammitglieder zu benachrichtigen, sobald etwas Wichtiges in einem Meeting passiert.
Ob Sie Follow-up-Aktionen automatisieren, wichtige Gespräche dokumentieren oder Erkenntnisse in Ihrer Organisation sichtbar machen möchten – Grain und Sim machen es einfach, Meeting-Intelligence mit Ihren Workflows zu verbinden.
{/* MANUAL-CONTENT-END */}
## Nutzungsanweisungen
Integrieren Sie Grain in Ihren Workflow. Greifen Sie auf Meeting-Aufzeichnungen, Transkripte, Highlights und KI-generierte Zusammenfassungen zu. Kann auch Workflows basierend auf Grain-Webhook-Ereignissen auslösen.
## Tools
### `grain_list_recordings`
Aufzeichnungen von Grain mit optionalen Filtern und Paginierung auflisten
#### Eingabe
| Parameter | Typ | Erforderlich | Beschreibung |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Ja | Grain API-Schlüssel \(Personal Access Token\) |
| `cursor` | string | Nein | Paginierungs-Cursor für nächste Seite |
| `beforeDatetime` | string | Nein | Nur Aufzeichnungen vor diesem ISO8601-Zeitstempel |
| `afterDatetime` | string | Nein | Nur Aufzeichnungen nach diesem ISO8601-Zeitstempel |
| `participantScope` | string | Nein | Filter: "internal" oder "external" |
| `titleSearch` | string | Nein | Suchbegriff zum Filtern nach Aufzeichnungstitel |
| `teamId` | string | Nein | Nach Team-UUID filtern |
| `meetingTypeId` | string | Nein | Nach Meeting-Typ-UUID filtern |
| `includeHighlights` | boolean | Nein | Highlights/Clips in Antwort einschließen |
| `includeParticipants` | boolean | Nein | Teilnehmerliste in Antwort einschließen |
| `includeAiSummary` | boolean | Nein | KI-generierte Zusammenfassung einschließen |
#### Ausgabe
| Parameter | Typ | Beschreibung |
| --------- | ---- | ----------- |
| `recordings` | array | Array von Aufzeichnungsobjekten |
### `grain_get_recording`
Details einer einzelnen Aufzeichnung nach ID abrufen
#### Eingabe
| Parameter | Typ | Erforderlich | Beschreibung |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Ja | Grain API-Schlüssel \(Personal Access Token\) |
| `recordingId` | string | Ja | Die Aufzeichnungs-UUID |
| `includeHighlights` | boolean | Nein | Highlights/Clips einschließen |
| `includeParticipants` | boolean | Nein | Teilnehmerliste einschließen |
| `includeAiSummary` | boolean | Nein | KI-Zusammenfassung einschließen |
| `includeCalendarEvent` | boolean | Nein | Kalenderereignisdaten einschließen |
| `includeHubspot` | boolean | Nein | HubSpot-Verknüpfungen einschließen |
#### Ausgabe
| Parameter | Typ | Beschreibung |
| --------- | ---- | ----------- |
| `id` | string | Aufnahme-UUID |
| `title` | string | Aufnahmetitel |
| `start_datetime` | string | ISO8601-Startzeitstempel |
| `end_datetime` | string | ISO8601-Endzeitstempel |
| `duration_ms` | number | Dauer in Millisekunden |
| `media_type` | string | audio, transcript oder video |
| `source` | string | Aufnahmequelle \(zoom, meet, teams, etc.\) |
| `url` | string | URL zur Ansicht in Grain |
| `thumbnail_url` | string | Vorschaubild-URL |
| `tags` | array | Array von Tag-Strings |
| `teams` | array | Teams, zu denen die Aufnahme gehört |
| `meeting_type` | object | Meeting-Typ-Informationen \(id, name, scope\) |
| `highlights` | array | Highlights \(falls enthalten\) |
| `participants` | array | Teilnehmer \(falls enthalten\) |
| `ai_summary` | object | KI-Zusammenfassungstext \(falls enthalten\) |
| `calendar_event` | object | Kalenderereignisdaten \(falls enthalten\) |
| `hubspot` | object | HubSpot-Verknüpfungen \(falls enthalten\) |
### `grain_get_transcript`
Vollständiges Transkript einer Aufnahme abrufen
#### Eingabe
| Parameter | Typ | Erforderlich | Beschreibung |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Ja | Grain-API-Schlüssel \(Personal Access Token\) |
| `recordingId` | string | Ja | Die Aufnahme-UUID |
#### Ausgabe
| Parameter | Typ | Beschreibung |
| --------- | ---- | ----------- |
| `transcript` | array | Array von Transkriptabschnitten |
### `grain_list_teams`
Alle Teams im Workspace auflisten
#### Eingabe
| Parameter | Typ | Erforderlich | Beschreibung |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Ja | Grain API-Schlüssel \(Personal Access Token\) |
#### Ausgabe
| Parameter | Typ | Beschreibung |
| --------- | ---- | ----------- |
| `teams` | array | Array von Team-Objekten |
### `grain_list_meeting_types`
Alle Meeting-Typen im Workspace auflisten
#### Eingabe
| Parameter | Typ | Erforderlich | Beschreibung |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Ja | Grain API-Schlüssel \(Personal Access Token\) |
#### Ausgabe
| Parameter | Typ | Beschreibung |
| --------- | ---- | ----------- |
| `meeting_types` | array | Array von Meeting-Typ-Objekten |
### `grain_create_hook`
Einen Webhook erstellen, um Aufzeichnungs-Events zu empfangen
#### Eingabe
| Parameter | Typ | Erforderlich | Beschreibung |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Ja | Grain API-Schlüssel \(Personal Access Token\) |
| `hookUrl` | string | Ja | Webhook-Endpunkt-URL \(muss mit 2xx antworten\) |
| `filterBeforeDatetime` | string | Nein | Filter: Aufzeichnungen vor diesem Datum |
| `filterAfterDatetime` | string | Nein | Filter: Aufzeichnungen nach diesem Datum |
| `filterParticipantScope` | string | Nein | Filter: "internal" oder "external" |
| `filterTeamId` | string | Nein | Filter: spezifische Team-UUID |
| `filterMeetingTypeId` | string | Nein | Filter: spezifischer Meeting-Typ |
| `includeHighlights` | boolean | Nein | Highlights in Webhook-Payload einschließen |
| `includeParticipants` | boolean | Nein | Teilnehmer in Webhook-Payload einschließen |
| `includeAiSummary` | boolean | Nein | KI-Zusammenfassung in Webhook-Payload einschließen |
#### Ausgabe
| Parameter | Typ | Beschreibung |
| --------- | ---- | ----------- |
| `id` | string | Hook-UUID |
| `enabled` | boolean | Ob der Hook aktiv ist |
| `hook_url` | string | Die Webhook-URL |
| `filter` | object | Angewendete Filter |
| `include` | object | Enthaltene Felder |
| `inserted_at` | string | ISO8601-Erstellungszeitstempel |
### `grain_list_hooks`
Alle Webhooks für das Konto auflisten
#### Eingabe
| Parameter | Typ | Erforderlich | Beschreibung |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Ja | Grain-API-Schlüssel \(Personal Access Token\) |
#### Ausgabe
| Parameter | Typ | Beschreibung |
| --------- | ---- | ----------- |
| `hooks` | array | Array von Hook-Objekten |
### `grain_delete_hook`
Einen Webhook anhand der ID löschen
#### Eingabe
| Parameter | Typ | Erforderlich | Beschreibung |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Ja | Grain-API-Schlüssel \(Personal Access Token\) |
| `hookId` | string | Ja | Die zu löschende Hook-UUID |
#### Ausgabe
| Parameter | Typ | Beschreibung |
| --------- | ---- | ----------- |
| `success` | boolean | True, wenn der Webhook erfolgreich gelöscht wurde |
## Hinweise
- Kategorie: `tools`
- Typ: `grain`

View File

@@ -61,8 +61,6 @@ Extrahieren und verarbeiten Sie Webinhalte in sauberen, LLM-freundlichen Text mi
| Parameter | Typ | Beschreibung |
| --------- | ---- | ----------- |
| `content` | string | Der extrahierte Inhalt von der URL, verarbeitet zu sauberem, LLM-freundlichem Text |
| `links` | array | Liste der auf der Seite gefundenen Links (wenn gatherLinks oder withLinksummary aktiviert ist) |
| `images` | array | Liste der auf der Seite gefundenen Bilder (wenn withImagesummary aktiviert ist) |
### `jina_search`

View File

@@ -42,13 +42,14 @@ Senden Sie eine Chat-Completion-Anfrage an jeden unterstützten LLM-Anbieter
| `model` | string | Ja | Das zu verwendende Modell \(z. B. gpt-4o, claude-sonnet-4-5, gemini-2.0-flash\) |
| `systemPrompt` | string | Nein | System-Prompt zur Festlegung des Verhaltens des Assistenten |
| `context` | string | Ja | Die Benutzernachricht oder der Kontext, der an das Modell gesendet werden soll |
| `apiKey` | string | Nein | API-Schlüssel für den Anbieter \(verwendet Plattform-Schlüssel, falls nicht für gehostete Modelle angegeben\) |
| `temperature` | number | Nein | Temperatur für die Antwortgenerierung \(0-2\) |
| `apiKey` | string | Nein | API-Schlüssel für den Anbieter \(verwendet den Plattformschlüssel, falls nicht für gehostete Modelle angegeben\) |
| `temperature` | number | Nein | Temperatur für die Antwortgenerierung \(0-2\) |
| `maxTokens` | number | Nein | Maximale Anzahl von Tokens in der Antwort |
| `azureEndpoint` | string | Nein | Azure OpenAI-Endpunkt-URL |
| `azureApiVersion` | string | Nein | Azure OpenAI-API-Version |
| `vertexProject` | string | Nein | Google Cloud-Projekt-ID für Vertex AI |
| `vertexLocation` | string | Nein | Google Cloud-Standort für Vertex AI \(Standard: us-central1\) |
| `vertexCredential` | string | Nein | Google Cloud OAuth-Anmeldeinformations-ID für Vertex AI |
#### Ausgabe

View File

@@ -5,7 +5,6 @@ title: Zeitplan
import { Callout } from 'fumadocs-ui/components/callout'
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
import { Image } from '@/components/ui/image'
import { Video } from '@/components/ui/video'
Der Zeitplan-Block löst Workflows automatisch nach einem wiederkehrenden Zeitplan zu bestimmten Intervallen oder Zeiten aus.
@@ -21,67 +20,58 @@ Der Zeitplan-Block löst Workflows automatisch nach einem wiederkehrenden Zeitpl
## Zeitplan-Optionen
Konfigurieren Sie, wann Ihr Workflow ausgeführt wird, mit den Dropdown-Optionen:
Konfigurieren Sie, wann Ihr Workflow ausgeführt wird:
<Tabs items={['Einfache Intervalle', 'Cron-Ausdrücke']}>
<Tab>
<ul className="list-disc space-y-1 pl-6">
<li><strong>Alle paar Minuten</strong>: 5, 15, 30 Minuten-Intervalle</li>
<li><strong>Stündlich</strong>: Jede Stunde oder alle paar Stunden</li>
<li><strong>Täglich</strong>: Einmal oder mehrmals pro Tag</li>
<li><strong>Wöchentlich</strong>: Bestimmte Wochentage</li>
<li><strong>Monatlich</strong>: Bestimmte Tage des Monats</li>
<li><strong>Alle X Minuten</strong>: Ausführung in Minutenintervallen (1-1440)</li>
<li><strong>Stündlich</strong>: Ausführung zu einer bestimmten Minute jeder Stunde</li>
<li><strong>Täglich</strong>: Ausführung zu einer bestimmten Uhrzeit jeden Tag</li>
<li><strong>Wöchentlich</strong>: Ausführung an einem bestimmten Tag und einer bestimmten Uhrzeit jede Woche</li>
<li><strong>Monatlich</strong>: Ausführung an einem bestimmten Tag und einer bestimmten Uhrzeit jeden Monat</li>
</ul>
</Tab>
<Tab>
<p>Verwenden Sie Cron-Ausdrücke für erweiterte Zeitplanung:</p>
<p>Verwenden Sie Cron-Ausdrücke für erweiterte Planung:</p>
<div className="text-sm space-y-1">
<div><code>0 9 * * 1-5</code> - Jeden Wochentag um 9 Uhr</div>
<div><code>*/15 * * * *</code> - Alle 15 Minuten</div>
<div><code>0 0 1 * *</code> - Am ersten Tag jedes Monats</div>
<div><code>0 0 1 * *</code> - Erster Tag jedes Monats</div>
</div>
</Tab>
</Tabs>
## Zeitpläne konfigurieren
## Aktivierung
Wenn ein Workflow geplant ist:
- Der Zeitplan wird **aktiv** und zeigt die nächste Ausführungszeit an
- Klicken Sie auf die Schaltfläche **"Geplant"**, um den Zeitplan zu deaktivieren
- Zeitpläne werden nach **3 aufeinanderfolgenden Fehlern** automatisch deaktiviert
Zeitpläne sind an die Workflow-Bereitstellung gebunden:
<div className="flex justify-center">
<Image
src="/static/blocks/schedule-2.png"
alt="Aktiver Zeitplan-Block"
width={500}
height={400}
className="my-6"
/>
</div>
<div className="flex justify-center">
<Image
src="/static/blocks/schedule-3.png"
alt="Deaktivierter Zeitplan"
width={500}
height={350}
className="my-6"
/>
</div>
<div className="flex justify-center">
<Image
src="/static/blocks/schedule-3.png"
alt="Deaktivierter Zeitplan"
width={500}
height={400}
className="my-6"
/>
</div>
Deaktivierte Zeitpläne zeigen an, wann sie zuletzt aktiv waren. Klicken Sie auf das **"Deaktiviert"**-Badge, um den Zeitplan wieder zu aktivieren.
- **Workflow bereitstellen** → Zeitplan wird aktiv und beginnt mit der Ausführung
- **Workflow-Bereitstellung aufheben** → Zeitplan wird entfernt
- **Workflow erneut bereitstellen** → Zeitplan wird mit aktueller Konfiguration neu erstellt
<Callout>
Zeitplan-Blöcke können keine eingehenden Verbindungen empfangen und dienen ausschließlich als Workflow-Auslöser.
Sie müssen Ihren Workflow bereitstellen, damit der Zeitplan mit der Ausführung beginnt. Konfigurieren Sie den Zeitplan-Block und stellen Sie ihn dann über die Symbolleiste bereit.
</Callout>
## Automatische Deaktivierung
Zeitpläne werden nach **100 aufeinanderfolgenden Fehlern** automatisch deaktiviert, um unkontrollierte Fehler zu verhindern. Bei Deaktivierung:
- Erscheint ein Warnhinweis auf dem Zeitplan-Block
- Die Ausführung des Zeitplans wird gestoppt
- Klicken Sie auf den Hinweis, um den Zeitplan zu reaktivieren
<div className="flex justify-center">
<Image
src="/static/blocks/schedule-3.png"
alt="Deaktivierter Zeitplan"
width={500}
height={400}
className="my-6"
/>
</div>
<Callout>
Zeitplan-Blöcke können keine eingehenden Verbindungen empfangen und dienen ausschließlich als Workflow-Einstiegspunkte.
</Callout>

View File

@@ -104,6 +104,10 @@ The model breakdown shows:
Pricing shown reflects rates as of September 10, 2025. Check provider documentation for current pricing.
</Callout>
## Bring Your Own Key (BYOK)
You can use your own API keys for hosted models (OpenAI, Anthropic, Google, Mistral) in **Settings → BYOK** to pay base prices. Keys are encrypted and apply workspace-wide.
## Cost Optimization Strategies
- **Model Selection**: Choose models based on task complexity. Simple tasks can use GPT-4.1-nano while complex reasoning might need o1 or Claude Opus.

View File

@@ -0,0 +1,64 @@
---
title: Circleback
description: AI-powered meeting notes and action items
---
import { BlockInfoCard } from "@/components/ui/block-info-card"
<BlockInfoCard
type="circleback"
color="linear-gradient(180deg, #E0F7FA 0%, #FFFFFF 100%)"
/>
{/* MANUAL-CONTENT-START:intro */}
[Circleback](https://circleback.ai/) is an AI-powered platform that automates meeting notes, action items, transcripts, and recordings for your team. When a meeting is completed, Circleback processes the conversation and provides detailed notes and action items, along with a transcript and a recording (when available). This helps teams efficiently capture insights, distribute action items, and ensure nothing is missed—all seamlessly integrated into your workflows.
With the Sim Circleback integration, you can:
- **Receive detailed meeting notes and action items**: Automatically collect well-formatted meeting summaries and track actionable tasks discussed during your calls.
- **Access complete meeting recordings and transcripts**: Get the full conversation and the associated recording, making it easy to review key moments or share with colleagues.
- **Capture attendee information and meeting context**: Attendee lists, meeting metadata, and tags help keep your data organized and actionable.
- **Deliver insights directly into your workflows**: Trigger automations or send Circleback data to other systems the moment a meeting is done, using Sim's powerful webhook triggers.
**How it works in Sim:**
Circleback uses webhook triggers: whenever a meeting is processed, data is pushed automatically to your agent or automation. You can build further automations based on:
- Meeting completed (all processed data available)
- New notes (notes ready even before full meeting is processed)
- Raw webhook integration for advanced use cases
**The following information is available in the Circleback meeting webhook payload:**
| Field | Type | Description |
|----------------|---------|----------------------------------------------------|
| `id` | number | Circleback meeting ID |
| `name` | string | Meeting title |
| `url` | string | Virtual meeting URL (Zoom, Meet, Teams, etc.) |
| `createdAt` | string | Meeting creation timestamp |
| `duration` | number | Duration in seconds |
| `recordingUrl` | string | Recording URL (valid 24 hours) |
| `tags` | json | Array of tags |
| `icalUid` | string | Calendar event ID |
| `attendees` | json | Array of attendee objects |
| `notes` | string | Meeting notes in Markdown |
| `actionItems` | json | Array of action items |
| `transcript` | json | Array of transcript segments |
| `insights` | json | User-created insights |
| `meeting` | json | Full meeting payload |
Whether you want to distribute instant summaries, log action items, or build custom workflows triggered by new meeting data, Circleback and Sim make it seamless to handle everything related to your meetings—automatically.
{/* MANUAL-CONTENT-END */}
## Usage Instructions
Receive meeting notes, action items, transcripts, and recordings when meetings are processed. Circleback uses webhooks to push data to your workflows.
## Notes
- Category: `triggers`
- Type: `circleback`

View File

@@ -0,0 +1,223 @@
---
title: Grain
description: Access meeting recordings, transcripts, and AI summaries
---
import { BlockInfoCard } from "@/components/ui/block-info-card"
<BlockInfoCard
type="grain"
color="#F6FAF9"
/>
{/* MANUAL-CONTENT-START:intro */}
[Grain](https://grain.com/) is a modern platform for capturing, storing, and sharing meeting recordings, transcripts, highlights, and AI-powered summaries. Grain enables teams to turn conversations into actionable insights and keep everyone aligned on key moments from meetings.
With Grain, you can:
- **Access searchable recordings and transcripts**: Find and review every meeting by keyword, participant, or topic.
- **Share highlights and clips**: Capture important moments and share short video/audio highlights across your team or workflows.
- **Get AI-generated summaries**: Automatically produce meeting summaries, action items, and key insights using Grain's advanced AI.
- **Organize meetings by team or type**: Tag and categorize recordings for easy access and reporting.
The Sim Grain integration empowers your agents to:
- List, search, and retrieve meeting recordings and details by flexible filters (datetime, participant, team, etc).
- Access AI summaries, participants, highlights, and other metadata for meetings to power automations or analysis.
- Trigger workflows whenever new meetings are processed, summaries are generated, or highlights are created via Grain webhooks.
- Easily bridge Grain data into other tools or notify teammates the moment something important happens in a meeting.
Whether you want to automate follow-up actions, keep records of important conversations, or surface insights across your organization, Grain and Sim make it easy to connect meeting intelligence to your workflows.
{/* MANUAL-CONTENT-END */}
## Usage Instructions
Integrate Grain into your workflow. Access meeting recordings, transcripts, highlights, and AI-generated summaries. Can also trigger workflows based on Grain webhook events.
## Tools
### `grain_list_recordings`
List recordings from Grain with optional filters and pagination
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | Grain API key \(Personal Access Token\) |
| `cursor` | string | No | Pagination cursor for next page |
| `beforeDatetime` | string | No | Only recordings before this ISO8601 timestamp |
| `afterDatetime` | string | No | Only recordings after this ISO8601 timestamp |
| `participantScope` | string | No | Filter: "internal" or "external" |
| `titleSearch` | string | No | Search term to filter by recording title |
| `teamId` | string | No | Filter by team UUID |
| `meetingTypeId` | string | No | Filter by meeting type UUID |
| `includeHighlights` | boolean | No | Include highlights/clips in response |
| `includeParticipants` | boolean | No | Include participant list in response |
| `includeAiSummary` | boolean | No | Include AI-generated summary |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `recordings` | array | Array of recording objects |
### `grain_get_recording`
Get details of a single recording by ID
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | Grain API key \(Personal Access Token\) |
| `recordingId` | string | Yes | The recording UUID |
| `includeHighlights` | boolean | No | Include highlights/clips |
| `includeParticipants` | boolean | No | Include participant list |
| `includeAiSummary` | boolean | No | Include AI summary |
| `includeCalendarEvent` | boolean | No | Include calendar event data |
| `includeHubspot` | boolean | No | Include HubSpot associations |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `id` | string | Recording UUID |
| `title` | string | Recording title |
| `start_datetime` | string | ISO8601 start timestamp |
| `end_datetime` | string | ISO8601 end timestamp |
| `duration_ms` | number | Duration in milliseconds |
| `media_type` | string | audio, transcript, or video |
| `source` | string | Recording source \(zoom, meet, teams, etc.\) |
| `url` | string | URL to view in Grain |
| `thumbnail_url` | string | Thumbnail image URL |
| `tags` | array | Array of tag strings |
| `teams` | array | Teams the recording belongs to |
| `meeting_type` | object | Meeting type info \(id, name, scope\) |
| `highlights` | array | Highlights \(if included\) |
| `participants` | array | Participants \(if included\) |
| `ai_summary` | object | AI summary text \(if included\) |
| `calendar_event` | object | Calendar event data \(if included\) |
| `hubspot` | object | HubSpot associations \(if included\) |
### `grain_get_transcript`
Get the full transcript of a recording
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | Grain API key \(Personal Access Token\) |
| `recordingId` | string | Yes | The recording UUID |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `transcript` | array | Array of transcript sections |
### `grain_list_teams`
List all teams in the workspace
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | Grain API key \(Personal Access Token\) |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `teams` | array | Array of team objects |
### `grain_list_meeting_types`
List all meeting types in the workspace
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | Grain API key \(Personal Access Token\) |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `meeting_types` | array | Array of meeting type objects |
### `grain_create_hook`
Create a webhook to receive recording events
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | Grain API key \(Personal Access Token\) |
| `hookUrl` | string | Yes | Webhook endpoint URL \(must respond 2xx\) |
| `filterBeforeDatetime` | string | No | Filter: recordings before this date |
| `filterAfterDatetime` | string | No | Filter: recordings after this date |
| `filterParticipantScope` | string | No | Filter: "internal" or "external" |
| `filterTeamId` | string | No | Filter: specific team UUID |
| `filterMeetingTypeId` | string | No | Filter: specific meeting type |
| `includeHighlights` | boolean | No | Include highlights in webhook payload |
| `includeParticipants` | boolean | No | Include participants in webhook payload |
| `includeAiSummary` | boolean | No | Include AI summary in webhook payload |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `id` | string | Hook UUID |
| `enabled` | boolean | Whether hook is active |
| `hook_url` | string | The webhook URL |
| `filter` | object | Applied filters |
| `include` | object | Included fields |
| `inserted_at` | string | ISO8601 creation timestamp |
### `grain_list_hooks`
List all webhooks for the account
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | Grain API key \(Personal Access Token\) |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `hooks` | array | Array of hook objects |
### `grain_delete_hook`
Delete a webhook by ID
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | Grain API key \(Personal Access Token\) |
| `hookId` | string | Yes | The hook UUID to delete |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `success` | boolean | True when webhook was successfully deleted |
## Notes
- Category: `tools`
- Type: `grain`

View File

@@ -64,8 +64,6 @@ Extract and process web content into clean, LLM-friendly text using Jina AI Read
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `content` | string | The extracted content from the URL, processed into clean, LLM-friendly text |
| `links` | array | List of links found on the page \(when gatherLinks or withLinksummary is enabled\) |
| `images` | array | List of images found on the page \(when withImagesummary is enabled\) |
### `jina_search`

View File

@@ -9,6 +9,7 @@
"asana",
"browser_use",
"calendly",
"circleback",
"clay",
"confluence",
"cursor",
@@ -35,6 +36,7 @@
"google_slides",
"google_vault",
"grafana",
"grain",
"hubspot",
"huggingface",
"hunter",

View File

@@ -52,6 +52,7 @@ Send a chat completion request to any supported LLM provider
| `azureApiVersion` | string | No | Azure OpenAI API version |
| `vertexProject` | string | No | Google Cloud project ID for Vertex AI |
| `vertexLocation` | string | No | Google Cloud location for Vertex AI \(defaults to us-central1\) |
| `vertexCredential` | string | No | Google Cloud OAuth credential ID for Vertex AI |
#### Output

View File

@@ -5,7 +5,6 @@ title: Schedule
import { Callout } from 'fumadocs-ui/components/callout'
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
import { Image } from '@/components/ui/image'
import { Video } from '@/components/ui/video'
The Schedule block automatically triggers workflows on a recurring schedule at specified intervals or times.
@@ -21,16 +20,16 @@ The Schedule block automatically triggers workflows on a recurring schedule at s
## Schedule Options
Configure when your workflow runs using the dropdown options:
Configure when your workflow runs:
<Tabs items={['Simple Intervals', 'Cron Expressions']}>
<Tab>
<ul className="list-disc space-y-1 pl-6">
<li><strong>Every few minutes</strong>: 5, 15, 30 minute intervals</li>
<li><strong>Hourly</strong>: Every hour or every few hours</li>
<li><strong>Daily</strong>: Once or multiple times per day</li>
<li><strong>Weekly</strong>: Specific days of the week</li>
<li><strong>Monthly</strong>: Specific days of the month</li>
<li><strong>Every X Minutes</strong>: Run at minute intervals (1-1440)</li>
<li><strong>Hourly</strong>: Run at a specific minute each hour</li>
<li><strong>Daily</strong>: Run at a specific time each day</li>
<li><strong>Weekly</strong>: Run on a specific day and time each week</li>
<li><strong>Monthly</strong>: Run on a specific day and time each month</li>
</ul>
</Tab>
<Tab>
@@ -43,24 +42,25 @@ Configure when your workflow runs using the dropdown options:
</Tab>
</Tabs>
## Configuring Schedules
## Activation
When a workflow is scheduled:
- The schedule becomes **active** and shows the next execution time
- Click the **"Scheduled"** button to deactivate the schedule
- Schedules automatically deactivate after **3 consecutive failures**
Schedules are tied to workflow deployment:
<div className="flex justify-center">
<Image
src="/static/blocks/schedule-2.png"
alt="Active Schedule Block"
width={500}
height={400}
className="my-6"
/>
</div>
- **Deploy workflow** → Schedule becomes active and starts running
- **Undeploy workflow** → Schedule is removed
- **Redeploy workflow** → Schedule is recreated with current configuration
## Disabled Schedules
<Callout>
You must deploy your workflow for the schedule to start running. Configure the schedule block, then deploy from the toolbar.
</Callout>
## Automatic Disabling
Schedules automatically disable after **100 consecutive failures** to prevent runaway errors. When disabled:
- A warning badge appears on the schedule block
- The schedule stops executing
- Click the badge to reactivate the schedule
<div className="flex justify-center">
<Image
@@ -72,8 +72,6 @@ When a workflow is scheduled:
/>
</div>
Disabled schedules show when they were last active. Click the **"Disabled"** badge to reactivate the schedule.
<Callout>
Schedule blocks cannot receive incoming connections and serve as pure workflow triggers.
</Callout>
Schedule blocks cannot receive incoming connections and serve as workflow entry points only.
</Callout>

View File

@@ -105,26 +105,30 @@ El desglose del modelo muestra:
Los precios mostrados reflejan las tarifas a partir del 10 de septiembre de 2025. Consulta la documentación del proveedor para conocer los precios actuales.
</Callout>
## Trae tu propia clave (BYOK)
Puedes usar tus propias claves API para modelos alojados (OpenAI, Anthropic, Google, Mistral) en **Configuración → BYOK** para pagar precios base. Las claves están encriptadas y se aplican a todo el espacio de trabajo.
## Estrategias de optimización de costos
- **Selección de modelos**: Elige modelos según la complejidad de la tarea. Las tareas simples pueden usar GPT-4.1-nano mientras que el razonamiento complejo podría necesitar o1 o Claude Opus.
- **Ingeniería de prompts**: Los prompts bien estructurados y concisos reducen el uso de tokens sin sacrificar la calidad.
- **Modelos locales**: Usa Ollama o VLLM para tareas no críticas para eliminar por completo los costos de API.
- **Almacenamiento en caché y reutilización**: Guarda resultados frecuentemente utilizados en variables o archivos para evitar llamadas repetidas al modelo de IA.
- **Procesamiento por lotes**: Procesa múltiples elementos en una sola solicitud de IA en lugar de hacer llamadas individuales.
- **Selección de modelo**: elige modelos según la complejidad de la tarea. Las tareas simples pueden usar GPT-4.1-nano mientras que el razonamiento complejo podría necesitar o1 o Claude Opus.
- **Ingeniería de prompts**: los prompts bien estructurados y concisos reducen el uso de tokens sin sacrificar calidad.
- **Modelos locales**: usa Ollama o VLLM para tareas no críticas para eliminar completamente los costos de API.
- **Almacenamiento en caché y reutilización**: guarda resultados usados frecuentemente en variables o archivos para evitar llamadas repetidas al modelo de IA.
- **Procesamiento por lotes**: procesa múltiples elementos en una sola solicitud de IA en lugar de hacer llamadas individuales.
## Monitoreo de uso
Monitorea tu uso y facturación en Configuración → Suscripción:
- **Uso actual**: Uso y costos en tiempo real para el período actual
- **Límites de uso**: Límites del plan con indicadores visuales de progreso
- **Detalles de facturación**: Cargos proyectados y compromisos mínimos
- **Gestión del plan**: Opciones de actualización e historial de facturación
- **Uso actual**: uso y costos en tiempo real para el período actual
- **Límites de uso**: límites del plan con indicadores visuales de progreso
- **Detalles de facturación**: cargos proyectados y compromisos mínimos
- **Gestión de plan**: opciones de actualización e historial de facturación
### Seguimiento programático de uso
### Seguimiento de uso programático
Puedes consultar tu uso actual y límites de forma programática utilizando la API:
Puedes consultar tu uso y límites actuales de forma programática usando la API:
**Endpoint:**
@@ -135,13 +139,13 @@ GET /api/users/me/usage-limits
**Autenticación:**
- Incluye tu clave API en el encabezado `X-API-Key`
**Ejemplo de solicitud:**
**Solicitud de ejemplo:**
```bash
curl -X GET -H "X-API-Key: YOUR_API_KEY" -H "Content-Type: application/json" https://sim.ai/api/users/me/usage-limits
```
**Ejemplo de respuesta:**
**Respuesta de ejemplo:**
```json
{
@@ -172,14 +176,14 @@ curl -X GET -H "X-API-Key: YOUR_API_KEY" -H "Content-Type: application/json" htt
```
**Campos de límite de tasa:**
- `requestsPerMinute`: Límite de tasa sostenida (los tokens se recargan a esta velocidad)
- `maxBurst`: Máximo de tokens que puedes acumular (capacidad de ráfaga)
- `remaining`: Tokens disponibles actualmente (puede ser hasta `maxBurst`)
- `requestsPerMinute`: límite de tasa sostenida (los tokens se recargan a esta tasa)
- `maxBurst`: tokens máximos que puedes acumular (capacidad de ráfaga)
- `remaining`: tokens actuales disponibles (puede ser hasta `maxBurst`)
**Campos de respuesta:**
- `currentPeriodCost` refleja el uso en el período de facturación actual
- `limit` se deriva de límites individuales (Gratuito/Pro) o límites agrupados de la organización (Equipo/Empresa)
- `plan` es el plan activo de mayor prioridad asociado a tu usuario
- `limit` se deriva de límites individuales (Free/Pro) o límites de organización agrupados (Team/Enterprise)
- `plan` es el plan activo de mayor prioridad asociado con tu usuario
## Límites del plan
@@ -187,10 +191,10 @@ Los diferentes planes de suscripción tienen diferentes límites de uso:
| Plan | Límite de uso mensual | Límites de tasa (por minuto) |
|------|-------------------|-------------------------|
| **Gratis** | $20 | 5 síncronas, 10 asíncronas |
| **Pro** | $100 | 10 síncronas, 50 asíncronas |
| **Equipo** | $500 (compartido) | 50 síncronas, 100 asíncronas |
| **Empresarial** | Personalizado | Personalizado |
| **Gratuito** | $20 | 5 sync, 10 async |
| **Pro** | $100 | 10 sync, 50 async |
| **Equipo** | $500 (compartido) | 50 sync, 100 async |
| **Empresa** | Personalizado | Personalizado |
## Modelo de facturación
@@ -200,16 +204,16 @@ Sim utiliza un modelo de facturación de **suscripción base + excedente**:
**Plan Pro ($20/mes):**
- La suscripción mensual incluye $20 de uso
- Uso por debajo de $20 → Sin cargos adicionales
- Uso por encima de $20 → Pagas el excedente al final del mes
- Uso inferior a $20 → Sin cargos adicionales
- Uso superior a $20 → Paga el excedente al final del mes
- Ejemplo: $35 de uso = $20 (suscripción) + $15 (excedente)
**Plan de Equipo ($40/usuario/mes):**
- Uso agrupado entre todos los miembros del equipo
- Excedente calculado del uso total del equipo
**Plan Equipo ($40/usuario/mes):**
- Uso compartido entre todos los miembros del equipo
- El excedente se calcula a partir del uso total del equipo
- El propietario de la organización recibe una sola factura
**Planes Empresariales:**
**Planes Empresa:**
- Precio mensual fijo, sin excedentes
- Límites de uso personalizados según el acuerdo
@@ -218,23 +222,23 @@ Sim utiliza un modelo de facturación de **suscripción base + excedente**:
Cuando el excedente no facturado alcanza los $50, Sim factura automáticamente el monto total no facturado.
**Ejemplo:**
- Día 10: $70 de excedente → Factura inmediata de $70
- Día 15: $35 adicionales de uso ($105 en total) → Ya facturado, sin acción
- Día 20: Otros $50 de uso ($155 en total, $85 no facturados) → Factura inmediata de $85
- Día 10: $70 de excedente → Factura $70 inmediatamente
- Día 15: $35 adicionales de uso ($105 total) → Ya facturado, sin acción
- Día 20: Otros $50 de uso ($155 total, $85 sin facturar) → Factura $85 inmediatamente
Esto distribuye los cargos por exceso a lo largo del mes en lugar de una gran factura al final del período.
Esto distribuye los cargos por excedentes grandes a lo largo del mes en lugar de una sola factura grande al final del período.
## Mejores prácticas para la gestión de costos
## Mejores prácticas de gestión de costos
1. **Monitorear regularmente**: Revisa tu panel de uso con frecuencia para evitar sorpresas
2. **Establecer presupuestos**: Utiliza los límites del plan como guías para tu gasto
3. **Optimizar flujos de trabajo**: Revisa las ejecuciones de alto costo y optimiza los prompts o la selección de modelos
4. **Usar modelos apropiados**: Ajusta la complejidad del modelo a los requisitos de la tarea
5. **Agrupar tareas similares**: Combina múltiples solicitudes cuando sea posible para reducir la sobrecarga
1. **Monitorea regularmente**: Revisa tu panel de uso con frecuencia para evitar sorpresas
2. **Establece presupuestos**: Usa los límites del plan como barreras de protección para tu gasto
3. **Optimiza flujos de trabajo**: Revisa las ejecuciones de alto costo y optimiza los prompts o la selección de modelos
4. **Usa modelos apropiados**: Ajusta la complejidad del modelo a los requisitos de la tarea
5. **Agrupa tareas similares**: Combina múltiples solicitudes cuando sea posible para reducir la sobrecarga
## Próximos pasos
- Revisa tu uso actual en [Configuración → Suscripción](https://sim.ai/settings/subscription)
- Aprende sobre [Registro](/execution/logging) para seguir los detalles de ejecución
- Explora la [API externa](/execution/api) para el monitoreo programático de costos
- Consulta las [técnicas de optimización de flujo de trabajo](/blocks) para reducir costos
- Aprende sobre [Registro](/execution/logging) para rastrear detalles de ejecución
- Explora la [API externa](/execution/api) para monitoreo programático de costos
- Consulta las [técnicas de optimización de flujos de trabajo](/blocks) para reducir costos

View File

@@ -0,0 +1,59 @@
---
title: Circleback
description: Notas de reuniones e ítems de acción impulsados por IA
---
import { BlockInfoCard } from "@/components/ui/block-info-card"
<BlockInfoCard
type="circleback"
color="linear-gradient(180deg, #E0F7FA 0%, #FFFFFF 100%)"
/>
{/* MANUAL-CONTENT-START:intro */}
[Circleback](https://circleback.ai/) es una plataforma impulsada por IA que automatiza las notas de reuniones, elementos de acción, transcripciones y grabaciones para tu equipo. Cuando se completa una reunión, Circleback procesa la conversación y proporciona notas detalladas y elementos de acción, junto con una transcripción y una grabación (cuando está disponible). Esto ayuda a los equipos a capturar información de manera eficiente, distribuir elementos de acción y asegurar que no se pierda nada, todo integrado sin problemas en tus flujos de trabajo.
Con la integración de Sim Circleback, puedes:
- **Recibir notas detalladas de reuniones y elementos de acción**: Recopila automáticamente resúmenes de reuniones bien formateados y realiza seguimiento de las tareas accionables discutidas durante tus llamadas.
- **Acceder a grabaciones y transcripciones completas de reuniones**: Obtén la conversación completa y la grabación asociada, facilitando la revisión de momentos clave o compartir con colegas.
- **Capturar información de asistentes y contexto de la reunión**: Las listas de asistentes, metadatos de reuniones y etiquetas ayudan a mantener tus datos organizados y accionables.
- **Entregar información directamente en tus flujos de trabajo**: Activa automatizaciones o envía datos de Circleback a otros sistemas en el momento en que finaliza una reunión, usando los potentes activadores webhook de Sim.
**Cómo funciona en Sim:**
Circleback utiliza activadores webhook: cada vez que se procesa una reunión, los datos se envían automáticamente a tu agente o automatización. Puedes crear más automatizaciones basadas en:
- Reunión completada (todos los datos procesados disponibles)
- Nuevas notas (notas listas incluso antes de que se procese la reunión completa)
- Integración webhook sin procesar para casos de uso avanzados
**La siguiente información está disponible en la carga útil del webhook de reunión de Circleback:**
| Campo | Tipo | Descripción |
|----------------|---------|----------------------------------------------------|
| `id` | number | ID de reunión de Circleback |
| `name` | string | Título de la reunión |
| `url` | string | URL de reunión virtual (Zoom, Meet, Teams, etc.) |
| `createdAt` | string | Marca de tiempo de creación de la reunión |
| `duration` | number | Duración en segundos |
| `recordingUrl` | string | URL de grabación (válida 24 horas) |
| `tags` | json | Array de etiquetas |
| `icalUid` | string | ID de evento de calendario |
| `attendees` | json | Array de objetos de asistentes |
| `notes` | string | Notas de la reunión en Markdown |
| `actionItems` | json | Array de elementos de acción |
| `transcript` | json | Array de segmentos de transcripción |
| `insights` | json | Insights creados por el usuario |
| `meeting` | json | Payload completo de la reunión |
Ya sea que quieras distribuir resúmenes instantáneos, registrar elementos de acción o crear flujos de trabajo personalizados activados por nuevos datos de reuniones, Circleback y Sim hacen que sea sencillo manejar todo lo relacionado con tus reuniones, automáticamente.
{/* MANUAL-CONTENT-END */}
## Instrucciones de uso
Recibe notas de reuniones, elementos de acción, transcripciones y grabaciones cuando se procesen las reuniones. Circleback utiliza webhooks para enviar datos a tus flujos de trabajo.
## Notas
- Categoría: `triggers`
- Tipo: `circleback`

View File

@@ -0,0 +1,218 @@
---
title: Grain
description: Accede a grabaciones de reuniones, transcripciones y resúmenes de IA
---
import { BlockInfoCard } from "@/components/ui/block-info-card"
<BlockInfoCard
type="grain"
color="#F6FAF9"
/>
{/* MANUAL-CONTENT-START:intro */}
[Grain](https://grain.com/) es una plataforma moderna para capturar, almacenar y compartir grabaciones de reuniones, transcripciones, momentos destacados y resúmenes generados por IA. Grain permite a los equipos convertir conversaciones en información procesable y mantener a todos alineados con los momentos clave de las reuniones.
Con Grain, puedes:
- **Acceder a grabaciones y transcripciones con búsqueda**: Encuentra y revisa cada reunión por palabra clave, participante o tema.
- **Compartir momentos destacados y clips**: Captura momentos importantes y comparte fragmentos cortos de video/audio en tu equipo o flujos de trabajo.
- **Obtener resúmenes generados por IA**: Produce automáticamente resúmenes de reuniones, elementos de acción e información clave utilizando la IA avanzada de Grain.
- **Organizar reuniones por equipo o tipo**: Etiqueta y categoriza grabaciones para facilitar el acceso y la generación de informes.
La integración de Sim con Grain permite a tus agentes:
- Listar, buscar y recuperar grabaciones de reuniones y detalles mediante filtros flexibles (fecha y hora, participante, equipo, etc.).
- Acceder a resúmenes de IA, participantes, momentos destacados y otros metadatos de reuniones para impulsar automatizaciones o análisis.
- Activar flujos de trabajo cada vez que se procesen nuevas reuniones, se generen resúmenes o se creen momentos destacados a través de webhooks de Grain.
- Conectar fácilmente los datos de Grain con otras herramientas o notificar a los compañeros de equipo en el momento en que sucede algo importante en una reunión.
Ya sea que desees automatizar acciones de seguimiento, mantener registros de conversaciones importantes o destacar información en toda tu organización, Grain y Sim facilitan la conexión de la inteligencia de reuniones con tus flujos de trabajo.
{/* MANUAL-CONTENT-END */}
## Instrucciones de uso
Integra Grain en tu flujo de trabajo. Accede a grabaciones de reuniones, transcripciones, momentos destacados y resúmenes generados por IA. También puedes activar flujos de trabajo basados en eventos de webhook de Grain.
## Herramientas
### `grain_list_recordings`
Lista las grabaciones de Grain con filtros opcionales y paginación
#### Entrada
| Parámetro | Tipo | Requerido | Descripción |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Sí | Clave API de Grain \(token de acceso personal\) |
| `cursor` | string | No | Cursor de paginación para la siguiente página |
| `beforeDatetime` | string | No | Solo grabaciones anteriores a esta marca de tiempo ISO8601 |
| `afterDatetime` | string | No | Solo grabaciones posteriores a esta marca de tiempo ISO8601 |
| `participantScope` | string | No | Filtro: "internal" o "external" |
| `titleSearch` | string | No | Término de búsqueda para filtrar por título de grabación |
| `teamId` | string | No | Filtrar por UUID de equipo |
| `meetingTypeId` | string | No | Filtrar por UUID de tipo de reunión |
| `includeHighlights` | boolean | No | Incluir destacados/clips en la respuesta |
| `includeParticipants` | boolean | No | Incluir lista de participantes en la respuesta |
| `includeAiSummary` | boolean | No | Incluir resumen generado por IA |
#### Salida
| Parámetro | Tipo | Descripción |
| --------- | ---- | ----------- |
| `recordings` | array | Array de objetos de grabación |
### `grain_get_recording`
Obtiene los detalles de una única grabación por ID
#### Entrada
| Parámetro | Tipo | Requerido | Descripción |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Sí | Clave API de Grain \(token de acceso personal\) |
| `recordingId` | string | Sí | El UUID de la grabación |
| `includeHighlights` | boolean | No | Incluir destacados/clips |
| `includeParticipants` | boolean | No | Incluir lista de participantes |
| `includeAiSummary` | boolean | No | Incluir resumen de IA |
| `includeCalendarEvent` | boolean | No | Incluir datos del evento de calendario |
| `includeHubspot` | boolean | No | Incluir asociaciones de HubSpot |
#### Salida
| Parámetro | Tipo | Descripción |
| --------- | ---- | ----------- |
| `id` | string | UUID de la grabación |
| `title` | string | Título de la grabación |
| `start_datetime` | string | Marca de tiempo de inicio ISO8601 |
| `end_datetime` | string | Marca de tiempo de finalización ISO8601 |
| `duration_ms` | number | Duración en milisegundos |
| `media_type` | string | audio, transcript o video |
| `source` | string | Fuente de la grabación \(zoom, meet, teams, etc.\) |
| `url` | string | URL para ver en Grain |
| `thumbnail_url` | string | URL de la imagen en miniatura |
| `tags` | array | Array de cadenas de etiquetas |
| `teams` | array | Equipos a los que pertenece la grabación |
| `meeting_type` | object | Información del tipo de reunión \(id, nombre, alcance\) |
| `highlights` | array | Destacados \(si se incluyen\) |
| `participants` | array | Participantes \(si se incluyen\) |
| `ai_summary` | object | Texto del resumen de IA \(si se incluye\) |
| `calendar_event` | object | Datos del evento de calendario \(si se incluyen\) |
| `hubspot` | object | Asociaciones de HubSpot \(si se incluyen\) |
### `grain_get_transcript`
Obtener la transcripción completa de una grabación
#### Entrada
| Parámetro | Tipo | Requerido | Descripción |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Sí | Clave API de Grain \(token de acceso personal\) |
| `recordingId` | string | Sí | El UUID de la grabación |
#### Salida
| Parámetro | Tipo | Descripción |
| --------- | ---- | ----------- |
| `transcript` | array | Array de secciones de transcripción |
### `grain_list_teams`
Listar todos los equipos en el espacio de trabajo
#### Entrada
| Parámetro | Tipo | Requerido | Descripción |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Sí | Clave API de Grain \(token de acceso personal\) |
#### Salida
| Parámetro | Tipo | Descripción |
| --------- | ---- | ----------- |
| `teams` | array | Array de objetos de equipo |
### `grain_list_meeting_types`
Listar todos los tipos de reunión en el espacio de trabajo
#### Entrada
| Parámetro | Tipo | Requerido | Descripción |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Sí | Clave API de Grain \(token de acceso personal\) |
#### Salida
| Parámetro | Tipo | Descripción |
| --------- | ---- | ----------- |
| `meeting_types` | array | Array de objetos de tipo de reunión |
### `grain_create_hook`
Crear un webhook para recibir eventos de grabación
#### Entrada
| Parámetro | Tipo | Requerido | Descripción |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Sí | Clave API de Grain \(token de acceso personal\) |
| `hookUrl` | string | Sí | URL del endpoint del webhook \(debe responder 2xx\) |
| `filterBeforeDatetime` | string | No | Filtro: grabaciones antes de esta fecha |
| `filterAfterDatetime` | string | No | Filtro: grabaciones después de esta fecha |
| `filterParticipantScope` | string | No | Filtro: "internal" o "external" |
| `filterTeamId` | string | No | Filtro: UUID de equipo específico |
| `filterMeetingTypeId` | string | No | Filtro: tipo de reunión específico |
| `includeHighlights` | boolean | No | Incluir destacados en la carga del webhook |
| `includeParticipants` | boolean | No | Incluir participantes en la carga del webhook |
| `includeAiSummary` | boolean | No | Incluir resumen de IA en la carga del webhook |
#### Salida
| Parámetro | Tipo | Descripción |
| --------- | ---- | ----------- |
| `id` | string | UUID del hook |
| `enabled` | boolean | Si el hook está activo |
| `hook_url` | string | La URL del webhook |
| `filter` | object | Filtros aplicados |
| `include` | object | Campos incluidos |
| `inserted_at` | string | Marca de tiempo de creación ISO8601 |
### `grain_list_hooks`
Listar todos los webhooks de la cuenta
#### Entrada
| Parámetro | Tipo | Requerido | Descripción |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Sí | Clave API de Grain \(token de acceso personal\) |
#### Salida
| Parámetro | Tipo | Descripción |
| --------- | ---- | ----------- |
| `hooks` | array | Array de objetos hook |
### `grain_delete_hook`
Eliminar un webhook por ID
#### Entrada
| Parámetro | Tipo | Requerido | Descripción |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Sí | Clave API de Grain \(token de acceso personal\) |
| `hookId` | string | Sí | El UUID del hook a eliminar |
#### Salida
| Parámetro | Tipo | Descripción |
| --------- | ---- | ----------- |
| `success` | boolean | Verdadero cuando el webhook se eliminó correctamente |
## Notas
- Categoría: `tools`
- Tipo: `grain`

View File

@@ -61,8 +61,6 @@ Extrae y procesa contenido web en texto limpio y compatible con LLM usando Jina
| Parámetro | Tipo | Descripción |
| --------- | ---- | ----------- |
| `content` | string | El contenido extraído de la URL, procesado en texto limpio y compatible con LLM |
| `links` | array | Lista de enlaces encontrados en la página (cuando gatherLinks o withLinksummary está activado) |
| `images` | array | Lista de imágenes encontradas en la página (cuando withImagesummary está activado) |
### `jina_search`

View File

@@ -38,17 +38,18 @@ Envía una solicitud de completado de chat a cualquier proveedor de LLM compatib
#### Entrada
| Parámetro | Tipo | Requerido | Descripción |
| --------- | ---- | -------- | ----------- |
| `model` | string | Sí | El modelo a utilizar \(ej., gpt-4o, claude-sonnet-4-5, gemini-2.0-flash\) |
| --------- | ---- | --------- | ----------- |
| `model` | string | Sí | El modelo a utilizar \(por ejemplo, gpt-4o, claude-sonnet-4-5, gemini-2.0-flash\) |
| `systemPrompt` | string | No | Prompt del sistema para establecer el comportamiento del asistente |
| `context` | string | Sí | El mensaje del usuario o contexto a enviar al modelo |
| `apiKey` | string | No | Clave API del proveedor \(usa la clave de la plataforma si no se proporciona para modelos alojados\) |
| `temperature` | number | No | Temperatura para la generación de respuestas \(0-2\) |
| `maxTokens` | number | No | Tokens máximos en la respuesta |
| `maxTokens` | number | No | Máximo de tokens en la respuesta |
| `azureEndpoint` | string | No | URL del endpoint de Azure OpenAI |
| `azureApiVersion` | string | No | Versión de la API de Azure OpenAI |
| `vertexProject` | string | No | ID del proyecto de Google Cloud para Vertex AI |
| `vertexLocation` | string | No | Ubicación de Google Cloud para Vertex AI \(por defecto us-central1\) |
| `vertexCredential` | string | No | ID de credencial OAuth de Google Cloud para Vertex AI |
#### Salida

View File

@@ -5,7 +5,6 @@ title: Programación
import { Callout } from 'fumadocs-ui/components/callout'
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
import { Image } from '@/components/ui/image'
import { Video } from '@/components/ui/video'
El bloque de Programación activa automáticamente flujos de trabajo de forma recurrente en intervalos o momentos específicos.
@@ -21,16 +20,16 @@ El bloque de Programación activa automáticamente flujos de trabajo de forma re
## Opciones de programación
Configura cuándo se ejecuta tu flujo de trabajo utilizando las opciones desplegables:
Configura cuándo se ejecuta tu flujo de trabajo:
<Tabs items={['Intervalos simples', 'Expresiones cron']}>
<Tab>
<ul className="list-disc space-y-1 pl-6">
<li><strong>Cada pocos minutos</strong>: intervalos de 5, 15, 30 minutos</li>
<li><strong>Por hora</strong>: Cada hora o cada pocas horas</li>
<li><strong>Diariamente</strong>: Una o varias veces al día</li>
<li><strong>Semanalmente</strong>: Días específicos de la semana</li>
<li><strong>Mensualmente</strong>: Días específicos del mes</li>
<li><strong>Cada X minutos</strong>: ejecutar en intervalos de minutos (1-1440)</li>
<li><strong>Cada hora</strong>: ejecutar en un minuto específico cada hora</li>
<li><strong>Diariamente</strong>: ejecutar a una hora específica cada día</li>
<li><strong>Semanalmente</strong>: ejecutar en un día y hora específicos cada semana</li>
<li><strong>Mensualmente</strong>: ejecutar en un día y hora específicos cada mes</li>
</ul>
</Tab>
<Tab>
@@ -43,45 +42,36 @@ Configura cuándo se ejecuta tu flujo de trabajo utilizando las opciones despleg
</Tab>
</Tabs>
## Configuración de programaciones
## Activación
Cuando un flujo de trabajo está programado:
- La programación se vuelve **activa** y muestra el próximo tiempo de ejecución
- Haz clic en el botón **"Programado"** para desactivar la programación
- Las programaciones se desactivan automáticamente después de **3 fallos consecutivos**
Las programaciones están vinculadas al despliegue del flujo de trabajo:
<div className="flex justify-center">
<Image
src="/static/blocks/schedule-2.png"
alt="Bloque de programación activo"
width={500}
height={400}
className="my-6"
/>
</div>
<div className="flex justify-center">
<Image
src="/static/blocks/schedule-3.png"
alt="Programación desactivada"
width={500}
height={350}
className="my-6"
/>
</div>
<div className="flex justify-center">
<Image
src="/static/blocks/schedule-3.png"
alt="Programación desactivada"
width={500}
height={400}
className="my-6"
/>
</div>
Las programaciones desactivadas muestran cuándo estuvieron activas por última vez. Haz clic en la insignia **"Desactivado"** para reactivar la programación.
- **Desplegar flujo de trabajo** → la programación se activa y comienza a ejecutarse
- **Retirar flujo de trabajo** → la programación se elimina
- **Redesplegar flujo de trabajo** → la programación se recrea con la configuración actual
<Callout>
Los bloques de programación no pueden recibir conexiones entrantes y funcionan exclusivamente como disparadores de flujos de trabajo.
Debes desplegar tu flujo de trabajo para que la programación comience a ejecutarse. Configura el bloque de programación y luego despliega desde la barra de herramientas.
</Callout>
## Desactivación automática
Las programaciones se desactivan automáticamente después de **100 fallos consecutivos** para evitar fallos descontrolados. Cuando están desactivadas:
- Aparece una insignia de advertencia en el bloque de programación
- La programación deja de ejecutarse
- Haz clic en la insignia para reactivar la programación
<div className="flex justify-center">
<Image
src="/static/blocks/schedule-3.png"
alt="Programación desactivada"
width={500}
height={400}
className="my-6"
/>
</div>
<Callout>
Los bloques de programación no pueden recibir conexiones entrantes y sirven únicamente como puntos de entrada del flujo de trabajo.
</Callout>

View File

@@ -105,26 +105,30 @@ La répartition des modèles montre :
Les prix indiqués reflètent les tarifs en date du 10 septembre 2025. Consultez la documentation des fournisseurs pour les tarifs actuels.
</Callout>
## Apportez votre propre clé (BYOK)
Vous pouvez utiliser vos propres clés API pour les modèles hébergés (OpenAI, Anthropic, Google, Mistral) dans **Paramètres → BYOK** pour payer les prix de base. Les clés sont chiffrées et s'appliquent à l'ensemble de l'espace de travail.
## Stratégies d'optimisation des coûts
- **Sélection du modèle** : choisissez les modèles en fonction de la complexité de la tâche. Les tâches simples peuvent utiliser GPT-4.1-nano tandis que le raisonnement complexe pourrait nécessiter o1 ou Claude Opus.
- **Ingénierie de prompt** : des prompts bien structurés et concis réduisent l'utilisation de tokens sans sacrifier la qualité.
- **Sélection du modèle** : choisissez les modèles en fonction de la complexité de la tâche. Les tâches simples peuvent utiliser GPT-4.1-nano tandis que le raisonnement complexe peut nécessiter o1 ou Claude Opus.
- **Ingénierie des prompts** : des prompts bien structurés et concis réduisent l'utilisation de jetons sans sacrifier la qualité.
- **Modèles locaux** : utilisez Ollama ou VLLM pour les tâches non critiques afin d'éliminer complètement les coûts d'API.
- **Mise en cache et réutilisation** : stockez les résultats fréquemment utilisés dans des variables ou des fichiers pour éviter des appels répétés aux modèles d'IA.
- **Traitement par lots** : traitez plusieurs éléments dans une seule requête d'IA plutôt que de faire des appels individuels.
- **Mise en cache et réutilisation** : stockez les résultats fréquemment utilisés dans des variables ou des fichiers pour éviter les appels répétés aux modèles d'IA.
- **Traitement par lots** : traitez plusieurs éléments dans une seule requête d'IA plutôt que d'effectuer des appels individuels.
## Suivi de l'utilisation
## Surveillance de l'utilisation
Surveillez votre utilisation et votre facturation dans Paramètres → Abonnement :
- **Utilisation actuelle** : utilisation et coûts en temps réel pour la période en cours
- **Limites d'utilisation** : limites du forfait avec indicateurs visuels de progression
- **Détails de facturation** : frais prévisionnels et engagements minimums
- **Limites d'utilisation** : limites du forfait avec indicateurs de progression visuels
- **Détails de facturation** : frais projetés et engagements minimums
- **Gestion du forfait** : options de mise à niveau et historique de facturation
### Suivi d'utilisation programmatique
### Suivi programmatique de l'utilisation
Vous pouvez interroger votre utilisation actuelle et vos limites par programmation en utilisant l'API :
Vous pouvez interroger votre utilisation et vos limites actuelles de manière programmatique à l'aide de l'API :
**Point de terminaison :**
@@ -172,14 +176,14 @@ curl -X GET -H "X-API-Key: YOUR_API_KEY" -H "Content-Type: application/json" htt
```
**Champs de limite de débit :**
- `requestsPerMinute` : limite de débit soutenu (les jetons se rechargent à ce rythme)
- `requestsPerMinute` : limite de débit soutenue (les jetons se rechargent à ce rythme)
- `maxBurst` : nombre maximum de jetons que vous pouvez accumuler (capacité de rafale)
- `remaining` : jetons actuellement disponibles (peut aller jusqu'à `maxBurst`)
**Champs de réponse :**
- `currentPeriodCost` reflète l'utilisation dans la période de facturation actuelle
- `limit` est dérivé des limites individuelles (Gratuit/Pro) ou des limites mutualisées de l'organisation (Équipe/Entreprise)
- `plan` est le plan actif de plus haute priorité associé à votre utilisateur
- `limit` est dérivé des limites individuelles (Free/Pro) ou des limites d'organisation mutualisées (Team/Enterprise)
- `plan` est le forfait actif de priorité la plus élevée associé à votre utilisateur
## Limites des forfaits
@@ -196,21 +200,21 @@ Les différents forfaits d'abonnement ont des limites d'utilisation différentes
Sim utilise un modèle de facturation **abonnement de base + dépassement** :
### Comment ça fonctionne
### Fonctionnement
**Forfait Pro (20 $/mois) :**
- L'abonnement mensuel inclut 20 $ d'utilisation
- Utilisation inférieure à 20 $ → Pas de frais supplémentaires
- Utilisation inférieure à 20 $ → Aucun frais supplémentaire
- Utilisation supérieure à 20 $ → Paiement du dépassement en fin de mois
- Exemple : 35 $ d'utilisation = 20 $ (abonnement) + 15 $ (dépassement)
**Forfait Équipe (40 $/siège/mois) :**
- Utilisation mutualisée pour tous les membres de l'équipe
- Dépassement calculé à partir de l'utilisation totale de l'équipe
**Forfait Équipe (40 $/utilisateur/mois) :**
- Utilisation mutualisée entre tous les membres de l'équipe
- Dépassement calculé sur l'utilisation totale de l'équipe
- Le propriétaire de l'organisation reçoit une seule facture
**Forfaits Entreprise :**
- Prix mensuel fixe, pas de dépassements
- Prix mensuel fixe, sans dépassement
- Limites d'utilisation personnalisées selon l'accord
### Facturation par seuil
@@ -220,21 +224,21 @@ Lorsque le dépassement non facturé atteint 50 $, Sim facture automatiquement l
**Exemple :**
- Jour 10 : 70 $ de dépassement → Facturation immédiate de 70 $
- Jour 15 : 35 $ d'utilisation supplémentaire (105 $ au total) → Déjà facturé, aucune action
- Jour 20 : 50 $ d'utilisation supplémentaire (155 $ au total, 85 $ non facturés) → Facturation immédiate de 85 $
- Jour 20 : 50 $ d'utilisation supplémentaire (155 $ au total, 85 $ non facturé) → Facturation immédiate de 85 $
Cela répartit les frais de dépassement importants tout au long du mois au lieu d'une seule facture importante en fin de période.
## Meilleures pratiques de gestion des coûts
## Bonnes pratiques de gestion des coûts
1. **Surveillez régulièrement** : vérifiez fréquemment votre tableau de bord d'utilisation pour éviter les surprises
2. **Définissez des budgets** : utilisez les limites du plan comme garde-fous pour vos dépenses
3. **Optimisez les flux de travail** : examinez les exécutions à coût élevé et optimisez les prompts ou la sélection de modèles
4. **Utilisez des modèles appropriés** : adaptez la complexité du modèle aux exigences de la tâche
5. **Regroupez les tâches similaires** : combinez plusieurs requêtes lorsque c'est possible pour réduire les frais généraux
1. **Surveillez régulièrement** : Consultez fréquemment votre tableau de bord d'utilisation pour éviter les surprises
2. **Définissez des budgets** : Utilisez les limites des forfaits comme garde-fous pour vos dépenses
3. **Optimisez les flux de travail** : Examinez les exécutions coûteuses et optimisez les prompts ou la sélection de modèles
4. **Utilisez les modèles appropriés** : Adaptez la complexité du modèle aux exigences de la tâche
5. **Regroupez les tâches similaires** : Combinez plusieurs requêtes lorsque c'est possible pour réduire les frais généraux
## Prochaines étapes
- Examinez votre utilisation actuelle dans [Paramètres → Abonnement](https://sim.ai/settings/subscription)
- Apprenez-en plus sur la [Journalisation](/execution/logging) pour suivre les détails d'exécution
- Consultez votre utilisation actuelle dans [Paramètres → Abonnement](https://sim.ai/settings/subscription)
- Découvrez la [journalisation](/execution/logging) pour suivre les détails d'exécution
- Explorez l'[API externe](/execution/api) pour la surveillance programmatique des coûts
- Consultez les [techniques d'optimisation de flux de travail](/blocks) pour réduire les coûts
- Consultez les [techniques d'optimisation des workflows](/blocks) pour réduire les coûts

View File

@@ -0,0 +1,59 @@
---
title: Circleback
description: Notes de réunion et tâches générées par IA
---
import { BlockInfoCard } from "@/components/ui/block-info-card"
<BlockInfoCard
type="circleback"
color="linear-gradient(180deg, #E0F7FA 0%, #FFFFFF 100%)"
/>
{/* MANUAL-CONTENT-START:intro */}
[Circleback](https://circleback.ai/) est une plateforme alimentée par IA qui automatise les notes de réunion, les tâches, les transcriptions et les enregistrements pour votre équipe. Lorsqu'une réunion est terminée, Circleback traite la conversation et fournit des notes détaillées et des tâches, accompagnées d'une transcription et d'un enregistrement (lorsque disponible). Cela aide les équipes à capturer efficacement les informations, à distribuer les tâches et à s'assurer que rien n'est oublié, le tout intégré de manière transparente dans vos flux de travail.
Avec l'intégration Sim Circleback, vous pouvez :
- **Recevoir des notes de réunion détaillées et des tâches** : collectez automatiquement des résumés de réunion bien formatés et suivez les tâches discutées lors de vos appels.
- **Accéder aux enregistrements et transcriptions complètes des réunions** : obtenez la conversation complète et l'enregistrement associé, facilitant la révision des moments clés ou le partage avec des collègues.
- **Capturer les informations sur les participants et le contexte de la réunion** : les listes de participants, les métadonnées de réunion et les tags aident à garder vos données organisées et exploitables.
- **Transmettre les informations directement dans vos flux de travail** : déclenchez des automatisations ou envoyez les données Circleback vers d'autres systèmes dès qu'une réunion est terminée, en utilisant les puissants déclencheurs webhook de Sim.
**Comment cela fonctionne dans Sim :**
Circleback utilise des déclencheurs webhook : chaque fois qu'une réunion est traitée, les données sont automatiquement transmises à votre agent ou automatisation. Vous pouvez créer d'autres automatisations basées sur :
- Réunion terminée (toutes les données traitées disponibles)
- Nouvelles notes (notes prêtes avant même que la réunion complète ne soit traitée)
- Intégration webhook brute pour des cas d'usage avancés
**Les informations suivantes sont disponibles dans la charge utile du webhook de réunion Circleback :**
| Champ | Type | Description |
|----------------|---------|----------------------------------------------------|
| `id` | number | ID de réunion Circleback |
| `name` | string | Titre de la réunion |
| `url` | string | URL de réunion virtuelle (Zoom, Meet, Teams, etc.) |
| `createdAt` | string | Horodatage de création de la réunion |
| `duration` | number | Durée en secondes |
| `recordingUrl` | string | URL d'enregistrement (valide 24 heures) |
| `tags` | json | Tableau d'étiquettes |
| `icalUid` | string | ID d'événement de calendrier |
| `attendees` | json | Tableau d'objets participants |
| `notes` | string | Notes de réunion en Markdown |
| `actionItems` | json | Tableau d'éléments d'action |
| `transcript` | json | Tableau de segments de transcription |
| `insights` | json | Informations créées par l'utilisateur |
| `meeting` | json | Charge utile complète de la réunion |
Que vous souhaitiez distribuer des résumés instantanés, enregistrer des éléments d'action ou créer des workflows personnalisés déclenchés par de nouvelles données de réunion, Circleback et Sim facilitent la gestion automatique de tout ce qui concerne vos réunions.
{/* MANUAL-CONTENT-END */}
## Instructions d'utilisation
Recevez les notes de réunion, les éléments d'action, les transcriptions et les enregistrements lorsque les réunions sont traitées. Circleback utilise des webhooks pour transmettre les données à vos workflows.
## Remarques
- Catégorie : `triggers`
- Type : `circleback`

View File

@@ -0,0 +1,218 @@
---
title: Grain
description: Accédez aux enregistrements de réunions, transcriptions et résumés IA
---
import { BlockInfoCard } from "@/components/ui/block-info-card"
<BlockInfoCard
type="grain"
color="#F6FAF9"
/>
{/* MANUAL-CONTENT-START:intro */}
[Grain](https://grain.com/) est une plateforme moderne pour capturer, stocker et partager des enregistrements de réunions, des transcriptions, des moments clés et des résumés générés par IA. Grain permet aux équipes de transformer les conversations en informations exploitables et de maintenir tout le monde aligné sur les moments importants des réunions.
Avec Grain, vous pouvez :
- **Accéder aux enregistrements et transcriptions consultables** : trouvez et consultez chaque réunion par mot-clé, participant ou sujet.
- **Partager des moments clés et des extraits** : capturez les moments importants et partagez de courts extraits vidéo/audio avec votre équipe ou dans vos workflows.
- **Obtenir des résumés générés par IA** : produisez automatiquement des résumés de réunions, des actions à entreprendre et des informations clés grâce à l'IA avancée de Grain.
- **Organiser les réunions par équipe ou par type** : étiquetez et catégorisez les enregistrements pour un accès et un reporting faciles.
L'intégration Sim Grain permet à vos agents de :
- Lister, rechercher et récupérer les enregistrements de réunions et leurs détails selon des filtres flexibles (date/heure, participant, équipe, etc.).
- Accéder aux résumés IA, participants, moments clés et autres métadonnées des réunions pour alimenter des automatisations ou des analyses.
- Déclencher des workflows dès que de nouvelles réunions sont traitées, que des résumés sont générés ou que des moments clés sont créés via les webhooks Grain.
- Connecter facilement les données Grain à d'autres outils ou notifier les membres de l'équipe dès qu'un événement important se produit dans une réunion.
Que vous souhaitiez automatiser les actions de suivi, conserver des traces de conversations importantes ou faire remonter des informations dans toute votre organisation, Grain et Sim facilitent la connexion de l'intelligence des réunions à vos workflows.
{/* MANUAL-CONTENT-END */}
## Instructions d'utilisation
Intégrez Grain dans votre workflow. Accédez aux enregistrements de réunions, transcriptions, moments clés et résumés générés par IA. Peut également déclencher des workflows basés sur les événements webhook de Grain.
## Outils
### `grain_list_recordings`
Liste les enregistrements de Grain avec des filtres optionnels et une pagination
#### Entrée
| Paramètre | Type | Requis | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Oui | Clé API Grain (jeton d'accès personnel) |
| `cursor` | string | Non | Curseur de pagination pour la page suivante |
| `beforeDatetime` | string | Non | Uniquement les enregistrements avant cet horodatage ISO8601 |
| `afterDatetime` | string | Non | Uniquement les enregistrements après cet horodatage ISO8601 |
| `participantScope` | string | Non | Filtre : « internal » ou « external » |
| `titleSearch` | string | Non | Terme de recherche pour filtrer par titre d'enregistrement |
| `teamId` | string | Non | Filtrer par UUID d'équipe |
| `meetingTypeId` | string | Non | Filtrer par UUID de type de réunion |
| `includeHighlights` | boolean | Non | Inclure les moments forts/extraits dans la réponse |
| `includeParticipants` | boolean | Non | Inclure la liste des participants dans la réponse |
| `includeAiSummary` | boolean | Non | Inclure le résumé généré par IA |
#### Sortie
| Paramètre | Type | Description |
| --------- | ---- | ----------- |
| `recordings` | array | Tableau d'objets d'enregistrement |
### `grain_get_recording`
Obtient les détails d'un seul enregistrement par ID
#### Entrée
| Paramètre | Type | Requis | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Oui | Clé API Grain (jeton d'accès personnel) |
| `recordingId` | string | Oui | L'UUID de l'enregistrement |
| `includeHighlights` | boolean | Non | Inclure les moments forts/extraits |
| `includeParticipants` | boolean | Non | Inclure la liste des participants |
| `includeAiSummary` | boolean | Non | Inclure le résumé IA |
| `includeCalendarEvent` | boolean | Non | Inclure les données d'événement de calendrier |
| `includeHubspot` | boolean | Non | Inclure les associations HubSpot |
#### Sortie
| Paramètre | Type | Description |
| --------- | ---- | ----------- |
| `id` | string | UUID de l'enregistrement |
| `title` | string | Titre de l'enregistrement |
| `start_datetime` | string | Horodatage de début ISO8601 |
| `end_datetime` | string | Horodatage de fin ISO8601 |
| `duration_ms` | number | Durée en millisecondes |
| `media_type` | string | audio, transcript ou video |
| `source` | string | Source de l'enregistrement \(zoom, meet, teams, etc.\) |
| `url` | string | URL pour visualiser dans Grain |
| `thumbnail_url` | string | URL de l'image miniature |
| `tags` | array | Tableau de chaînes de tags |
| `teams` | array | Équipes auxquelles appartient l'enregistrement |
| `meeting_type` | object | Informations sur le type de réunion \(id, nom, portée\) |
| `highlights` | array | Points forts \(si inclus\) |
| `participants` | array | Participants \(si inclus\) |
| `ai_summary` | object | Texte du résumé IA \(si inclus\) |
| `calendar_event` | object | Données de l'événement de calendrier \(si incluses\) |
| `hubspot` | object | Associations HubSpot \(si incluses\) |
### `grain_get_transcript`
Obtenir la transcription complète d'un enregistrement
#### Entrée
| Paramètre | Type | Requis | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Oui | Clé API Grain \(jeton d'accès personnel\) |
| `recordingId` | string | Oui | UUID de l'enregistrement |
#### Sortie
| Paramètre | Type | Description |
| --------- | ---- | ----------- |
| `transcript` | array | Tableau de sections de transcription |
### `grain_list_teams`
Lister toutes les équipes dans l'espace de travail
#### Entrée
| Paramètre | Type | Requis | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Oui | Clé API Grain (jeton d'accès personnel) |
#### Sortie
| Paramètre | Type | Description |
| --------- | ---- | ----------- |
| `teams` | array | Tableau d'objets équipe |
### `grain_list_meeting_types`
Lister tous les types de réunion dans l'espace de travail
#### Entrée
| Paramètre | Type | Requis | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Oui | Clé API Grain (jeton d'accès personnel) |
#### Sortie
| Paramètre | Type | Description |
| --------- | ---- | ----------- |
| `meeting_types` | array | Tableau d'objets type de réunion |
### `grain_create_hook`
Créer un webhook pour recevoir les événements d'enregistrement
#### Entrée
| Paramètre | Type | Requis | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Oui | Clé API Grain (jeton d'accès personnel) |
| `hookUrl` | string | Oui | URL du point de terminaison webhook (doit répondre 2xx) |
| `filterBeforeDatetime` | string | Non | Filtre : enregistrements avant cette date |
| `filterAfterDatetime` | string | Non | Filtre : enregistrements après cette date |
| `filterParticipantScope` | string | Non | Filtre : « internal » ou « external » |
| `filterTeamId` | string | Non | Filtre : UUID d'équipe spécifique |
| `filterMeetingTypeId` | string | Non | Filtre : type de réunion spécifique |
| `includeHighlights` | boolean | Non | Inclure les moments forts dans la charge utile du webhook |
| `includeParticipants` | boolean | Non | Inclure les participants dans la charge utile du webhook |
| `includeAiSummary` | boolean | Non | Inclure le résumé IA dans la charge utile du webhook |
#### Sortie
| Paramètre | Type | Description |
| --------- | ---- | ----------- |
| `id` | string | UUID du hook |
| `enabled` | boolean | Indique si le hook est actif |
| `hook_url` | string | L'URL du webhook |
| `filter` | object | Filtres appliqués |
| `include` | object | Champs inclus |
| `inserted_at` | string | Horodatage de création ISO8601 |
### `grain_list_hooks`
Lister tous les webhooks du compte
#### Entrée
| Paramètre | Type | Requis | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Oui | Clé API Grain \(jeton d'accès personnel\) |
#### Sortie
| Paramètre | Type | Description |
| --------- | ---- | ----------- |
| `hooks` | array | Tableau d'objets hook |
### `grain_delete_hook`
Supprimer un webhook par ID
#### Entrée
| Paramètre | Type | Requis | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Oui | Clé API Grain \(jeton d'accès personnel\) |
| `hookId` | string | Oui | L'UUID du hook à supprimer |
#### Sortie
| Paramètre | Type | Description |
| --------- | ---- | ----------- |
| `success` | boolean | Vrai lorsque le webhook a été supprimé avec succès |
## Remarques
- Catégorie : `tools`
- Type : `grain`

View File

@@ -61,8 +61,6 @@ Extrayez et traitez le contenu web en texte propre et adapté aux LLM avec Jina
| Paramètre | Type | Description |
| --------- | ---- | ----------- |
| `content` | string | Le contenu extrait de l'URL, traité en texte propre et adapté aux LLM |
| `links` | array | Liste des liens trouvés sur la page (lorsque gatherLinks ou withLinksummary est activé) |
| `images` | array | Liste des images trouvées sur la page (lorsque withImagesummary est activé) |
### `jina_search`

View File

@@ -49,6 +49,7 @@ Envoyez une requête de complétion de chat à n'importe quel fournisseur de LLM
| `azureApiVersion` | string | Non | Version de l'API Azure OpenAI |
| `vertexProject` | string | Non | ID du projet Google Cloud pour Vertex AI |
| `vertexLocation` | string | Non | Emplacement Google Cloud pour Vertex AI \(par défaut us-central1\) |
| `vertexCredential` | string | Non | ID des identifiants OAuth Google Cloud pour Vertex AI |
#### Sortie

View File

@@ -5,7 +5,6 @@ title: Planification
import { Callout } from 'fumadocs-ui/components/callout'
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
import { Image } from '@/components/ui/image'
import { Video } from '@/components/ui/video'
Le bloc Planification déclenche automatiquement des workflows de manière récurrente à des intervalles ou moments spécifiés.
@@ -21,67 +20,58 @@ Le bloc Planification déclenche automatiquement des workflows de manière récu
## Options de planification
Configurez quand votre workflow s'exécute en utilisant les options du menu déroulant :
Configurez quand votre workflow s'exécute :
<Tabs items={['Intervalles simples', 'Expressions cron']}>
<Tab>
<ul className="list-disc space-y-1 pl-6">
<li><strong>Toutes les quelques minutes</strong> : intervalles de 5, 15, 30 minutes</li>
<li><strong>Toutes les heures</strong> : chaque heure ou toutes les quelques heures</li>
<li><strong>Quotidien</strong> : une ou plusieurs fois par jour</li>
<li><strong>Hebdomadaire</strong> : jours spécifiques de la semaine</li>
<li><strong>Mensuel</strong> : jours spécifiques du mois</li>
<li><strong>Toutes les X minutes</strong> : exécution à intervalles de minutes (1-1440)</li>
<li><strong>Toutes les heures</strong> : exécution à une minute spécifique chaque heure</li>
<li><strong>Quotidien</strong> : exécution à une heure spécifique chaque jour</li>
<li><strong>Hebdomadaire</strong> : exécution un jour et une heure spécifiques chaque semaine</li>
<li><strong>Mensuel</strong> : exécution un jour et une heure spécifiques chaque mois</li>
</ul>
</Tab>
<Tab>
<p>Utilisez des expressions cron pour une planification avancée :</p>
<p>Utilisez les expressions cron pour une planification avancée :</p>
<div className="text-sm space-y-1">
<div><code>0 9 * * 1-5</code> - Chaque jour de semaine à 9h</div>
<div><code>0 9 * * 1-5</code> - Chaque jour de semaine à 9 h</div>
<div><code>*/15 * * * *</code> - Toutes les 15 minutes</div>
<div><code>0 0 1 * *</code> - Premier jour de chaque mois</div>
</div>
</Tab>
</Tabs>
## Configuration des planifications
## Activation
Lorsqu'un workflow est planifié :
- La planification devient **active** et affiche la prochaine heure d'exécution
- Cliquez sur le bouton **"Planifié"** pour désactiver la planification
- Les planifications se désactivent automatiquement après **3 échecs consécutifs**
Les planifications sont liées au déploiement du workflow :
<div className="flex justify-center">
<Image
src="/static/blocks/schedule-2.png"
alt="Bloc de planification actif"
width={500}
height={400}
className="my-6"
/>
</div>
<div className="flex justify-center">
<Image
src="/static/blocks/schedule-3.png"
alt="Planification désactivée"
width={500}
height={350}
className="my-6"
/>
</div>
<div className="flex justify-center">
<Image
src="/static/blocks/schedule-3.png"
alt="Planification désactivée"
width={500}
height={400}
className="my-6"
/>
</div>
Les planifications désactivées indiquent quand elles ont été actives pour la dernière fois. Cliquez sur le badge **"Désactivé"** pour réactiver la planification.
- **Déployer le workflow** → la planification devient active et commence à s'exécuter
- **Annuler le déploiement du workflow** → la planification est supprimée
- **Redéployer le workflow** → la planification est recréée avec la configuration actuelle
<Callout>
Les blocs de planification ne peuvent pas recevoir de connexions entrantes et servent uniquement de déclencheurs de workflow.
Vous devez déployer votre workflow pour que la planification commence à s'exécuter. Configurez le bloc de planification, puis déployez depuis la barre d'outils.
</Callout>
## Désactivation automatique
Les planifications se désactivent automatiquement après **100 échecs consécutifs** pour éviter les erreurs en cascade. Lorsqu'elles sont désactivées :
- Un badge d'avertissement apparaît sur le bloc de planification
- La planification cesse de s'exécuter
- Cliquez sur le badge pour réactiver la planification
<div className="flex justify-center">
<Image
src="/static/blocks/schedule-3.png"
alt="Planification désactivée"
width={500}
height={400}
className="my-6"
/>
</div>
<Callout>
Les blocs de planification ne peuvent pas recevoir de connexions entrantes et servent uniquement de points d'entrée de workflow.
</Callout>

View File

@@ -105,43 +105,47 @@ AIブロックを使用するワークフローでは、ログで詳細なコス
表示価格は2025年9月10日時点のレートを反映しています。最新の価格については各プロバイダーのドキュメントをご確認ください。
</Callout>
## Bring Your Own Key (BYOK)
ホストされたモデルOpenAI、Anthropic、Google、Mistralに対して、**設定 → BYOK**で独自のAPIキーを使用し、基本価格で支払うことができます。キーは暗号化され、ワークスペース全体に適用されます。
## コスト最適化戦略
- **モデル選択**: タスクの複雑さに基づいてモデルを選択してください。単純なタスクにはGPT-4.1-nanoを使用し、複雑な推論にはo1やClaude Opusが必要場合があります。
- **プロンプトエンジニアリング**: 構造化された簡潔なプロンプトは、品質を犠牲にすることなくトークン使用量を削減します。
- **ローカルモデル**: 重要度の低いタスクにはOllamaやVLLMを使用して、API費用を完全に排除します。
- **キャッシュと再利用**: 頻繁に使用される結果を変数やファイルに保存して、AIモデル呼び出しの繰り返しを避けます。
- **モデル選択**: タスクの複雑さに基づいてモデルを選択します。シンプルなタスクにはGPT-4.1-nanoを使用し、複雑な推論にはo1やClaude Opusが必要になる場合があります。
- **プロンプトエンジニアリング**: 適切に構造化された簡潔なプロンプトは、品質を犠牲にすることなくトークン使用量を削減します。
- **ローカルモデル**: 重要度の低いタスクにはOllamaやVLLMを使用して、APIコストを完全に排除します。
- **キャッシュと再利用**: 頻繁に使用される結果を変数やファイルに保存して、AIモデルの繰り返し呼び出しを回避します。
- **バッチ処理**: 個別の呼び出しを行うのではなく、単一のAIリクエストで複数のアイテムを処理します。
## 使用状況モニタリング
## 使用状況の監視
設定 → サブスクリプションで使用状況と請求を監視できます
設定 → サブスクリプションで使用状況と請求を監視できます:
- **現在の使用状況**: 現在の期間のリアルタイムの使用状況とコスト
- **使用制限**: 視覚的な進捗指標付きのプラン制限
- **請求詳細**: 予測される料金と最低利用額
- **使用制限**: 視覚的な進行状況インジケーター付きのプラン制限
- **請求詳細**: 予測される料金と最低コミットメント
- **プラン管理**: アップグレードオプションと請求履歴
### プログラムによる使用状況の追跡
APIを使用して、現在の使用状況と制限をプログラムで照会できます
APIを使用して、現在の使用状況と制限をプログラムでクエリできます:
**エンドポイント**
**エンドポイント:**
```text
GET /api/users/me/usage-limits
```
**認証**
- APIキーを `X-API-Key` ヘッダーに含めてください
**認証:**
- `X-API-Key`ヘッダーにAPIキーを含めます
**リクエスト例**
**リクエスト例:**
```bash
curl -X GET -H "X-API-Key: YOUR_API_KEY" -H "Content-Type: application/json" https://sim.ai/api/users/me/usage-limits
```
**レスポンス例**
**レスポンス例:**
```json
{
@@ -171,70 +175,70 @@ curl -X GET -H "X-API-Key: YOUR_API_KEY" -H "Content-Type: application/json" htt
}
```
**レート制限フィールド**
- `requestsPerMinute`持続的なレート制限(トークンはこの速度で補充されます)
- `maxBurst`蓄積できる最大トークン数(バースト容量)
- `remaining`現在利用可能なトークン(最大`maxBurst`まで)
**レート制限フィールド:**
- `requestsPerMinute`: 持続的なレート制限(トークンはこのレートで補充されます)
- `maxBurst`: 蓄積できる最大トークン数(バースト容量)
- `remaining`: 現在利用可能なトークン(最大`maxBurst`まで)
**レスポンスフィールド**
**レスポンスフィールド:**
- `currentPeriodCost`は現在の請求期間の使用状況を反映します
- `limit`は個別の制限(無料/プロ)または組織のプール制限(チーム/エンタープライズ)から派生します
- `plan`はユーザーに関連付けられた最優先アクティブなプランです
- `limit`は個別の制限(Free/Proまたはプールされた組織の制限Team/Enterpriseから導出されます
- `plan`はユーザーに関連付けられた最優先度の高いアクティブなプランです
## プラン制限
## プラン制限
サブスクリプションプランによって使用制限が異なります
サブスクリプションプランによって使用量の制限が異なります
| プラン | 月間使用制限 | レート制限(毎分 |
| プラン | 月間使用制限 | レート制限(1分あたり |
|------|-------------------|-------------------------|
| **Free** | $20 | 同期5、非同期10 |
| **Pro** | $100 | 同期10、非同期50 |
| **Team** | $500プール | 同期50、非同期100 |
| **Enterprise** | カスタム | カスタム |
| **無料** | $20 | 同期5、非同期10 |
| **プロ** | $100 | 同期10、非同期50 |
| **チーム** | $500プール | 同期50、非同期100 |
| **エンタープライズ** | カスタム | カスタム |
## 課金モデル
Simは**基本サブスクリプション+超過分**の課金モデルを使用しています
Simは**基本サブスクリプション + 超過料金**の課金モデルを採用しています:
### 仕組み
**プロプラン(月額$20**
- 月額サブスクリプションには$20分の使用量が含まれます
- 使用量が$20未満 → 追加料金なし
- 使用量が$20を超える → 月末に超過分を支払い
- 例:$35の使用量 = $20サブスクリプション+ $15超過
- 使用量が$20超過 → 月末に超過分を支払い
- 例:使用量$35 = $20サブスクリプション+ $15超過料金
**チームプラン(席あたり月額$40**
- チームメンバー全体でプールされた使用量
- チーム全体の使用量から超過を計算
- 組織のオーナーが一括で請求を受け
**チームプラン(1席あたり月額$40**
- チームメンバー全員で使用量をプール
- チーム全体の使用量から超過料金を計算
- 組織のオーナーが1つの請求を受け取ります
**エンタープライズプラン:**
- 固定月額料金、超過料金なし
- 契約に基づくカスタム使用制限
- 契約に基づくカスタム使用制限
### しきい値課金
未請求の超過が$50に達すると、Simは自動的に未請求の全額を請求します。
未請求の超過料金が$50に達すると、Simは未請求金額の全額を自動的に請求します。
**例:**
- 10日目$70の超過分 → 即に$70を請求
- 15日目追加$35の使用(合計$105 → すでに請求済み、アクションなし
- 20日目さらに$50の使用合計$155、未請求$85 → 即に$85を請求
- 10日目超過料金$70 → 即に$70を請求
- 15日目追加使用量$35(合計$105 → すでに請求済み、アクションなし
- 20日目さらに$50の使用(合計$155、未請求$85 → 即に$85を請求
これにより、期間終了時に一度に大きな請求が発生するのではなく、月全体に大きな超過料金が分散されます。
これにより、期間終了時の1回の大きな請求ではなく、大きな超過料金を月全体に分散させることができます。
## コスト管理のベストプラクティス
1. **定期的な監視**: 予期せぬ事態を避けるため、使用状況ダッシュボードを頻繁に確認する
2. **予算の設定**: プランの制限を支出のガードレールとして使用する
3. **ワークフローの最適化**: コストの高い実行を見直し、プロンプトやモデル選択を最適化する
4. **適切なモデルの使用**: タスクの要件にモデルの複雑さを合わせる
5. **類似タスクのバッチ処理**: 可能な場合は複数のリクエストを組み合わせてオーバーヘッドを削減する
1. **定期的な監視**:予期しない事態を避けるため、使用状況ダッシュボードを頻繁に確認してください
2. **予算の設定**プランの制限を支出のガードレールとして使用してください
3. **ワークフローの最適化**コストの高い実行を確認し、プロンプトやモデル選択を最適化してください
4. **適切なモデルの使用**タスクの要件に合わせてモデルの複雑さを選択してください
5. **類似タスクのバッチ処理**可能な限り複数のリクエストを組み合わせてオーバーヘッドを削減してください
## 次のステップ
- [設定 → サブスクリプション](https://sim.ai/settings/subscription)で現在の使用状況を確認する
- 実行詳細を追跡するための[ロギング](/execution/logging)について学ぶ
- 実行詳細を追跡するための[ログ記録](/execution/logging)について学ぶ
- プログラムによるコスト監視のための[外部API](/execution/api)を探索する
- コスト削減ための[ワークフロー最適化テクニック](/blocks)をチェックする
- コスト削減するための[ワークフロー最適化テクニック](/blocks)を確認する

View File

@@ -0,0 +1,59 @@
---
title: Circleback
description: AI搭載の議事録とアクションアイテム
---
import { BlockInfoCard } from "@/components/ui/block-info-card"
<BlockInfoCard
type="circleback"
color="linear-gradient(180deg, #E0F7FA 0%, #FFFFFF 100%)"
/>
{/* MANUAL-CONTENT-START:intro */}
[Circleback](https://circleback.ai/)は、チームの議事録、アクションアイテム、文字起こし、録音を自動化するAI搭載プラットフォームです。会議が終了すると、Circlebackが会話を処理し、詳細な議事録とアクションアイテム、文字起こしと録音(利用可能な場合)を提供します。これにより、チームは効率的に洞察を記録し、アクションアイテムを配布し、見落としがないことを確認できます。すべてがワークフローにシームレスに統合されます。
Sim Circleback統合により、次のことが可能になります。
- **詳細な議事録とアクションアイテムの受信**: 通話中に議論された実行可能なタスクを追跡し、整形された会議サマリーを自動的に収集します。
- **完全な会議録音と文字起こしへのアクセス**: 会話全体と関連する録音を取得し、重要な瞬間を簡単に確認したり、同僚と共有したりできます。
- **参加者情報と会議コンテキストの記録**: 参加者リスト、会議メタデータ、タグにより、データを整理して実行可能な状態に保ちます。
- **ワークフローに直接洞察を配信**: 会議が終了した瞬間に、Simの強力なWebhookトリガーを使用して、自動化をトリガーしたり、Circlebackデータを他のシステムに送信したりできます。
**Simでの動作方法:**
CirclebackはWebhookトリガーを使用します。会議が処理されるたびに、データが自動的にエージェントまたは自動化にプッシュされます。次の条件に基づいてさらなる自動化を構築できます。
- 会議完了(すべての処理済みデータが利用可能)
- 新しいノート(会議全体が処理される前にノートが準備完了)
- 高度なユースケース向けの生のWebhook統合
**Circleback会議Webhookペイロードでは、次の情報が利用可能です:**
| フィールド | タイプ | 説明 |
|----------------|---------|----------------------------------------------------|
| `id` | number | CirclebackミーティングID |
| `name` | string | ミーティングタイトル |
| `url` | string | バーチャルミーティングURLZoom、Meet、Teamsなど |
| `createdAt` | string | ミーティング作成タイムスタンプ |
| `duration` | number | 秒単位の長さ |
| `recordingUrl` | string | 録画URL24時間有効 |
| `tags` | json | タグの配列 |
| `icalUid` | string | カレンダーイベントID |
| `attendees` | json | 参加者オブジェクトの配列 |
| `notes` | string | Markdown形式のミーティングノート |
| `actionItems` | json | アクションアイテムの配列 |
| `transcript` | json | トランスクリプトセグメントの配列 |
| `insights` | json | ユーザー作成のインサイト |
| `meeting` | json | 完全なミーティングペイロード |
即座にサマリーを配信したい場合でも、アクションアイテムを記録したい場合でも、新しいミーティングデータによってトリガーされるカスタムワークフローを構築したい場合でも、CirclebackとSimを使えば、ミーティングに関連するすべてを自動的にシームレスに処理できます。
{/* MANUAL-CONTENT-END */}
## 使用方法
ミーティングが処理されると、ミーティングノート、アクションアイテム、トランスクリプト、録画を受信します。Circlebackはwebhookを使用してワークフローにデータをプッシュします。
## 注意事項
- カテゴリー: `triggers`
- タイプ: `circleback`

View File

@@ -0,0 +1,218 @@
---
title: Grain
description: 会議の録画、文字起こし、AI要約にアクセス
---
import { BlockInfoCard } from "@/components/ui/block-info-card"
<BlockInfoCard
type="grain"
color="#F6FAF9"
/>
{/* MANUAL-CONTENT-START:intro */}
[Grain](https://grain.com/)は、会議の録画、文字起こし、ハイライト、AI搭載の要約を記録、保存、共有するための最新プラットフォームです。Grainを使用すると、チームは会話を実用的なインサイトに変換し、会議の重要な瞬間について全員の認識を一致させることができます。
Grainでできること:
- **検索可能な録画と文字起こしへのアクセス**: キーワード、参加者、トピックで会議を検索して確認できます。
- **ハイライトとクリップの共有**: 重要な瞬間を記録し、短い動画/音声のハイライトをチームやワークフロー全体で共有できます。
- **AI生成の要約を取得**: Grainの高度なAIを使用して、会議の要約、アクションアイテム、主要なインサイトを自動的に作成します。
- **チームやタイプ別に会議を整理**: 録画にタグを付けて分類し、簡単にアクセスしてレポートを作成できます。
Sim Grain統合により、エージェントは次のことが可能になります:
- 柔軟なフィルター(日時、参加者、チームなど)で会議の録画と詳細を一覧表示、検索、取得できます。
- 会議のAI要約、参加者、ハイライト、その他のメタデータにアクセスして、自動化や分析を強化できます。
- Grain Webhookを介して、新しい会議が処理されたとき、要約が生成されたとき、またはハイライトが作成されたときにワークフローをトリガーできます。
- Grainのデータを他のツールに簡単に連携したり、会議で重要なことが発生した瞬間にチームメイトに通知したりできます。
フォローアップアクションを自動化したり、重要な会話の記録を保持したり、組織全体でインサイトを表示したりする場合でも、GrainとSimを使用すると、会議のインテリジェンスをワークフローに簡単に接続できます。
{/* MANUAL-CONTENT-END */}
## 使用方法
Grainをワークフローに統合します。会議の録画、文字起こし、ハイライト、AI生成の要約にアクセスできます。Grain Webhookイベントに基づいてワークフローをトリガーすることもできます。
## ツール
### `grain_list_recordings`
オプションのフィルターとページネーションを使用してGrainから録画を一覧表示
#### 入力
| パラメータ | 型 | 必須 | 説明 |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | はい | Grain APIキー個人アクセストークン |
| `cursor` | string | いいえ | 次のページのページネーションカーソル |
| `beforeDatetime` | string | いいえ | このISO8601タイムスタンプより前の録画のみ |
| `afterDatetime` | string | いいえ | このISO8601タイムスタンプより後の録画のみ |
| `participantScope` | string | いいえ | フィルター「internal」または「external」 |
| `titleSearch` | string | いいえ | 録画タイトルでフィルタリングする検索語 |
| `teamId` | string | いいえ | チームUUIDでフィルタリング |
| `meetingTypeId` | string | いいえ | ミーティングタイプUUIDでフィルタリング |
| `includeHighlights` | boolean | いいえ | レスポンスにハイライト/クリップを含める |
| `includeParticipants` | boolean | いいえ | レスポンスに参加者リストを含める |
| `includeAiSummary` | boolean | いいえ | AI生成サマリーを含める |
#### 出力
| パラメータ | 型 | 説明 |
| --------- | ---- | ----------- |
| `recordings` | array | 録画オブジェクトの配列 |
### `grain_get_recording`
IDで単一の録画の詳細を取得
#### 入力
| パラメータ | 型 | 必須 | 説明 |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | はい | Grain APIキー個人アクセストークン |
| `recordingId` | string | はい | 録画UUID |
| `includeHighlights` | boolean | いいえ | ハイライト/クリップを含める |
| `includeParticipants` | boolean | いいえ | 参加者リストを含める |
| `includeAiSummary` | boolean | いいえ | AIサマリーを含める |
| `includeCalendarEvent` | boolean | いいえ | カレンダーイベントデータを含める |
| `includeHubspot` | boolean | いいえ | HubSpot関連付けを含める |
#### 出力
| パラメータ | 型 | 説明 |
| --------- | ---- | ----------- |
| `id` | string | 録画UUID |
| `title` | string | 録画タイトル |
| `start_datetime` | string | ISO8601形式の開始タイムスタンプ |
| `end_datetime` | string | ISO8601形式の終了タイムスタンプ |
| `duration_ms` | number | ミリ秒単位の長さ |
| `media_type` | string | audio、transcript、またはvideo |
| `source` | string | 録画ソース(zoom、meet、teamsなど) |
| `url` | string | Grainで表示するためのURL |
| `thumbnail_url` | string | サムネイル画像URL |
| `tags` | array | タグ文字列の配列 |
| `teams` | array | 録画が属するチーム |
| `meeting_type` | object | ミーティングタイプ情報(id、name、scope) |
| `highlights` | array | ハイライト(含まれる場合) |
| `participants` | array | 参加者(含まれる場合) |
| `ai_summary` | object | AI要約テキスト(含まれる場合) |
| `calendar_event` | object | カレンダーイベントデータ(含まれる場合) |
| `hubspot` | object | HubSpot関連付け(含まれる場合) |
### `grain_get_transcript`
録画の完全なトランスクリプトを取得
#### 入力
| パラメータ | 型 | 必須 | 説明 |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | はい | Grain APIキー(パーソナルアクセストークン) |
| `recordingId` | string | はい | 録画UUID |
#### 出力
| パラメータ | 型 | 説明 |
| --------- | ---- | ----------- |
| `transcript` | array | トランスクリプトセクションの配列 |
### `grain_list_teams`
ワークスペース内のすべてのチームを一覧表示
#### 入力
| パラメータ | 型 | 必須 | 説明 |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | はい | Grain APIキー(パーソナルアクセストークン) |
#### 出力
| パラメータ | 型 | 説明 |
| --------- | ---- | ----------- |
| `teams` | array | チームオブジェクトの配列 |
### `grain_list_meeting_types`
ワークスペース内のすべてのミーティングタイプを一覧表示
#### 入力
| パラメータ | 型 | 必須 | 説明 |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | はい | Grain APIキー(パーソナルアクセストークン) |
#### 出力
| パラメータ | 型 | 説明 |
| --------- | ---- | ----------- |
| `meeting_types` | array | ミーティングタイプオブジェクトの配列 |
### `grain_create_hook`
録画イベントを受信するためのWebhookを作成
#### 入力
| パラメータ | 型 | 必須 | 説明 |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | はい | Grain APIキー(パーソナルアクセストークン) |
| `hookUrl` | string | はい | WebhookエンドポイントURL(2xxを返す必要があります) |
| `filterBeforeDatetime` | string | いいえ | フィルタ: この日付より前の録画 |
| `filterAfterDatetime` | string | いいえ | フィルタ: この日付より後の録画 |
| `filterParticipantScope` | string | いいえ | フィルタ: "internal"または"external" |
| `filterTeamId` | string | いいえ | フィルタ: 特定のチームUUID |
| `filterMeetingTypeId` | string | いいえ | フィルタ: 特定のミーティングタイプ |
| `includeHighlights` | boolean | いいえ | Webhookペイロードにハイライトを含める |
| `includeParticipants` | boolean | いいえ | Webhookペイロードに参加者を含める |
| `includeAiSummary` | boolean | いいえ | WebhookペイロードにAIサマリーを含める |
#### 出力
| パラメータ | 型 | 説明 |
| --------- | ---- | ----------- |
| `id` | string | フックUUID |
| `enabled` | boolean | フックがアクティブかどうか |
| `hook_url` | string | WebフックURL |
| `filter` | object | 適用されたフィルタ |
| `include` | object | 含まれるフィールド |
| `inserted_at` | string | ISO8601形式の作成タイムスタンプ |
### `grain_list_hooks`
アカウントのすべてのWebフックを一覧表示
#### 入力
| パラメータ | 型 | 必須 | 説明 |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | はい | Grain APIキー個人アクセストークン |
#### 出力
| パラメータ | 型 | 説明 |
| --------- | ---- | ----------- |
| `hooks` | array | フックオブジェクトの配列 |
### `grain_delete_hook`
IDでWebフックを削除
#### 入力
| パラメータ | 型 | 必須 | 説明 |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | はい | Grain APIキー個人アクセストークン |
| `hookId` | string | はい | 削除するフックUUID |
#### 出力
| パラメータ | 型 | 説明 |
| --------- | ---- | ----------- |
| `success` | boolean | Webフックが正常に削除された場合はtrue |
## 注記
- カテゴリ: `tools`
- タイプ: `grain`

View File

@@ -60,9 +60,7 @@ Jina AI Readerを使用してウェブコンテンツを抽出し、LLMフレン
| パラメータ | 型 | 説明 |
| --------- | ---- | ----------- |
| `content` | string | URLから抽出されたコンテンツで、クリーンでLLMフレンドリーなテキストに処理されたもの |
| `links` | array | ページで見つかったリンクのリストgatherLinksまたはwithLinksummaryが有効な場合 |
| `images` | array | ページで見つかった画像のリストwithImagesummaryが有効な場合 |
| `content` | string | URLから抽出されたコンテンツクリーンでLLMフレンドリーなテキストに処理されています |
### `jina_search`

View File

@@ -49,6 +49,7 @@ import { BlockInfoCard } from "@/components/ui/block-info-card"
| `azureApiVersion` | string | いいえ | Azure OpenAI APIバージョン |
| `vertexProject` | string | いいえ | Vertex AI用のGoogle CloudプロジェクトID |
| `vertexLocation` | string | いいえ | Vertex AI用のGoogle Cloudロケーションデフォルトはus-central1 |
| `vertexCredential` | string | いいえ | Vertex AI用のGoogle Cloud OAuth認証情報ID |
#### 出力

View File

@@ -5,7 +5,6 @@ title: スケジュール
import { Callout } from 'fumadocs-ui/components/callout'
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
import { Image } from '@/components/ui/image'
import { Video } from '@/components/ui/video'
スケジュールブロックは、指定された間隔または時間に定期的なスケジュールでワークフローを自動的にトリガーします。
@@ -21,20 +20,20 @@ import { Video } from '@/components/ui/video'
## スケジュールオプション
ドロップダウンオプションを使用してワークフローの実行タイミングを設定します:
ワークフローの実行タイミングを設定します:
<Tabs items={['簡単な間隔', 'Cron式']}>
<Tabs items={['Simple Intervals', 'Cron Expressions']}>
<Tab>
<ul className="list-disc space-y-1 pl-6">
<li><strong>分ごと</strong>5分、15分、30分間隔</li>
<li><strong>毎時</strong>1時間ごとまたは数時間ごと</li>
<li><strong>毎日</strong>1日に1回または複数回</li>
<li><strong>毎週</strong>週の特定の曜日</li>
<li><strong>毎月</strong>月の特定の日</li>
<li><strong>X分ごと</strong>分単位の間隔で実行1〜1440</li>
<li><strong>毎時</strong>毎時指定した分に実行</li>
<li><strong>毎日</strong>毎日指定した時刻に実行</li>
<li><strong>毎週</strong>毎週指定した曜日と時刻に実行</li>
<li><strong>毎月</strong>毎月指定した日時に実行</li>
</ul>
</Tab>
<Tab>
<p>高度なスケジューリングにはCron式を使用します</p>
<p>高度なスケジュール設定にはcron式を使用します</p>
<div className="text-sm space-y-1">
<div><code>0 9 * * 1-5</code> - 平日の午前9時</div>
<div><code>*/15 * * * *</code> - 15分ごと</div>
@@ -43,45 +42,36 @@ import { Video } from '@/components/ui/video'
</Tab>
</Tabs>
## スケジュールの設定
## アクティベーション
ワークフローがスケジュールされると
- スケジュールが**有効**になり、次の実行時間が表示されます
- **「スケジュール済み」**ボタンをクリックするとスケジュールを無効にできます
- スケジュールは**3回連続で失敗すると**自動的に無効になります
スケジュールはワークフローのデプロイに連動します
<div className="flex justify-center">
<Image
src="/static/blocks/schedule-2.png"
alt="アクティブなスケジュールブロック"
width={500}
height={400}
className="my-6"
/>
</div>
<div className="flex justify-center">
<Image
src="/static/blocks/schedule-3.png"
alt="無効化されたスケジュール"
width={500}
height={350}
className="my-6"
/>
</div>
<div className="flex justify-center">
<Image
src="/static/blocks/schedule-3.png"
alt="無効化されたスケジュール"
width={500}
height={400}
className="my-6"
/>
</div>
無効化されたスケジュールは、最後に有効だった時間を表示します。**「無効」**バッジをクリックすると、スケジュールを再度有効にできます。
- **ワークフローをデプロイ** → スケジュールが有効になり実行を開始
- **ワークフローをアンデプロイ** → スケジュールが削除
- **ワークフローを再デプロイ** → 現在の設定でスケジュールが再作成
<Callout>
スケジュールブロックは入力接続を受け取ることができず、純粋なワークフロートリガーとして機能します
スケジュールを開始するには、ワークフローをデプロイする必要があります。スケジュールブロックを設定してから、ツールバーからデプロイしてください
</Callout>
## 自動無効化
スケジュールは**100回連続で失敗**すると、エラーの連鎖を防ぐために自動的に無効化されます。無効化されると:
- スケジュールブロックに警告バッジが表示されます
- スケジュールの実行が停止します
- バッジをクリックしてスケジュールを再有効化できます
<div className="flex justify-center">
<Image
src="/static/blocks/schedule-3.png"
alt="無効化されたスケジュール"
width={500}
height={400}
className="my-6"
/>
</div>
<Callout>
スケジュールブロックは入力接続を受け取ることができず、ワークフローのエントリーポイントとしてのみ機能します。
</Callout>

View File

@@ -105,43 +105,47 @@ totalCost = baseExecutionCharge + modelCost
显示的价格为截至 2025 年 9 月 10 日的费率。请查看提供商文档以获取最新价格。
</Callout>
## 自带密钥BYOK
你可以在 **设置 → BYOK** 中为托管模型OpenAI、Anthropic、Google、Mistral使用你自己的 API 密钥,以按基础价格计费。密钥会被加密,并在整个工作区范围内生效。
## 成本优化策略
- **模型选择**:根据任务复杂选择模型。简单任务可以使用 GPT-4.1-nano复杂推理可能需要 o1 或 Claude Opus。
- **提示工程**:结构良好、简洁的提示可以减少令牌使用,同时保质量。
- **本地模型**:对于非关键任务,使用 Ollama 或 VLLM 完全消除 API 成本。
- **缓存和重用**:将经常使用的结果存储在变量或文件中,避免重复调用 AI 模型。
- **批量处理**在单次 AI 请求处理多个项目,而不是逐一调用
- **模型选择**:根据任务复杂度选择合适的模型。简单任务可用 GPT-4.1-nano,复杂推理可用 o1 或 Claude Opus。
- **提示工程**:结构清晰、简洁的提示能减少 token 使用,同时保证质量。
- **本地模型**:对于非关键任务,使用 Ollama 或 VLLM,可完全消除 API 成本。
- **缓存与复用**:将常用结果存储在变量或文件中,避免重复调用 AI 模型。
- **批量处理**次 AI 请求处理多个项目,减少单独调用次数
## 使用监控
在 设置 → 订阅 中监控您的使用情况和账单:
你可以在 设置 → 订阅 中监控你的用量和账单:
- **当前使用情况**:当前周期的实时使用和成本
- **使用限制**:计划限制及其可视化进度指示
- **账单详情**:预计费用和最低承诺
- **计划管理**:升级选项和账单历史记录
- **当前用量**:当前周期的实时用量和费用
- **用量上限**:带有可视化进度指示器的套餐限制
- **账单明细**:预计费用和最低承诺金额
- **套餐管理**:升级选项和账单历史
### 程序化使用跟
### 编程方式用量追踪
可以通过 API 程序化地查询当前的使用情况和限制:
可以通过 API 以编程方式查询当前用量和限制:
**端点**
**接口地址**
```text
GET /api/users/me/usage-limits
```
**认证:**
- 在 `X-API-Key` 标头中包含的 API 密钥
**认证方式**
- 在 `X-API-Key` header 中包含你的 API 密钥
**示例请求:**
**请求示例**
```bash
curl -X GET -H "X-API-Key: YOUR_API_KEY" -H "Content-Type: application/json" https://sim.ai/api/users/me/usage-limits
```
**示例响应:**
**响应示例**
```json
{
@@ -171,70 +175,70 @@ curl -X GET -H "X-API-Key: YOUR_API_KEY" -H "Content-Type: application/json" htt
}
```
**速率限制字段:**
- `requestsPerMinute`:持续速率限制(令牌以此速率补充)
- `maxBurst`:您可以累积的最大令牌数(突发容量)
- `remaining`:当前可用令牌数(最多可达 `maxBurst`
**限流字段:**
- `requestsPerMinute`:持续速率限制(token 按此速率补充)
- `maxBurst`:你可累计的最大 token 数(突发容量)
- `remaining`:当前可用 token 数(最多可达 `maxBurst`
**响应字段:**
- `currentPeriodCost` 反映当前计费周期的使用情况
- `limit` 来源于个人限制(免费/专业)或组织池限制(团队/企业)
- `plan` 是与该用户关联的最高优先级的活动计划
- `currentPeriodCost` 反映当前账单周期的用量
- `limit` 来源于个人限额Free/Pro或组织池化限额Team/Enterprise
- `plan` 是与该用户关联的最高优先级的激活套餐
## 计划限制
## 套餐限制
不同的订阅计划有不同的使用限制:
不同的订阅套餐有不同的使用限制:
| 方案 | 每月使用额 | 速率限制(每分钟) |
| 套餐 | 每月使用额 | 速率限制(每分钟) |
|------|-------------------|-------------------------|
| **Free** | $20 | 5 sync10 async |
| **Pro** | $100 | 10 sync50 async |
| **Team** | $500共享 | 50 sync100 async |
| **Enterprise** | 定制 | 定制 |
| **Enterprise** | 自定义 | 自定义 |
## 计费模式
Sim 使用 **基础订阅 + 超额**计费模式:
Sim 采用**基础订阅 + 超额**计费模式:
### 工作原理
### 计费方式说明
**专业计划$20/月):**
- 月订阅包含 $20 使用额度
- 使用低于 $20 → 无额外费用
- 使用超过 $20 → 月底支付超额部分
**Pro 套餐$20/月):**
- 月订阅包含 $20 使用额度
- 使用未超过 $20 → 无额外费用
- 使用超过 $20 → 月底结算超额部分
- 示例:$35 使用 = $20订阅+ $15超额
**团队计划$40/每席位/月):**
- 团队成员之间共享使用额度
- 超额费用根据团队总使用量计算
- 组织所有者收到一张账单
**Team 套餐$40/席位/月):**
- 团队成员共享使用额度
- 超额费用团队总用量计算
- 账单由组织所有者统一支付
**企业计划**
**Enterprise 套餐**
- 固定月费,无超额费用
- 根据协议自定义使用限额
- 使用额度可按协议定制
### 阈值计费
当未计费的超额费用达到 $50 时Sim 会自动计费全额未计费金额。
当未结算的超额费用达到 $50 时Sim 会自动结算全部未结算金额。
**示例:**
- 第 10 天:$70 超额 → 立即计费 $70
- 第 15 天:额外使用 $35累计 $105→ 已计费,无需操作
- 第 20 天:再使用 $50累计 $155待计费 $85→ 立即计费 $85
- 第 10 天:超额 $70 → 立即结算 $70
- 第 15 天:新增 $35 使用(累计 $105→ 已结算,无需操作
- 第 20 天:再用 $50累计 $155待结算 $85→ 立即结算 $85
这会将大量的超额费用分散到整个月,而不是在周期结束时收到一张大账单。
这样可以将大额超额费用分摊到每月多次结算,避免期末一次性大额账单。
## 成本管理最佳实践
1. **定期监控**:经常检查您的使用仪表板,避免意外情况
2. **设置预算**:使用计划限制作为支出控制的护栏
3. **优化工作流程**:检查高成本执行操作,优化提示或模型选择
4. **使用合适模型**:根据任务需求匹配模型复杂度
5. **批量处理相似任务**:尽可能合并多个请求减少开销
1. **定期监控**:经常查看用量仪表板,避免意外支出
2. **设置预算**:用套餐额度作为支出警戒线
3. **优化流程**:检查高成本执行,优化提示或模型选择
4. **选择合适模型**:根据任务需求匹配模型复杂度
5. **批量处理相似任务**:尽量合并请求减少额外开销
## 下一步
- 在[设置 → 订阅](https://sim.ai/settings/subscription)中查看您当前的使用情况
- 了解[日志记录](/execution/logging)以跟踪执行详情
- 探索[外部 API](/execution/api)以进行程序化成本监控
- 查看[工作流优化技巧](/blocks)以降低成本
- 在 [设置 → 订阅](https://sim.ai/settings/subscription) 中查看您当前的使用情况
- 了解 [日志记录](/execution/logging)以跟踪执行详情
- 探索 [外部 API](/execution/api),实现程序化成本监控
- 查看 [工作流优化技巧](/blocks)以降低成本

View File

@@ -0,0 +1,58 @@
---
title: Circleback
description: AI 驱动的会议记录与行动项
---
import { BlockInfoCard } from "@/components/ui/block-info-card"
<BlockInfoCard
type="circleback"
color="linear-gradient(180deg, #E0F7FA 0%, #FFFFFF 100%)"
/>
{/* MANUAL-CONTENT-START:intro */}
[Circleback](https://circleback.ai/) 是一个 AI 驱动的平台可为您的团队自动生成会议记录、行动项、文字稿和录音。每当会议结束后Circleback 会处理对话内容,提供详细的会议纪要和行动项,同时附上文字稿和录音(如有)。这有助于团队高效捕捉洞见、分发行动项,并确保不会遗漏任何重要信息——所有内容都能无缝集成到您的工作流程中。
通过 Sim Circleback 集成,您可以:
- **获取详细的会议记录和行动项**:自动收集格式良好的会议摘要,并跟踪通话中讨论的可执行任务。
- **访问完整的会议录音和文字稿**:获取完整对话及相关录音,便于回顾关键时刻或与同事分享。
- **捕捉与会者信息和会议背景**:与会者名单、会议元数据和标签帮助您有序管理和利用数据。
- **将洞见直接推送到您的工作流程**:会议结束后,利用 Sim 强大的 webhook 触发器,自动触发自动化流程或将 Circleback 数据发送到其他系统。
**在 Sim 中的工作方式:**
Circleback 使用 webhook 触发器:每当会议处理完成,数据会自动推送到您的代理或自动化流程。您可以基于以下内容构建更多自动化:
- 会议完成(所有处理数据可用)
- 新会议记录(即使会议尚未全部处理,会议纪要也可提前获取)
- 原始 webhook 集成,适用于高级用例
**Circleback 会议 webhook 有效载荷中包含以下信息:**
| 字段 | 类型 | 描述 |
|----------------|---------|----------------------------------------------------|
| `id` | number | Circleback 会议 ID |
| `name` | string | 会议标题 |
| `url` | string | 虚拟会议 URLZoom、Meet、Teams 等) |
| `createdAt` | string | 会议创建时间戳 |
| `duration` | number | 时长(秒) |
| `recordingUrl` | string | 录制文件 URL有效期 24 小时) |
| `tags` | json | 标签数组 |
| `icalUid` | string | 日历事件 ID |
| `attendees` | json | 参会者对象数组 |
| `notes` | string | Markdown 格式会议记录 |
| `actionItems` | json | 行动项数组 |
| `transcript` | json | 会议记录片段数组 |
| `insights` | json | 用户创建的洞见 |
| `meeting` | json | 完整会议数据 |
无论你是想分发即时摘要、记录行动项还是基于新会议数据构建自定义工作流Circleback 和 Sim 都能让你自动无缝处理所有与会议相关的事务。
## 使用说明
当会议被处理时你将收到会议记录、行动项、转录和录音。Circleback 使用 webhook 将数据推送到你的工作流。
## 备注
- 分类:`triggers`
- 类型:`circleback`

View File

@@ -0,0 +1,218 @@
---
title: Grain
description: 访问会议录音、转录文本和 AI 摘要
---
import { BlockInfoCard } from "@/components/ui/block-info-card"
<BlockInfoCard
type="grain"
color="#F6FAF9"
/>
{/* MANUAL-CONTENT-START:intro */}
[Grain](https://grain.com/) 是一个现代化平台,用于捕捉、存储和分享会议录音、转录文本、重点片段以及 AI 驱动的摘要。Grain 帮助团队将对话转化为可执行的洞察,让每个人都能对会议中的关键时刻保持一致。
使用 Grain您可以
- **访问可搜索的录音和转录文本**:可按关键词、参与者或主题查找和回顾每场会议。
- **分享重点片段和剪辑**:捕捉重要时刻,并在团队或工作流中分享短视频/音频片段。
- **获取 AI 生成的摘要**:利用 Grain 的先进 AI 自动生成会议摘要、行动项和关键洞察。
- **按团队或类型组织会议**:为录音打标签和分类,便于访问和报告。
Sim Grain 集成让您的坐席能够:
- 通过灵活的筛选条件(日期时间、参与者、团队等)列出、搜索和获取会议录音及详细信息。
- 获取会议的 AI 摘要、参与者、重点片段及其他元数据,以支持自动化或分析。
- 通过 Grain webhook在新会议被处理、摘要生成或重点片段创建时触发工作流。
- 轻松将 Grain 数据桥接到其他工具,或在会议中有重要事件发生时即时通知团队成员。
无论您是想自动化后续操作、保留重要对话记录还是在组织内挖掘洞察Grain 和 Sim 都能让您轻松将会议智能连接到工作流中。
{/* MANUAL-CONTENT-END */}
## 使用说明
将 Grain 集成到您的工作流中。访问会议录音、转录文本、重点片段和 AI 生成的摘要。还可以基于 Grain webhook 事件触发工作流。
## 工具
### `grain_list_recordings`
从 Grain 获取录音列表,可选过滤和分页
#### 输入
| 参数 | 类型 | 必填 | 说明 |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | 是 | Grain API key个人访问令牌 |
| `cursor` | string | 否 | 下一页的分页游标 |
| `beforeDatetime` | string | 否 | 仅包含此 ISO8601 时间戳之前的录音 |
| `afterDatetime` | string | 否 | 仅包含此 ISO8601 时间戳之后的录音 |
| `participantScope` | string | 否 | 过滤条件“internal” 或 “external” |
| `titleSearch` | string | 否 | 按录音标题搜索过滤 |
| `teamId` | string | 否 | 按团队 UUID 过滤 |
| `meetingTypeId` | string | 否 | 按会议类型 UUID 过滤 |
| `includeHighlights` | boolean | 否 | 响应中包含重点/片段 |
| `includeParticipants` | boolean | 否 | 响应中包含参与者列表 |
| `includeAiSummary` | boolean | 否 | 包含 AI 生成的摘要 |
#### 输出
| 参数 | 类型 | 说明 |
| --------- | ---- | ----------- |
| `recordings` | array | 录音对象数组 |
### `grain_get_recording`
根据 ID 获取单个录音的详细信息
#### 输入
| 参数 | 类型 | 必填 | 说明 |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | 是 | Grain API key个人访问令牌 |
| `recordingId` | string | 是 | 录音 UUID |
| `includeHighlights` | boolean | 否 | 包含重点/片段 |
| `includeParticipants` | boolean | 否 | 包含参与者列表 |
| `includeAiSummary` | boolean | 否 | 包含 AI 摘要 |
| `includeCalendarEvent` | boolean | 否 | 包含日历事件数据 |
| `includeHubspot` | boolean | 否 | 包含 HubSpot 关联 |
#### 输出
| 参数 | 类型 | 说明 |
| --------- | ---- | ----------- |
| `id` | string | 录音 UUID |
| `title` | string | 录音标题 |
| `start_datetime` | string | ISO8601 开始时间戳 |
| `end_datetime` | string | ISO8601 结束时间戳 |
| `duration_ms` | number | 持续时间(毫秒) |
| `media_type` | string | 音频、转录或视频 |
| `source` | string | 录音来源zoom、meet、teams 等) |
| `url` | string | 在 Grain 中查看的 URL |
| `thumbnail_url` | string | 缩略图 URL |
| `tags` | array | 标签字符串数组 |
| `teams` | array | 录音所属团队 |
| `meeting_type` | object | 会议类型信息id、name、scope |
| `highlights` | array | 高亮内容(如有) |
| `participants` | array | 参与者(如有) |
| `ai_summary` | object | AI 摘要文本(如有) |
| `calendar_event` | object | 日历事件数据(如有) |
| `hubspot` | object | HubSpot 关联信息(如有) |
### `grain_get_transcript`
获取录音的完整转录文本
#### 输入
| 参数 | 类型 | 必填 | 说明 |
| --------- | ---- | ---- | ----------- |
| `apiKey` | string | 是 | Grain API 密钥(个人访问令牌) |
| `recordingId` | string | 是 | 录音 UUID |
#### 输出
| 参数 | 类型 | 说明 |
| --------- | ---- | ----------- |
| `transcript` | array | 转录片段数组 |
### `grain_list_teams`
列出工作区中的所有团队
#### 输入
| 参数 | 类型 | 必填 | 说明 |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | 是 | Grain API key个人访问令牌 |
#### 输出
| 参数 | 类型 | 说明 |
| --------- | ---- | ----------- |
| `teams` | array | 团队对象数组 |
### `grain_list_meeting_types`
列出工作区中的所有会议类型
#### 输入
| 参数 | 类型 | 必填 | 说明 |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | 是 | Grain API key个人访问令牌 |
#### 输出
| 参数 | 类型 | 说明 |
| --------- | ---- | ----------- |
| `meeting_types` | array | 会议类型对象数组 |
### `grain_create_hook`
创建一个 webhook 以接收录制事件
#### 输入
| 参数 | 类型 | 必填 | 说明 |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | 是 | Grain API key个人访问令牌 |
| `hookUrl` | string | 是 | Webhook endpoint URL必须响应 2xx |
| `filterBeforeDatetime` | string | 否 | 筛选:此日期之前的录制 |
| `filterAfterDatetime` | string | 否 | 筛选:此日期之后的录制 |
| `filterParticipantScope` | string | 否 | 筛选“internal” 或 “external” |
| `filterTeamId` | string | 否 | 筛选:指定团队 UUID |
| `filterMeetingTypeId` | string | 否 | 筛选:指定会议类型 |
| `includeHighlights` | boolean | 否 | 在 webhook 负载中包含重点内容 |
| `includeParticipants` | boolean | 否 | 在 webhook 负载中包含参与者 |
| `includeAiSummary` | boolean | 否 | 在 webhook 负载中包含 AI 摘要 |
#### 输出
| 参数 | 类型 | 说明 |
| --------- | ---- | ----------- |
| `id` | string | Hook UUID |
| `enabled` | boolean | Hook 是否激活 |
| `hook_url` | string | webhook URL |
| `filter` | object | 已应用的过滤器 |
| `include` | object | 包含的字段 |
| `inserted_at` | string | ISO8601 创建时间戳 |
### `grain_list_hooks`
列出该账户下的所有 webhook
#### 输入
| 参数 | 类型 | 必填 | 说明 |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | 是 | Grain API key个人访问令牌 |
#### 输出
| 参数 | 类型 | 说明 |
| --------- | ---- | ----------- |
| `hooks` | array | Hook 对象数组 |
### `grain_delete_hook`
根据 ID 删除 webhook
#### 输入
| 参数 | 类型 | 必填 | 说明 |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | 是 | Grain API key个人访问令牌 |
| `hookId` | string | 是 | 要删除的 Hook UUID |
#### 输出
| 参数 | 类型 | 说明 |
| --------- | ---- | ----------- |
| `success` | boolean | webhook 删除成功时为 true |
## 备注
- 分类:`tools`
- 类型:`grain`

View File

@@ -60,9 +60,7 @@ Jina AI Reader 专注于从网页中提取最相关的内容,去除杂乱、
| 参数 | 类型 | 描述 |
| --------- | ---- | ----------- |
| `content` | 字符串 | 从 URL 提取的内容,处理为干净且适合 LLM 的文本 |
| `links` | 数组 | 页面中找到的链接列表(当启用 gatherLinks 或 withLinksummary 时) |
| `images` | 数组 | 页面中找到的图片列表(当启用 withImagesummary 时) |
| `content` | 字符串 | 从 URL 提取的内容,处理为简洁、适合 LLM 的文本 |
### `jina_search`

View File

@@ -39,7 +39,7 @@ import { BlockInfoCard } from "@/components/ui/block-info-card"
| 参数 | 类型 | 必填 | 说明 |
| --------- | ---- | -------- | ----------- |
| `model` | string | 是 | 要使用的模型(例如 gpt-4o、claude-sonnet-4-5、gemini-2.0-flash |
| `model` | string | 是 | 要使用的模型(例如gpt-4o、claude-sonnet-4-5、gemini-2.0-flash |
| `systemPrompt` | string | 否 | 设置助手行为的 system prompt |
| `context` | string | 是 | 发送给模型的用户消息或上下文 |
| `apiKey` | string | 否 | 提供方的 API key如未提供托管模型将使用平台密钥 |
@@ -49,6 +49,7 @@ import { BlockInfoCard } from "@/components/ui/block-info-card"
| `azureApiVersion` | string | 否 | Azure OpenAI API 版本 |
| `vertexProject` | string | 否 | Vertex AI 的 Google Cloud 项目 ID |
| `vertexLocation` | string | 否 | Vertex AI 的 Google Cloud 区域(默认为 us-central1 |
| `vertexCredential` | string | 否 | Vertex AI 的 Google Cloud OAuth 凭证 ID |
#### 输出

View File

@@ -5,7 +5,6 @@ title: 计划
import { Callout } from 'fumadocs-ui/components/callout'
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
import { Image } from '@/components/ui/image'
import { Video } from '@/components/ui/video'
计划模块会在指定的时间间隔或时间点自动触发工作流。
@@ -21,67 +20,58 @@ import { Video } from '@/components/ui/video'
## 计划选项
通过下拉选项配置工作流的运行时间:
配置工作流的运行时间:
<Tabs items={['简单间隔', 'Cron 表达式']}>
<Tab>
<ul className="list-disc space-y-1 pl-6">
<li><strong>每隔几分钟</strong>5 分钟、15 分钟、30 分钟的间隔</li>
<li><strong>每小时</strong>:每小时或每隔几小时</li>
<li><strong>每天</strong>:每天一次或多次</li>
<li><strong>每周</strong>:一周中的特定日子</li>
<li><strong>每月</strong>:一个月中的特定日子</li>
<li><strong>每 X 分钟</strong>:按分钟间隔运行1-1440</li>
<li><strong>每小时</strong>:每小时在指定的分钟运行</li>
<li><strong>每天</strong>:每天在指定时间运行</li>
<li><strong>每周</strong>:每周在指定的星期和时间运行</li>
<li><strong>每月</strong>:每月在指定的日期和时间运行</li>
</ul>
</Tab>
<Tab>
<p>使用 cron 表达式进行高级调度:</p>
<div className="text-sm space-y-1">
<div><code>0 9 * * 1-5</code> - 每个工作日的上午 9 点</div>
<div><code>*/15 * * * *</code> - 每 15 分钟</div>
<div><code>*/15 * * * *</code> - 每 15 分钟一次</div>
<div><code>0 0 1 * *</code> - 每月的第一天</div>
</div>
</Tab>
</Tabs>
## 配置计划
## 启用
当工作流被计划时
- 计划变为**激活**状态,并显示下次执行时间
- 点击 **"已计划"** 按钮以停用计划
- 计划在 **连续失败 3 次** 后会自动停用
计划与工作流部署相关联
<div className="flex justify-center">
<Image
src="/static/blocks/schedule-2.png"
alt="活动计划块"
width={500}
height={400}
className="my-6"
/>
</div>
<div className="flex justify-center">
<Image
src="/static/blocks/schedule-3.png"
alt="停用的计划"
width={500}
height={350}
className="my-6"
/>
</div>
<div className="flex justify-center">
<Image
src="/static/blocks/schedule-3.png"
alt="禁用计划"
width={500}
height={400}
className="my-6"
/>
</div>
已禁用的计划会显示上次激活的时间。点击 **"已禁用"** 徽章以重新激活计划。
- **部署工作流** → 计划激活并开始运行
- **取消部署工作流** → 计划被移除
- **重新部署工作流** → 计划会以当前配置重新创建
<Callout>
计划块无法接收传入连接,仅作为纯工作流触发器
必须先部署工作流,计划才会开始运行。请先配置计划块,然后在工具栏中部署
</Callout>
## 自动禁用
为防止持续性错误,计划任务在**连续失败 100 次**后会自动禁用。禁用后:
- 计划块上会显示警告徽章
- 计划将停止执行
- 点击徽章可重新激活计划
<div className="flex justify-center">
<Image
src="/static/blocks/schedule-3.png"
alt="已禁用的计划"
width={500}
height={400}
className="my-6"
/>
</div>
<Callout>
计划块无法接收传入连接,只能作为工作流的入口点。
</Callout>

View File

@@ -217,19 +217,21 @@ checksums:
content/9: cbca5d806da167603e38e7dc90344e57
fb53ce2c1fc28db4c6c09f5296ff59c6:
meta/title: a75428cb811bc50150cecde090a3a0d5
content/0: c0a142478cc5c515f87d368fa72da818
content/0: e4684b7201c2aed215c82606e9eaa293
content/1: 3fcad3dff5044fbf0c734bab806c437e
content/2: 7c82b7d111a2517b08861c4c0e71eff9
content/3: 1ed1a03c7f922f4b1437594f34ea2afb
content/4: ca43f34465308970910b39fa073e10ec
content/5: f67fd398c98884cf0829682dca6d5d91
content/6: 72a5feaa2b80a1f22d224e311a0e4efe
content/7: 62261cedf5fff6a13220f3f0b6de661b
content/8: e58bf5c8afb239f2606ec5dfba30fc2f
content/9: 35840d3d91271d11c9449e7f316ff280
content/10: 2ff1c8bf00c740f66bce8a4a7f768ca8
content/11: 909f57e2475676b16d90b6605cd3ff43
content/12: 8f5b5d43297c4ff114ca49395878292b
content/4: d34ebf41fb97810c8398b4064520bd7b
content/5: 5025a2d2e9eadc2b91f323b2862b0a1a
content/6: 913f67efd4923e0f70e29640405e34d2
content/7: a706670c6362a1b723ccc3d6720ad6af
content/8: ab4fe131de634064f9a7744a11599434
content/9: 2f6c9564a33ad9f752df55840b0c8e16
content/10: fef34568e5bbd5a50e2a89412f85302c
content/11: a891bfb5cf490148001f05acde467f68
content/12: bcd95e6bef30b6f480fee33800928b13
content/13: 2ff1c8bf00c740f66bce8a4a7f768ca8
content/14: 16eb64906b9e981ea3c11525ff5a1c2e
73129cc41f543288d67924faea3172db:
meta/title: 8cbe02c3108a0dbe0586dbc18db04efe
meta/description: 9540ac7731cebd594afa4ce2fb59ab9d
@@ -557,7 +559,7 @@ checksums:
content/8: 6325adefb6e1520835225285b18b6a45
content/9: b7fa85fce9c7476fe132df189e27dac1
content/10: 371d0e46b4bd2c23f559b8bc112f6955
content/11: 7ad14ccfe548588081626cfe769ad492
content/11: a34c59648e0f7218a8e9b72c333366fb
content/12: bcadfc362b69078beee0088e5936c98b
content/13: 6af66efd0da20944a87fdb8d9defa358
content/14: b3f310d5ef115bea5a8b75bf25d7ea9a
@@ -2664,7 +2666,7 @@ checksums:
content/12: 371d0e46b4bd2c23f559b8bc112f6955
content/13: 6ad8fcd98fc25eab726d05f9e9ccc6a4
content/14: bcadfc362b69078beee0088e5936c98b
content/15: 0ac8cd06fceaf16c960de79f7df987ee
content/15: 1eb58de69f18ba555d7f349fed365de5
content/16: c340d51e1b2d05b9b68a79baa8e9481a
content/17: 64d5a97527775c7bfcdcbb418a10ea35
content/18: 371d0e46b4bd2c23f559b8bc112f6955
@@ -4579,39 +4581,41 @@ checksums:
content/19: 83fc31418ff454a5e06b290e3708ef32
content/20: 4392b5939a6d5774fb080cad1ee1dbb8
content/21: 890b65b7326a9eeef3933a8b63f6ccdd
content/22: 892d6a80d8ac5a895a20408462f63cc5
content/23: 930176b3786ebbe9eb1f76488f183140
content/24: 22d9d167630c581e868d6d7a9fdddbcf
content/25: d250621762d63cd87b3359236c95bdac
content/26: 50be8ae73b8ce27de7ddd21964ee29e8
content/27: cd622841b5bc748a7b2a0d9252e72bd5
content/28: 38608a5d416eb33f373c6f9e6bf546b9
content/29: 074c12c794283c3af53a3f038fbda2a6
content/30: 5cdcf7e32294e087612b77914d850d26
content/31: 7529829b2f064fedf956da639aaea8e1
content/32: 7b5e2207a0d93fd434b92f2f290a8dd5
content/33: f950b8f58af1973a3e00393d860bce02
content/34: d5ff07fec9455183e1d93f7ddf1dab1b
content/35: 5d2d85e082d9fdd3859fb5c788d5f9a3
content/36: 23a7de9c5adb6e07c28c23a9d4e03dc2
content/37: 7bb928aba33a4013ad5f08487da5bbf9
content/38: dbbf313837f13ddfa4a8843d71cb9cc4
content/39: cf10560ae6defb8ee5da344fc6509f6e
content/40: 1dea5c6442c127ae290185db0cef067b
content/41: 332dab0588fb35dabb64b674ba6120eb
content/42: 714b3f99b0a8686bbb3434deb1f682b3
content/43: ba18ac99184b17d7e49bd1abdc814437
content/44: bed2b629274d55c38bd637e6a28dbc4a
content/45: 71487ae6f6fb1034d1787456de442e6d
content/46: 137d9874cf5ec8d09bd447f224cc7a7c
content/47: 6b5b4c3b2f98b8fc7dd908fef2605ce8
content/48: 3af6812662546ce647a55939241fd88e
content/49: 6a4d7f0ccb8c28303251d1ef7b3dcca7
content/50: 5dce779f77cc2b0abf12802a833df499
content/51: aa47ff01b631252f024eaaae0c773e42
content/52: 1266d1c7582bb617cdef56857be34f30
content/53: c2cef2688104adaf6641092f43d4969a
content/54: 089fc64b4589b2eaa371de7e04c4aed9
content/22: ada515cf6e2e0f9d3f57f720f79699d3
content/23: 332e0d08f601da9fb56c6b7e7c8e9daf
content/24: 892d6a80d8ac5a895a20408462f63cc5
content/25: 930176b3786ebbe9eb1f76488f183140
content/26: 22d9d167630c581e868d6d7a9fdddbcf
content/27: d250621762d63cd87b3359236c95bdac
content/28: 50be8ae73b8ce27de7ddd21964ee29e8
content/29: cd622841b5bc748a7b2a0d9252e72bd5
content/30: 38608a5d416eb33f373c6f9e6bf546b9
content/31: 074c12c794283c3af53a3f038fbda2a6
content/32: 5cdcf7e32294e087612b77914d850d26
content/33: 7529829b2f064fedf956da639aaea8e1
content/34: 7b5e2207a0d93fd434b92f2f290a8dd5
content/35: f950b8f58af1973a3e00393d860bce02
content/36: d5ff07fec9455183e1d93f7ddf1dab1b
content/37: 5d2d85e082d9fdd3859fb5c788d5f9a3
content/38: 23a7de9c5adb6e07c28c23a9d4e03dc2
content/39: 7bb928aba33a4013ad5f08487da5bbf9
content/40: dbbf313837f13ddfa4a8843d71cb9cc4
content/41: cf10560ae6defb8ee5da344fc6509f6e
content/42: 1dea5c6442c127ae290185db0cef067b
content/43: 332dab0588fb35dabb64b674ba6120eb
content/44: 714b3f99b0a8686bbb3434deb1f682b3
content/45: ba18ac99184b17d7e49bd1abdc814437
content/46: bed2b629274d55c38bd637e6a28dbc4a
content/47: 71487ae6f6fb1034d1787456de442e6d
content/48: 137d9874cf5ec8d09bd447f224cc7a7c
content/49: 6b5b4c3b2f98b8fc7dd908fef2605ce8
content/50: 3af6812662546ce647a55939241fd88e
content/51: 6a4d7f0ccb8c28303251d1ef7b3dcca7
content/52: 5dce779f77cc2b0abf12802a833df499
content/53: aa47ff01b631252f024eaaae0c773e42
content/54: 1266d1c7582bb617cdef56857be34f30
content/55: c2cef2688104adaf6641092f43d4969a
content/56: 089fc64b4589b2eaa371de7e04c4aed9
722959335ba76c9d0097860e2ad5a952:
meta/title: 1f5b53b9904ec41d49c1e726e3d56b40
content/0: c2b41859d63a751682f0d9aec488e581
@@ -49869,3 +49873,84 @@ checksums:
content/32: fd0f38eb3fe5cf95be366a4ff6b4fb90
content/33: b3f310d5ef115bea5a8b75bf25d7ea9a
content/34: 4a7b2c644e487f3d12b6a6b54f8c6773
d75b83c6e1f54ba41b8cd27960256f4e:
meta/title: 63d9b961cc414fe48ed3a117b1849ac0
meta/description: 0828295c4f8482d4ab18ae67cefb3efa
content/0: 1b031fb0c62c46b177aeed5c3d3f8f80
content/1: ce93512e241ca1ac9723d797d937e8d6
content/2: 4539a8e7b9a0b8c570e8b2261e6d53e8
content/3: 05d783b8313bd21464edbc35f72acda7
content/4: aadfc263ce44fb67b5ec899cf7034707
content/5: 7feedc49fa38d45979f4ae3685e2a2e8
content/6: 6d8ac64adb588d4675e8ad779861cf79
content/7: 9b55ef7d0cb63e28ac9aa5b71ca5611e
content/8: 821e6394b0a953e2b0842b04ae8f3105
content/9: 3e3c921ad486b0390454b325a0ecab98
content/10: 9c8aa3f09c9b2bd50ea4cdff3598ea4e
content/11: 3e12916db64b7037df05c733542689b8
content/12: bf76a8fa5e9be0ad03d4a25fc1cd5d2c
content/13: 371d0e46b4bd2c23f559b8bc112f6955
content/14: 7476e5130f17fef0005e9eb79a288a4b
content/15: bcadfc362b69078beee0088e5936c98b
content/16: 921522dc74bcfe253933280a44e32325
content/17: 5c91a98c8c182a86561bdc2bb55d52fb
content/18: 5a003869e25c931a6a39e75f1fbb331e
content/19: 371d0e46b4bd2c23f559b8bc112f6955
content/20: c03a1ad5898fb9592c47f9fef3a443f9
content/21: bcadfc362b69078beee0088e5936c98b
content/22: 966dd802eb6aa5f8a6d37be800aa0476
content/23: d7f931ee4088a41234a19dbc070bbb06
content/24: b7a5a66f81700ac0f58f0f417a090db1
content/25: 371d0e46b4bd2c23f559b8bc112f6955
content/26: 8dddce76764cf0050ac4f8cb88cbf3b7
content/27: bcadfc362b69078beee0088e5936c98b
content/28: b30efde22ffd4646ac11e1b7053b2f71
content/29: e12dd4d7b99e7c7038b8935f48fbed29
content/30: fd9b29ad276abb6ffbb5350d71fb174a
content/31: 371d0e46b4bd2c23f559b8bc112f6955
content/32: 2e9928cbf2e736fc61f08d4339ccae59
content/33: bcadfc362b69078beee0088e5936c98b
content/34: 99b063108e07f3350f6ec02ce632d682
content/35: c1d1369970a7430014aa1f70a75e1b56
content/36: 25adb991028a92365272704d5921c0fe
content/37: 371d0e46b4bd2c23f559b8bc112f6955
content/38: 2e9928cbf2e736fc61f08d4339ccae59
content/39: bcadfc362b69078beee0088e5936c98b
content/40: 852dffd5402c58c35f6abfd6b8046585
content/41: 66a326fe86b5ff7c12f097bae8917018
content/42: a58bde3efd6164d3541047bd97cee6fe
content/43: 371d0e46b4bd2c23f559b8bc112f6955
content/44: 1c98f5538b8b37801da7f5e8c5912219
content/45: bcadfc362b69078beee0088e5936c98b
content/46: a84d2702883c8af99a401582e2192d39
content/47: e3dd3df817017359361432029b0c5ef1
content/48: 557fb6942a695af69e94fbd7692590e6
content/49: 371d0e46b4bd2c23f559b8bc112f6955
content/50: 2e9928cbf2e736fc61f08d4339ccae59
content/51: bcadfc362b69078beee0088e5936c98b
content/52: 69a6fbea11482ab284195984788c9710
content/53: dafa30ae47d52c901b756bd8bd0ae2fd
content/54: 70e8dcde230d0cd4a9b7b18c8df043cd
content/55: 371d0e46b4bd2c23f559b8bc112f6955
content/56: 97a4116e8509aede52ea1801656a7671
content/57: bcadfc362b69078beee0088e5936c98b
content/58: 2b73b3348aa37da99e35d12e892197f2
content/59: b3f310d5ef115bea5a8b75bf25d7ea9a
content/60: 11e67a936d6e434842446342f83b5289
753fe021f7c0fca8dc429f2e971fae5a:
meta/title: b3498307d692252f1286175a18b62e16
meta/description: 5f52444a2ad126633723bb44d06e7638
content/0: 1b031fb0c62c46b177aeed5c3d3f8f80
content/1: 28c5925266bfcf8a373b0490f6e63c8c
content/2: 76482724500904d534bc171470aa5594
content/3: 5e43b6ea89ab6aa913524b5db4c4f2f3
content/4: aa6b3680f93d09752072d278d8d3e6bb
content/5: 49686bd2f0af8b45071a5e11a47df85e
content/6: 4fcff29464aac96e894b0e9da8b7aac5
content/7: 89255fc21a3a429f27d1f2cdfe065235
content/8: 51dac9c1f218035a3f23137588eca5b6
content/9: 2ddb58bd6414897d33c6cb1590558749
content/10: 821e6394b0a953e2b0842b04ae8f3105
content/11: 972721b310d5e3e6e08ec33dc9630f62
content/12: b3f310d5ef115bea5a8b75bf25d7ea9a
content/13: 06a9cbcec05366fe1c873c90c36b4f44

4
apps/docs/lib/db.ts Normal file
View File

@@ -0,0 +1,4 @@
import { db } from '@sim/db'
import { docsEmbeddings } from '@sim/db/schema'
/**
 * Barrel module for the docs app: re-exports the shared database
 * client and the docs embeddings table from the @sim/db workspace
 * package so docs code has a single local import path.
 */
export { db, docsEmbeddings }

View File

@@ -0,0 +1,40 @@
/**
 * Generate an embedding vector for a search query via the OpenAI
 * embeddings API (`text-embedding-3-small`, float encoding).
 *
 * @param query - The search text to embed.
 * @returns The embedding as an array of numbers.
 * @throws If OPENAI_API_KEY is unset, the HTTP request fails, or the
 *         response payload does not have the expected shape.
 */
export async function generateSearchEmbedding(query: string): Promise<number[]> {
  const apiKey = process.env.OPENAI_API_KEY
  if (!apiKey) {
    throw new Error('OPENAI_API_KEY environment variable is required')
  }

  // Request body for the embeddings endpoint.
  const requestBody = {
    input: query,
    model: 'text-embedding-3-small',
    encoding_format: 'float',
  }

  const response = await fetch('https://api.openai.com/v1/embeddings', {
    method: 'POST',
    headers: {
      Authorization: `Bearer ${apiKey}`,
      'Content-Type': 'application/json',
    },
    body: JSON.stringify(requestBody),
  })

  if (!response.ok) {
    // Include the response body in the error for easier debugging.
    const errorText = await response.text()
    throw new Error(`OpenAI API failed: ${response.status} ${response.statusText} - ${errorText}`)
  }

  const payload = await response.json()

  // Validate the payload shape before trusting it.
  const entries = payload?.data
  if (!Array.isArray(entries) || entries.length === 0) {
    throw new Error('OpenAI API returned invalid response structure: missing or empty data array')
  }

  const embedding = entries[0]?.embedding
  if (!Array.isArray(embedding)) {
    throw new Error('OpenAI API returned invalid response structure: missing or invalid embedding')
  }

  return embedding
}

View File

@@ -11,16 +11,19 @@
"type-check": "tsc --noEmit"
},
"dependencies": {
"@sim/db": "workspace:*",
"@tabler/icons-react": "^3.31.0",
"@vercel/og": "^0.6.5",
"class-variance-authority": "^0.7.1",
"clsx": "^2.1.1",
"drizzle-orm": "^0.44.5",
"fumadocs-core": "16.2.3",
"fumadocs-mdx": "14.1.0",
"fumadocs-ui": "16.2.3",
"lucide-react": "^0.511.0",
"next": "16.1.0-canary.21",
"next-themes": "^0.4.6",
"postgres": "^3.4.5",
"react": "19.2.1",
"react-dom": "19.2.1",
"tailwind-merge": "^3.0.2"

Binary file not shown.

Before

Width:  |  Height:  |  Size: 138 KiB

View File

@@ -20,7 +20,7 @@ interface NavProps {
}
export default function Nav({ hideAuthButtons = false, variant = 'landing' }: NavProps = {}) {
const [githubStars, setGithubStars] = useState('18.6k')
const [githubStars, setGithubStars] = useState('24.4k')
const [isHovered, setIsHovered] = useState(false)
const [isLoginHovered, setIsLoginHovered] = useState(false)
const router = useRouter()

View File

@@ -1,26 +1,42 @@
'use client'
import { type ReactNode, useState } from 'react'
import type { ReactNode } from 'react'
import { QueryClient, QueryClientProvider } from '@tanstack/react-query'
export function QueryProvider({ children }: { children: ReactNode }) {
const [queryClient] = useState(
() =>
new QueryClient({
defaultOptions: {
queries: {
staleTime: 30 * 1000,
gcTime: 5 * 60 * 1000,
refetchOnWindowFocus: false,
retry: 1,
retryOnMount: false,
},
mutations: {
retry: 1,
},
},
})
)
/**
 * Build a QueryClient with the app-wide default options:
 * 30s stale time, 5min garbage collection, no refetch on window
 * focus, and a single retry for both queries and mutations.
 */
function makeQueryClient() {
  return new QueryClient({
    defaultOptions: {
      queries: {
        staleTime: 30 * 1000,
        gcTime: 5 * 60 * 1000,
        refetchOnWindowFocus: false,
        retry: 1,
        retryOnMount: false,
      },
      mutations: {
        retry: 1,
      },
    },
  })
}

// Browser-side singleton; stays undefined during SSR.
let browserQueryClient: QueryClient | undefined

/**
 * Returns the QueryClient for the current environment.
 * On the server a fresh client is created per call so request-scoped
 * caches are never shared between requests; in the browser a lazily
 * created singleton is reused, and can be imported directly for cache
 * operations outside React components.
 */
export function getQueryClient() {
  if (typeof window === 'undefined') {
    return makeQueryClient()
  }
  if (!browserQueryClient) {
    browserQueryClient = makeQueryClient()
  }
  return browserQueryClient
}

/** Provides the shared QueryClient to the React subtree. */
export function QueryProvider({ children }: { children: ReactNode }) {
  const queryClient = getQueryClient()
  return <QueryClientProvider client={queryClient}>{children}</QueryClientProvider>
}

View File

@@ -1,6 +1,9 @@
import { createMockLogger as createSimTestingMockLogger } from '@sim/testing'
import { NextRequest } from 'next/server'
import { vi } from 'vitest'
export { createMockLogger } from '@sim/testing'
export interface MockUser {
id: string
email: string
@@ -214,12 +217,11 @@ export const mockDb = {
})),
}
export const mockLogger = {
info: vi.fn(),
warn: vi.fn(),
error: vi.fn(),
debug: vi.fn(),
}
/**
* Mock logger using @sim/testing createMockLogger.
* This provides a consistent mock logger across all API tests.
*/
export const mockLogger = createSimTestingMockLogger()
export const mockUser = {
id: 'user-123',
@@ -729,7 +731,8 @@ export function mockKnowledgeSchemas() {
}
/**
* Mock console logger
* Mock console logger using the shared mockLogger instance.
* This ensures tests can assert on the same mockLogger instance exported from this module.
*/
export function mockConsoleLogger() {
vi.doMock('@/lib/logs/console/logger', () => ({

View File

@@ -4,7 +4,7 @@
* @vitest-environment node
*/
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import { createMockRequest } from '@/app/api/__test-utils__/utils'
import { createMockLogger, createMockRequest } from '@/app/api/__test-utils__/utils'
describe('OAuth Connections API Route', () => {
const mockGetSession = vi.fn()
@@ -14,12 +14,7 @@ describe('OAuth Connections API Route', () => {
where: vi.fn().mockReturnThis(),
limit: vi.fn(),
}
const mockLogger = {
info: vi.fn(),
warn: vi.fn(),
error: vi.fn(),
debug: vi.fn(),
}
const mockLogger = createMockLogger()
const mockParseProvider = vi.fn()
const mockEvaluateScopeCoverage = vi.fn()
@@ -70,7 +65,7 @@ describe('OAuth Connections API Route', () => {
})
)
vi.doMock('@/lib/oauth/oauth', () => ({
vi.doMock('@/lib/oauth/utils', () => ({
parseProvider: mockParseProvider,
evaluateScopeCoverage: mockEvaluateScopeCoverage,
}))

View File

@@ -5,8 +5,8 @@ import { type NextRequest, NextResponse } from 'next/server'
import { getSession } from '@/lib/auth'
import { generateRequestId } from '@/lib/core/utils/request'
import { createLogger } from '@/lib/logs/console/logger'
import type { OAuthProvider } from '@/lib/oauth/oauth'
import { evaluateScopeCoverage, parseProvider } from '@/lib/oauth/oauth'
import type { OAuthProvider } from '@/lib/oauth'
import { evaluateScopeCoverage, parseProvider } from '@/lib/oauth'
const logger = createLogger('OAuthConnectionsAPI')

View File

@@ -6,6 +6,7 @@
import { NextRequest } from 'next/server'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import { createMockLogger } from '@/app/api/__test-utils__/utils'
describe('OAuth Credentials API Route', () => {
const mockGetSession = vi.fn()
@@ -17,12 +18,7 @@ describe('OAuth Credentials API Route', () => {
where: vi.fn().mockReturnThis(),
limit: vi.fn(),
}
const mockLogger = {
info: vi.fn(),
warn: vi.fn(),
error: vi.fn(),
debug: vi.fn(),
}
const mockLogger = createMockLogger()
const mockUUID = 'mock-uuid-12345678-90ab-cdef-1234-567890abcdef'
@@ -42,7 +38,7 @@ describe('OAuth Credentials API Route', () => {
getSession: mockGetSession,
}))
vi.doMock('@/lib/oauth/oauth', () => ({
vi.doMock('@/lib/oauth/utils', () => ({
parseProvider: mockParseProvider,
evaluateScopeCoverage: mockEvaluateScopeCoverage,
}))

View File

@@ -7,7 +7,7 @@ import { z } from 'zod'
import { checkHybridAuth } from '@/lib/auth/hybrid'
import { generateRequestId } from '@/lib/core/utils/request'
import { createLogger } from '@/lib/logs/console/logger'
import { evaluateScopeCoverage, parseProvider } from '@/lib/oauth/oauth'
import { evaluateScopeCoverage, type OAuthProvider, parseProvider } from '@/lib/oauth'
import { getUserEntityPermissions } from '@/lib/workspaces/permissions/utils'
export const dynamic = 'force-dynamic'
@@ -132,7 +132,7 @@ export async function GET(request: NextRequest) {
}
// Parse the provider to get base provider and feature type (if provider is present)
const { baseProvider } = parseProvider(providerParam || 'google-default')
const { baseProvider } = parseProvider((providerParam || 'google') as OAuthProvider)
let accountsData

View File

@@ -4,7 +4,7 @@
* @vitest-environment node
*/
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import { createMockRequest } from '@/app/api/__test-utils__/utils'
import { createMockLogger, createMockRequest } from '@/app/api/__test-utils__/utils'
describe('OAuth Disconnect API Route', () => {
const mockGetSession = vi.fn()
@@ -12,12 +12,7 @@ describe('OAuth Disconnect API Route', () => {
delete: vi.fn().mockReturnThis(),
where: vi.fn(),
}
const mockLogger = {
info: vi.fn(),
warn: vi.fn(),
error: vi.fn(),
debug: vi.fn(),
}
const mockLogger = createMockLogger()
const mockUUID = 'mock-uuid-12345678-90ab-cdef-1234-567890abcdef'

View File

@@ -4,7 +4,7 @@
* @vitest-environment node
*/
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import { createMockRequest } from '@/app/api/__test-utils__/utils'
import { createMockLogger, createMockRequest } from '@/app/api/__test-utils__/utils'
describe('OAuth Token API Routes', () => {
const mockGetUserId = vi.fn()
@@ -13,12 +13,7 @@ describe('OAuth Token API Routes', () => {
const mockAuthorizeCredentialUse = vi.fn()
const mockCheckHybridAuth = vi.fn()
const mockLogger = {
info: vi.fn(),
warn: vi.fn(),
error: vi.fn(),
debug: vi.fn(),
}
const mockLogger = createMockLogger()
const mockUUID = 'mock-uuid-12345678-90ab-cdef-1234-567890abcdef'
const mockRequestId = mockUUID.slice(0, 8)

View File

@@ -3,9 +3,11 @@
*
* @vitest-environment node
*/
import { createSession, loggerMock } from '@sim/testing'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
const mockSession = { user: { id: 'test-user-id' } }
const mockSession = createSession({ userId: 'test-user-id' })
const mockGetSession = vi.fn()
vi.mock('@/lib/auth', () => ({
@@ -26,19 +28,13 @@ vi.mock('@sim/db', () => ({
vi.mock('@/lib/oauth/oauth', () => ({
refreshOAuthToken: vi.fn(),
OAUTH_PROVIDERS: {},
}))
vi.mock('@/lib/logs/console/logger', () => ({
createLogger: vi.fn().mockReturnValue({
info: vi.fn(),
warn: vi.fn(),
error: vi.fn(),
debug: vi.fn(),
}),
}))
vi.mock('@/lib/logs/console/logger', () => loggerMock)
import { db } from '@sim/db'
import { refreshOAuthToken } from '@/lib/oauth/oauth'
import { refreshOAuthToken } from '@/lib/oauth'
import {
getCredential,
getUserId,
@@ -46,14 +42,14 @@ import {
refreshTokenIfNeeded,
} from '@/app/api/auth/oauth/utils'
const mockDb = db as any
const mockDbTyped = db as any
const mockRefreshOAuthToken = refreshOAuthToken as any
describe('OAuth Utils', () => {
beforeEach(() => {
vi.clearAllMocks()
mockGetSession.mockResolvedValue(mockSession)
mockDb.limit.mockReturnValue([])
mockDbTyped.limit.mockReturnValue([])
})
afterEach(() => {
@@ -68,14 +64,14 @@ describe('OAuth Utils', () => {
})
it('should get user ID from workflow when workflowId is provided', async () => {
mockDb.limit.mockReturnValueOnce([{ userId: 'workflow-owner-id' }])
mockDbTyped.limit.mockReturnValueOnce([{ userId: 'workflow-owner-id' }])
const userId = await getUserId('request-id', 'workflow-id')
expect(mockDb.select).toHaveBeenCalled()
expect(mockDb.from).toHaveBeenCalled()
expect(mockDb.where).toHaveBeenCalled()
expect(mockDb.limit).toHaveBeenCalledWith(1)
expect(mockDbTyped.select).toHaveBeenCalled()
expect(mockDbTyped.from).toHaveBeenCalled()
expect(mockDbTyped.where).toHaveBeenCalled()
expect(mockDbTyped.limit).toHaveBeenCalledWith(1)
expect(userId).toBe('workflow-owner-id')
})
@@ -88,7 +84,7 @@ describe('OAuth Utils', () => {
})
it('should return undefined if workflow is not found', async () => {
mockDb.limit.mockReturnValueOnce([])
mockDbTyped.limit.mockReturnValueOnce([])
const userId = await getUserId('request-id', 'nonexistent-workflow-id')
@@ -99,20 +95,20 @@ describe('OAuth Utils', () => {
describe('getCredential', () => {
it('should return credential when found', async () => {
const mockCredential = { id: 'credential-id', userId: 'test-user-id' }
mockDb.limit.mockReturnValueOnce([mockCredential])
mockDbTyped.limit.mockReturnValueOnce([mockCredential])
const credential = await getCredential('request-id', 'credential-id', 'test-user-id')
expect(mockDb.select).toHaveBeenCalled()
expect(mockDb.from).toHaveBeenCalled()
expect(mockDb.where).toHaveBeenCalled()
expect(mockDb.limit).toHaveBeenCalledWith(1)
expect(mockDbTyped.select).toHaveBeenCalled()
expect(mockDbTyped.from).toHaveBeenCalled()
expect(mockDbTyped.where).toHaveBeenCalled()
expect(mockDbTyped.limit).toHaveBeenCalledWith(1)
expect(credential).toEqual(mockCredential)
})
it('should return undefined when credential is not found', async () => {
mockDb.limit.mockReturnValueOnce([])
mockDbTyped.limit.mockReturnValueOnce([])
const credential = await getCredential('request-id', 'nonexistent-id', 'test-user-id')
@@ -126,7 +122,7 @@ describe('OAuth Utils', () => {
id: 'credential-id',
accessToken: 'valid-token',
refreshToken: 'refresh-token',
accessTokenExpiresAt: new Date(Date.now() + 3600 * 1000), // 1 hour in the future
accessTokenExpiresAt: new Date(Date.now() + 3600 * 1000),
providerId: 'google',
}
@@ -141,7 +137,7 @@ describe('OAuth Utils', () => {
id: 'credential-id',
accessToken: 'expired-token',
refreshToken: 'refresh-token',
accessTokenExpiresAt: new Date(Date.now() - 3600 * 1000), // 1 hour in the past
accessTokenExpiresAt: new Date(Date.now() - 3600 * 1000),
providerId: 'google',
}
@@ -154,8 +150,8 @@ describe('OAuth Utils', () => {
const result = await refreshTokenIfNeeded('request-id', mockCredential, 'credential-id')
expect(mockRefreshOAuthToken).toHaveBeenCalledWith('google', 'refresh-token')
expect(mockDb.update).toHaveBeenCalled()
expect(mockDb.set).toHaveBeenCalled()
expect(mockDbTyped.update).toHaveBeenCalled()
expect(mockDbTyped.set).toHaveBeenCalled()
expect(result).toEqual({ accessToken: 'new-token', refreshed: true })
})
@@ -164,7 +160,7 @@ describe('OAuth Utils', () => {
id: 'credential-id',
accessToken: 'expired-token',
refreshToken: 'refresh-token',
accessTokenExpiresAt: new Date(Date.now() - 3600 * 1000), // 1 hour in the past
accessTokenExpiresAt: new Date(Date.now() - 3600 * 1000),
providerId: 'google',
}
@@ -180,7 +176,7 @@ describe('OAuth Utils', () => {
id: 'credential-id',
accessToken: 'token',
refreshToken: null,
accessTokenExpiresAt: new Date(Date.now() - 3600 * 1000), // 1 hour in the past
accessTokenExpiresAt: new Date(Date.now() - 3600 * 1000),
providerId: 'google',
}
@@ -197,11 +193,11 @@ describe('OAuth Utils', () => {
id: 'credential-id',
accessToken: 'valid-token',
refreshToken: 'refresh-token',
accessTokenExpiresAt: new Date(Date.now() + 3600 * 1000), // 1 hour in the future
accessTokenExpiresAt: new Date(Date.now() + 3600 * 1000),
providerId: 'google',
userId: 'test-user-id',
}
mockDb.limit.mockReturnValueOnce([mockCredential])
mockDbTyped.limit.mockReturnValueOnce([mockCredential])
const token = await refreshAccessTokenIfNeeded('credential-id', 'test-user-id', 'request-id')
@@ -214,11 +210,11 @@ describe('OAuth Utils', () => {
id: 'credential-id',
accessToken: 'expired-token',
refreshToken: 'refresh-token',
accessTokenExpiresAt: new Date(Date.now() - 3600 * 1000), // 1 hour in the past
accessTokenExpiresAt: new Date(Date.now() - 3600 * 1000),
providerId: 'google',
userId: 'test-user-id',
}
mockDb.limit.mockReturnValueOnce([mockCredential])
mockDbTyped.limit.mockReturnValueOnce([mockCredential])
mockRefreshOAuthToken.mockResolvedValueOnce({
accessToken: 'new-token',
@@ -229,13 +225,13 @@ describe('OAuth Utils', () => {
const token = await refreshAccessTokenIfNeeded('credential-id', 'test-user-id', 'request-id')
expect(mockRefreshOAuthToken).toHaveBeenCalledWith('google', 'refresh-token')
expect(mockDb.update).toHaveBeenCalled()
expect(mockDb.set).toHaveBeenCalled()
expect(mockDbTyped.update).toHaveBeenCalled()
expect(mockDbTyped.set).toHaveBeenCalled()
expect(token).toBe('new-token')
})
it('should return null if credential not found', async () => {
mockDb.limit.mockReturnValueOnce([])
mockDbTyped.limit.mockReturnValueOnce([])
const token = await refreshAccessTokenIfNeeded('nonexistent-id', 'test-user-id', 'request-id')
@@ -247,11 +243,11 @@ describe('OAuth Utils', () => {
id: 'credential-id',
accessToken: 'expired-token',
refreshToken: 'refresh-token',
accessTokenExpiresAt: new Date(Date.now() - 3600 * 1000), // 1 hour in the past
accessTokenExpiresAt: new Date(Date.now() - 3600 * 1000),
providerId: 'google',
userId: 'test-user-id',
}
mockDb.limit.mockReturnValueOnce([mockCredential])
mockDbTyped.limit.mockReturnValueOnce([mockCredential])
mockRefreshOAuthToken.mockResolvedValueOnce(null)

View File

@@ -3,7 +3,7 @@ import { account, workflow } from '@sim/db/schema'
import { and, desc, eq } from 'drizzle-orm'
import { getSession } from '@/lib/auth'
import { createLogger } from '@/lib/logs/console/logger'
import { refreshOAuthToken } from '@/lib/oauth/oauth'
import { refreshOAuthToken } from '@/lib/oauth'
const logger = createLogger('OAuthUtilsAPI')

View File

@@ -0,0 +1,550 @@
/**
* Tests for chat OTP API route
*
* @vitest-environment node
*/
import { NextRequest } from 'next/server'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
// Verifies the chat OTP route's dual storage backends (Redis vs. database)
// across the full OTP lifecycle: store (POST), verify (PUT), and delete.
// All external collaborators (Redis, drizzle db, mailer, zod, logger,
// response helpers) are mocked via vi.doMock before each dynamic import.
describe('Chat OTP API Route', () => {
  const mockEmail = 'test@example.com'
  const mockChatId = 'chat-123'
  const mockIdentifier = 'test-chat'
  const mockOTP = '123456'
  // Redis client method mocks.
  const mockRedisSet = vi.fn()
  const mockRedisGet = vi.fn()
  const mockRedisDel = vi.fn()
  const mockGetRedisClient = vi.fn()
  // Drizzle db entry-point mocks.
  const mockDbSelect = vi.fn()
  const mockDbInsert = vi.fn()
  const mockDbDelete = vi.fn()
  // Email delivery mocks.
  const mockSendEmail = vi.fn()
  const mockRenderOTPEmail = vi.fn()
  // Response/auth helper mocks.
  const mockAddCorsHeaders = vi.fn()
  const mockCreateSuccessResponse = vi.fn()
  const mockCreateErrorResponse = vi.fn()
  const mockSetChatAuthCookie = vi.fn()
  const mockGenerateRequestId = vi.fn()
  // Mutable switch read lazily by the mocked getStorageMethod, so each
  // describe-block can select which storage path the route exercises.
  let storageMethod: 'redis' | 'database' = 'redis'
  beforeEach(() => {
    // Reset the module registry so each test re-imports './route' against
    // the freshly registered vi.doMock factories below.
    vi.resetModules()
    vi.clearAllMocks()
    // Pin randomness and time so generated OTP values/expiries are stable.
    vi.spyOn(Math, 'random').mockReturnValue(0.123456)
    vi.spyOn(Date, 'now').mockReturnValue(1640995200000)
    vi.stubGlobal('crypto', {
      ...crypto,
      randomUUID: vi.fn().mockReturnValue('test-uuid-1234'),
    })
    const mockRedisClient = {
      set: mockRedisSet,
      get: mockRedisGet,
      del: mockRedisDel,
    }
    mockGetRedisClient.mockReturnValue(mockRedisClient)
    mockRedisSet.mockResolvedValue('OK')
    mockRedisGet.mockResolvedValue(null)
    mockRedisDel.mockResolvedValue(1)
    vi.doMock('@/lib/core/config/redis', () => ({
      getRedisClient: mockGetRedisClient,
    }))
    // Builds a select().from().where().limit() chain resolving to `result`.
    const createDbChain = (result: any) => ({
      from: vi.fn().mockReturnValue({
        where: vi.fn().mockReturnValue({
          limit: vi.fn().mockResolvedValue(result),
        }),
      }),
    })
    mockDbSelect.mockImplementation(() => createDbChain([]))
    mockDbInsert.mockImplementation(() => ({
      values: vi.fn().mockResolvedValue(undefined),
    }))
    mockDbDelete.mockImplementation(() => ({
      where: vi.fn().mockResolvedValue(undefined),
    }))
    vi.doMock('@sim/db', () => ({
      db: {
        select: mockDbSelect,
        insert: mockDbInsert,
        delete: mockDbDelete,
        // Transactions replay the same mocks against a fake tx object, so
        // assertions on mockDbInsert/mockDbDelete also cover tx usage.
        transaction: vi.fn(async (callback) => {
          return callback({
            select: mockDbSelect,
            insert: mockDbInsert,
            delete: mockDbDelete,
          })
        }),
      },
    }))
    vi.doMock('@sim/db/schema', () => ({
      chat: {
        id: 'id',
        authType: 'authType',
        allowedEmails: 'allowedEmails',
        title: 'title',
      },
      verification: {
        id: 'id',
        identifier: 'identifier',
        value: 'value',
        expiresAt: 'expiresAt',
        createdAt: 'createdAt',
        updatedAt: 'updatedAt',
      },
    }))
    // Operators return plain tagged objects; tests only assert calls, not SQL.
    vi.doMock('drizzle-orm', () => ({
      eq: vi.fn((field, value) => ({ field, value, type: 'eq' })),
      and: vi.fn((...conditions) => ({ conditions, type: 'and' })),
      gt: vi.fn((field, value) => ({ field, value, type: 'gt' })),
      lt: vi.fn((field, value) => ({ field, value, type: 'lt' })),
    }))
    // Closure over the mutable `storageMethod` above lets each describe-block
    // flip between the redis and database code paths.
    vi.doMock('@/lib/core/storage', () => ({
      getStorageMethod: vi.fn(() => storageMethod),
    }))
    mockSendEmail.mockResolvedValue({ success: true })
    mockRenderOTPEmail.mockResolvedValue('<html>OTP Email</html>')
    vi.doMock('@/lib/messaging/email/mailer', () => ({
      sendEmail: mockSendEmail,
    }))
    vi.doMock('@/components/emails/render-email', () => ({
      renderOTPEmail: mockRenderOTPEmail,
    }))
    mockAddCorsHeaders.mockImplementation((response) => response)
    mockCreateSuccessResponse.mockImplementation((data) => ({
      json: () => Promise.resolve(data),
      status: 200,
    }))
    mockCreateErrorResponse.mockImplementation((message, status) => ({
      json: () => Promise.resolve({ error: message }),
      status,
    }))
    vi.doMock('@/app/api/chat/utils', () => ({
      addCorsHeaders: mockAddCorsHeaders,
      setChatAuthCookie: mockSetChatAuthCookie,
    }))
    vi.doMock('@/app/api/workflows/utils', () => ({
      createSuccessResponse: mockCreateSuccessResponse,
      createErrorResponse: mockCreateErrorResponse,
    }))
    vi.doMock('@/lib/logs/console/logger', () => ({
      createLogger: vi.fn().mockReturnValue({
        info: vi.fn(),
        error: vi.fn(),
        warn: vi.fn(),
        debug: vi.fn(),
      }),
    }))
    // Pass-through zod stub: parse() returns its input unchanged so schema
    // validation never rejects the test payloads.
    vi.doMock('zod', () => ({
      z: {
        object: vi.fn().mockReturnValue({
          parse: vi.fn().mockImplementation((data) => data),
        }),
        string: vi.fn().mockReturnValue({
          email: vi.fn().mockReturnThis(),
          length: vi.fn().mockReturnThis(),
        }),
      },
    }))
    mockGenerateRequestId.mockReturnValue('req-123')
    vi.doMock('@/lib/core/utils/request', () => ({
      generateRequestId: mockGenerateRequestId,
    }))
  })
  afterEach(() => {
    vi.restoreAllMocks()
  })
  describe('POST - Store OTP (Redis path)', () => {
    beforeEach(() => {
      storageMethod = 'redis'
    })
    it('should store OTP in Redis when storage method is redis', async () => {
      const { POST } = await import('./route')
      // First select resolves the chat record with the requester's email allowed.
      mockDbSelect.mockImplementationOnce(() => ({
        from: vi.fn().mockReturnValue({
          where: vi.fn().mockReturnValue({
            limit: vi.fn().mockResolvedValue([
              {
                id: mockChatId,
                authType: 'email',
                allowedEmails: [mockEmail],
                title: 'Test Chat',
              },
            ]),
          }),
        }),
      }))
      const request = new NextRequest('http://localhost:3000/api/chat/test/otp', {
        method: 'POST',
        body: JSON.stringify({ email: mockEmail }),
      })
      await POST(request, { params: Promise.resolve({ identifier: mockIdentifier }) })
      expect(mockRedisSet).toHaveBeenCalledWith(
        `otp:${mockEmail}:${mockChatId}`,
        expect.any(String),
        'EX',
        900 // 15 minutes
      )
      // Redis path must not touch the database verification table.
      expect(mockDbInsert).not.toHaveBeenCalled()
    })
  })
  describe('POST - Store OTP (Database path)', () => {
    beforeEach(() => {
      storageMethod = 'database'
      // No Redis client available when running on the database path.
      mockGetRedisClient.mockReturnValue(null)
    })
    it('should store OTP in database when storage method is database', async () => {
      const { POST } = await import('./route')
      mockDbSelect.mockImplementationOnce(() => ({
        from: vi.fn().mockReturnValue({
          where: vi.fn().mockReturnValue({
            limit: vi.fn().mockResolvedValue([
              {
                id: mockChatId,
                authType: 'email',
                allowedEmails: [mockEmail],
                title: 'Test Chat',
              },
            ]),
          }),
        }),
      }))
      const mockInsertValues = vi.fn().mockResolvedValue(undefined)
      mockDbInsert.mockImplementationOnce(() => ({
        values: mockInsertValues,
      }))
      const mockDeleteWhere = vi.fn().mockResolvedValue(undefined)
      mockDbDelete.mockImplementation(() => ({
        where: mockDeleteWhere,
      }))
      const request = new NextRequest('http://localhost:3000/api/chat/test/otp', {
        method: 'POST',
        body: JSON.stringify({ email: mockEmail }),
      })
      await POST(request, { params: Promise.resolve({ identifier: mockIdentifier }) })
      // Route deletes any prior OTP row before inserting the new one.
      expect(mockDbDelete).toHaveBeenCalled()
      expect(mockDbInsert).toHaveBeenCalled()
      expect(mockInsertValues).toHaveBeenCalledWith({
        id: expect.any(String),
        identifier: `chat-otp:${mockChatId}:${mockEmail}`,
        value: expect.any(String),
        expiresAt: expect.any(Date),
        createdAt: expect.any(Date),
        updatedAt: expect.any(Date),
      })
      expect(mockRedisSet).not.toHaveBeenCalled()
    })
  })
  describe('PUT - Verify OTP (Redis path)', () => {
    beforeEach(() => {
      storageMethod = 'redis'
      mockRedisGet.mockResolvedValue(mockOTP)
    })
    it('should retrieve OTP from Redis and verify successfully', async () => {
      const { PUT } = await import('./route')
      mockDbSelect.mockImplementationOnce(() => ({
        from: vi.fn().mockReturnValue({
          where: vi.fn().mockReturnValue({
            limit: vi.fn().mockResolvedValue([
              {
                id: mockChatId,
                authType: 'email',
              },
            ]),
          }),
        }),
      }))
      const request = new NextRequest('http://localhost:3000/api/chat/test/otp', {
        method: 'PUT',
        body: JSON.stringify({ email: mockEmail, otp: mockOTP }),
      })
      await PUT(request, { params: Promise.resolve({ identifier: mockIdentifier }) })
      expect(mockRedisGet).toHaveBeenCalledWith(`otp:${mockEmail}:${mockChatId}`)
      // OTP is single-use: verified codes are deleted immediately.
      expect(mockRedisDel).toHaveBeenCalledWith(`otp:${mockEmail}:${mockChatId}`)
      // Only the chat lookup hits the db on the Redis path.
      expect(mockDbSelect).toHaveBeenCalledTimes(1)
    })
  })
  describe('PUT - Verify OTP (Database path)', () => {
    beforeEach(() => {
      storageMethod = 'database'
      mockGetRedisClient.mockReturnValue(null)
    })
    it('should retrieve OTP from database and verify successfully', async () => {
      const { PUT } = await import('./route')
      // Sequence the two selects: chat lookup first, then the OTP row.
      let selectCallCount = 0
      mockDbSelect.mockImplementation(() => ({
        from: vi.fn().mockReturnValue({
          where: vi.fn().mockReturnValue({
            limit: vi.fn().mockImplementation(() => {
              selectCallCount++
              if (selectCallCount === 1) {
                return Promise.resolve([
                  {
                    id: mockChatId,
                    authType: 'email',
                  },
                ])
              }
              return Promise.resolve([
                {
                  value: mockOTP,
                  expiresAt: new Date(Date.now() + 10 * 60 * 1000),
                },
              ])
            }),
          }),
        }),
      }))
      const mockDeleteWhere = vi.fn().mockResolvedValue(undefined)
      mockDbDelete.mockImplementation(() => ({
        where: mockDeleteWhere,
      }))
      const request = new NextRequest('http://localhost:3000/api/chat/test/otp', {
        method: 'PUT',
        body: JSON.stringify({ email: mockEmail, otp: mockOTP }),
      })
      await PUT(request, { params: Promise.resolve({ identifier: mockIdentifier }) })
      expect(mockDbSelect).toHaveBeenCalledTimes(2)
      expect(mockDbDelete).toHaveBeenCalled()
      expect(mockRedisGet).not.toHaveBeenCalled()
    })
    it('should reject expired OTP from database', async () => {
      const { PUT } = await import('./route')
      let selectCallCount = 0
      mockDbSelect.mockImplementation(() => ({
        from: vi.fn().mockReturnValue({
          where: vi.fn().mockReturnValue({
            limit: vi.fn().mockImplementation(() => {
              selectCallCount++
              if (selectCallCount === 1) {
                return Promise.resolve([
                  {
                    id: mockChatId,
                    authType: 'email',
                  },
                ])
              }
              // Empty second select models an expired/missing OTP row (the
              // query filters on expiresAt > now).
              return Promise.resolve([])
            }),
          }),
        }),
      }))
      const request = new NextRequest('http://localhost:3000/api/chat/test/otp', {
        method: 'PUT',
        body: JSON.stringify({ email: mockEmail, otp: mockOTP }),
      })
      await PUT(request, { params: Promise.resolve({ identifier: mockIdentifier }) })
      expect(mockCreateErrorResponse).toHaveBeenCalledWith(
        'No verification code found, request a new one',
        400
      )
    })
  })
  describe('DELETE OTP (Redis path)', () => {
    beforeEach(() => {
      storageMethod = 'redis'
    })
    it('should delete OTP from Redis after verification', async () => {
      const { PUT } = await import('./route')
      mockRedisGet.mockResolvedValue(mockOTP)
      mockDbSelect.mockImplementationOnce(() => ({
        from: vi.fn().mockReturnValue({
          where: vi.fn().mockReturnValue({
            limit: vi.fn().mockResolvedValue([
              {
                id: mockChatId,
                authType: 'email',
              },
            ]),
          }),
        }),
      }))
      const request = new NextRequest('http://localhost:3000/api/chat/test/otp', {
        method: 'PUT',
        body: JSON.stringify({ email: mockEmail, otp: mockOTP }),
      })
      await PUT(request, { params: Promise.resolve({ identifier: mockIdentifier }) })
      expect(mockRedisDel).toHaveBeenCalledWith(`otp:${mockEmail}:${mockChatId}`)
      expect(mockDbDelete).not.toHaveBeenCalled()
    })
  })
  describe('DELETE OTP (Database path)', () => {
    beforeEach(() => {
      storageMethod = 'database'
      mockGetRedisClient.mockReturnValue(null)
    })
    it('should delete OTP from database after verification', async () => {
      const { PUT } = await import('./route')
      let selectCallCount = 0
      mockDbSelect.mockImplementation(() => ({
        from: vi.fn().mockReturnValue({
          where: vi.fn().mockReturnValue({
            limit: vi.fn().mockImplementation(() => {
              selectCallCount++
              if (selectCallCount === 1) {
                return Promise.resolve([{ id: mockChatId, authType: 'email' }])
              }
              return Promise.resolve([
                { value: mockOTP, expiresAt: new Date(Date.now() + 10 * 60 * 1000) },
              ])
            }),
          }),
        }),
      }))
      const mockDeleteWhere = vi.fn().mockResolvedValue(undefined)
      mockDbDelete.mockImplementation(() => ({
        where: mockDeleteWhere,
      }))
      const request = new NextRequest('http://localhost:3000/api/chat/test/otp', {
        method: 'PUT',
        body: JSON.stringify({ email: mockEmail, otp: mockOTP }),
      })
      await PUT(request, { params: Promise.resolve({ identifier: mockIdentifier }) })
      expect(mockDbDelete).toHaveBeenCalled()
      expect(mockRedisDel).not.toHaveBeenCalled()
    })
  })
  describe('Behavior consistency between Redis and Database', () => {
    it('should have same behavior for missing OTP in both storage methods', async () => {
      storageMethod = 'redis'
      // Redis returns no stored code; expect the same 400 as the db path.
      mockRedisGet.mockResolvedValue(null)
      const { PUT: PUTRedis } = await import('./route')
      mockDbSelect.mockImplementation(() => ({
        from: vi.fn().mockReturnValue({
          where: vi.fn().mockReturnValue({
            limit: vi.fn().mockResolvedValue([{ id: mockChatId, authType: 'email' }]),
          }),
        }),
      }))
      const requestRedis = new NextRequest('http://localhost:3000/api/chat/test/otp', {
        method: 'PUT',
        body: JSON.stringify({ email: mockEmail, otp: mockOTP }),
      })
      await PUTRedis(requestRedis, { params: Promise.resolve({ identifier: mockIdentifier }) })
      expect(mockCreateErrorResponse).toHaveBeenCalledWith(
        'No verification code found, request a new one',
        400
      )
    })
    it('should have same OTP expiry time in both storage methods', async () => {
      // 15 minutes, in seconds — must match the route's OTP_EXPIRY constant.
      const OTP_EXPIRY = 15 * 60
      storageMethod = 'redis'
      const { POST: POSTRedis } = await import('./route')
      mockDbSelect.mockImplementation(() => ({
        from: vi.fn().mockReturnValue({
          where: vi.fn().mockReturnValue({
            limit: vi.fn().mockResolvedValue([
              {
                id: mockChatId,
                authType: 'email',
                allowedEmails: [mockEmail],
                title: 'Test Chat',
              },
            ]),
          }),
        }),
      }))
      const requestRedis = new NextRequest('http://localhost:3000/api/chat/test/otp', {
        method: 'POST',
        body: JSON.stringify({ email: mockEmail }),
      })
      await POSTRedis(requestRedis, { params: Promise.resolve({ identifier: mockIdentifier }) })
      expect(mockRedisSet).toHaveBeenCalledWith(
        expect.any(String),
        expect.any(String),
        'EX',
        OTP_EXPIRY
      )
    })
  })
})

View File

@@ -1,6 +1,7 @@
import { randomUUID } from 'crypto'
import { db } from '@sim/db'
import { chat } from '@sim/db/schema'
import { eq } from 'drizzle-orm'
import { chat, verification } from '@sim/db/schema'
import { and, eq, gt } from 'drizzle-orm'
import type { NextRequest } from 'next/server'
import { z } from 'zod'
import { renderOTPEmail } from '@/components/emails/render-email'
@@ -22,24 +23,11 @@ const OTP_EXPIRY = 15 * 60 // 15 minutes
const OTP_EXPIRY_MS = OTP_EXPIRY * 1000
/**
* In-memory OTP storage for single-instance deployments without Redis.
* Only used when REDIS_URL is not configured (determined once at startup).
*
* Warning: This does NOT work in multi-instance/serverless deployments.
* Stores OTP in Redis or database depending on storage method.
* Uses the verification table for database storage.
*/
const inMemoryOTPStore = new Map<string, { otp: string; expiresAt: number }>()
function cleanupExpiredOTPs() {
const now = Date.now()
for (const [key, value] of inMemoryOTPStore.entries()) {
if (value.expiresAt < now) {
inMemoryOTPStore.delete(key)
}
}
}
async function storeOTP(email: string, chatId: string, otp: string): Promise<void> {
const key = `otp:${email}:${chatId}`
const identifier = `chat-otp:${chatId}:${email}`
const storageMethod = getStorageMethod()
if (storageMethod === 'redis') {
@@ -47,18 +35,28 @@ async function storeOTP(email: string, chatId: string, otp: string): Promise<voi
if (!redis) {
throw new Error('Redis configured but client unavailable')
}
const key = `otp:${email}:${chatId}`
await redis.set(key, otp, 'EX', OTP_EXPIRY)
} else {
cleanupExpiredOTPs()
inMemoryOTPStore.set(key, {
otp,
expiresAt: Date.now() + OTP_EXPIRY_MS,
const now = new Date()
const expiresAt = new Date(now.getTime() + OTP_EXPIRY_MS)
await db.transaction(async (tx) => {
await tx.delete(verification).where(eq(verification.identifier, identifier))
await tx.insert(verification).values({
id: randomUUID(),
identifier,
value: otp,
expiresAt,
createdAt: now,
updatedAt: now,
})
})
}
}
async function getOTP(email: string, chatId: string): Promise<string | null> {
const key = `otp:${email}:${chatId}`
const identifier = `chat-otp:${chatId}:${email}`
const storageMethod = getStorageMethod()
if (storageMethod === 'redis') {
@@ -66,22 +64,27 @@ async function getOTP(email: string, chatId: string): Promise<string | null> {
if (!redis) {
throw new Error('Redis configured but client unavailable')
}
const key = `otp:${email}:${chatId}`
return redis.get(key)
}
const entry = inMemoryOTPStore.get(key)
if (!entry) return null
const now = new Date()
const [record] = await db
.select({
value: verification.value,
expiresAt: verification.expiresAt,
})
.from(verification)
.where(and(eq(verification.identifier, identifier), gt(verification.expiresAt, now)))
.limit(1)
if (entry.expiresAt < Date.now()) {
inMemoryOTPStore.delete(key)
return null
}
if (!record) return null
return entry.otp
return record.value
}
async function deleteOTP(email: string, chatId: string): Promise<void> {
const key = `otp:${email}:${chatId}`
const identifier = `chat-otp:${chatId}:${email}`
const storageMethod = getStorageMethod()
if (storageMethod === 'redis') {
@@ -89,9 +92,10 @@ async function deleteOTP(email: string, chatId: string): Promise<void> {
if (!redis) {
throw new Error('Redis configured but client unavailable')
}
const key = `otp:${email}:${chatId}`
await redis.del(key)
} else {
inMemoryOTPStore.delete(key)
await db.delete(verification).where(eq(verification.identifier, identifier))
}
}

View File

@@ -0,0 +1,361 @@
/**
* Tests for copilot api-keys API route
*
* @vitest-environment node
*/
import { NextRequest } from 'next/server'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import { mockAuth, mockCryptoUuid, setupCommonApiMocks } from '@/app/api/__test-utils__/utils'
// Covers the copilot api-keys route, which proxies key listing (GET) and
// deletion (DELETE) to the Sim Agent service over fetch. global.fetch is
// replaced with a mock; auth is toggled per test via mockAuth().
describe('Copilot API Keys API Route', () => {
  const mockFetch = vi.fn()
  beforeEach(() => {
    // Fresh module registry so the route re-imports with the mocks below.
    vi.resetModules()
    setupCommonApiMocks()
    mockCryptoUuid()
    global.fetch = mockFetch
    // Fixed agent URL + API key so request assertions are deterministic.
    vi.doMock('@/lib/copilot/constants', () => ({
      SIM_AGENT_API_URL_DEFAULT: 'https://agent.sim.example.com',
    }))
    vi.doMock('@/lib/core/config/env', () => ({
      env: {
        SIM_AGENT_API_URL: null,
        COPILOT_API_KEY: 'test-api-key',
      },
    }))
  })
  afterEach(() => {
    vi.clearAllMocks()
    vi.restoreAllMocks()
  })
  describe('GET', () => {
    it('should return 401 when user is not authenticated', async () => {
      const authMocks = mockAuth()
      authMocks.setUnauthenticated()
      const { GET } = await import('@/app/api/copilot/api-keys/route')
      const request = new NextRequest('http://localhost:3000/api/copilot/api-keys')
      const response = await GET(request)
      expect(response.status).toBe(401)
      const responseData = await response.json()
      expect(responseData).toEqual({ error: 'Unauthorized' })
    })
    it('should return list of API keys with masked values', async () => {
      const authMocks = mockAuth()
      authMocks.setAuthenticated()
      const mockApiKeys = [
        {
          id: 'key-1',
          apiKey: 'sk-sim-abcdefghijklmnopqrstuv',
          name: 'Production Key',
          createdAt: '2024-01-01T00:00:00.000Z',
          lastUsed: '2024-01-15T00:00:00.000Z',
        },
        {
          id: 'key-2',
          apiKey: 'sk-sim-zyxwvutsrqponmlkjihgfe',
          name: null,
          createdAt: '2024-01-02T00:00:00.000Z',
          lastUsed: null,
        },
      ]
      mockFetch.mockResolvedValueOnce({
        ok: true,
        json: () => Promise.resolve(mockApiKeys),
      })
      const { GET } = await import('@/app/api/copilot/api-keys/route')
      const request = new NextRequest('http://localhost:3000/api/copilot/api-keys')
      const response = await GET(request)
      expect(response.status).toBe(200)
      const responseData = await response.json()
      expect(responseData.keys).toHaveLength(2)
      expect(responseData.keys[0].id).toBe('key-1')
      // Raw keys are never echoed back: only a masked suffix is exposed.
      expect(responseData.keys[0].displayKey).toBe('•••••qrstuv')
      expect(responseData.keys[0].name).toBe('Production Key')
      expect(responseData.keys[1].displayKey).toBe('•••••jihgfe')
      expect(responseData.keys[1].name).toBeNull()
    })
    it('should return empty array when user has no API keys', async () => {
      const authMocks = mockAuth()
      authMocks.setAuthenticated()
      mockFetch.mockResolvedValueOnce({
        ok: true,
        json: () => Promise.resolve([]),
      })
      const { GET } = await import('@/app/api/copilot/api-keys/route')
      const request = new NextRequest('http://localhost:3000/api/copilot/api-keys')
      const response = await GET(request)
      expect(response.status).toBe(200)
      const responseData = await response.json()
      expect(responseData.keys).toEqual([])
    })
    it('should forward userId to Sim Agent', async () => {
      const authMocks = mockAuth()
      authMocks.setAuthenticated()
      mockFetch.mockResolvedValueOnce({
        ok: true,
        json: () => Promise.resolve([]),
      })
      const { GET } = await import('@/app/api/copilot/api-keys/route')
      const request = new NextRequest('http://localhost:3000/api/copilot/api-keys')
      await GET(request)
      // Verifies the proxied call shape: agent endpoint, auth header, body.
      expect(mockFetch).toHaveBeenCalledWith(
        'https://agent.sim.example.com/api/validate-key/get-api-keys',
        expect.objectContaining({
          method: 'POST',
          headers: expect.objectContaining({
            'Content-Type': 'application/json',
            'x-api-key': 'test-api-key',
          }),
          body: JSON.stringify({ userId: 'user-123' }),
        })
      )
    })
    it('should return error when Sim Agent returns non-ok response', async () => {
      const authMocks = mockAuth()
      authMocks.setAuthenticated()
      mockFetch.mockResolvedValueOnce({
        ok: false,
        status: 503,
        json: () => Promise.resolve({ error: 'Service unavailable' }),
      })
      const { GET } = await import('@/app/api/copilot/api-keys/route')
      const request = new NextRequest('http://localhost:3000/api/copilot/api-keys')
      const response = await GET(request)
      // Agent status code is propagated; its error body is replaced.
      expect(response.status).toBe(503)
      const responseData = await response.json()
      expect(responseData).toEqual({ error: 'Failed to get keys' })
    })
    it('should return 500 when Sim Agent returns invalid response', async () => {
      const authMocks = mockAuth()
      authMocks.setAuthenticated()
      mockFetch.mockResolvedValueOnce({
        ok: true,
        json: () => Promise.resolve({ invalid: 'response' }),
      })
      const { GET } = await import('@/app/api/copilot/api-keys/route')
      const request = new NextRequest('http://localhost:3000/api/copilot/api-keys')
      const response = await GET(request)
      expect(response.status).toBe(500)
      const responseData = await response.json()
      expect(responseData).toEqual({ error: 'Invalid response from Sim Agent' })
    })
    it('should handle network errors gracefully', async () => {
      const authMocks = mockAuth()
      authMocks.setAuthenticated()
      mockFetch.mockRejectedValueOnce(new Error('Network error'))
      const { GET } = await import('@/app/api/copilot/api-keys/route')
      const request = new NextRequest('http://localhost:3000/api/copilot/api-keys')
      const response = await GET(request)
      expect(response.status).toBe(500)
      const responseData = await response.json()
      expect(responseData).toEqual({ error: 'Failed to get keys' })
    })
    it('should handle API keys with empty apiKey string', async () => {
      const authMocks = mockAuth()
      authMocks.setAuthenticated()
      const mockApiKeys = [
        {
          id: 'key-1',
          apiKey: '',
          name: 'Empty Key',
          createdAt: '2024-01-01T00:00:00.000Z',
          lastUsed: null,
        },
      ]
      mockFetch.mockResolvedValueOnce({
        ok: true,
        json: () => Promise.resolve(mockApiKeys),
      })
      const { GET } = await import('@/app/api/copilot/api-keys/route')
      const request = new NextRequest('http://localhost:3000/api/copilot/api-keys')
      const response = await GET(request)
      expect(response.status).toBe(200)
      const responseData = await response.json()
      // Empty key yields only the mask prefix with no suffix characters.
      expect(responseData.keys[0].displayKey).toBe('•••••')
    })
    it('should handle JSON parsing errors from Sim Agent', async () => {
      const authMocks = mockAuth()
      authMocks.setAuthenticated()
      mockFetch.mockResolvedValueOnce({
        ok: true,
        json: () => Promise.reject(new Error('Invalid JSON')),
      })
      const { GET } = await import('@/app/api/copilot/api-keys/route')
      const request = new NextRequest('http://localhost:3000/api/copilot/api-keys')
      const response = await GET(request)
      expect(response.status).toBe(500)
      const responseData = await response.json()
      expect(responseData).toEqual({ error: 'Invalid response from Sim Agent' })
    })
  })
  describe('DELETE', () => {
    it('should return 401 when user is not authenticated', async () => {
      const authMocks = mockAuth()
      authMocks.setUnauthenticated()
      const { DELETE } = await import('@/app/api/copilot/api-keys/route')
      const request = new NextRequest('http://localhost:3000/api/copilot/api-keys?id=key-123')
      const response = await DELETE(request)
      expect(response.status).toBe(401)
      const responseData = await response.json()
      expect(responseData).toEqual({ error: 'Unauthorized' })
    })
    it('should return 400 when id parameter is missing', async () => {
      const authMocks = mockAuth()
      authMocks.setAuthenticated()
      const { DELETE } = await import('@/app/api/copilot/api-keys/route')
      const request = new NextRequest('http://localhost:3000/api/copilot/api-keys')
      const response = await DELETE(request)
      expect(response.status).toBe(400)
      const responseData = await response.json()
      expect(responseData).toEqual({ error: 'id is required' })
    })
    it('should successfully delete an API key', async () => {
      const authMocks = mockAuth()
      authMocks.setAuthenticated()
      mockFetch.mockResolvedValueOnce({
        ok: true,
        json: () => Promise.resolve({ success: true }),
      })
      const { DELETE } = await import('@/app/api/copilot/api-keys/route')
      const request = new NextRequest('http://localhost:3000/api/copilot/api-keys?id=key-123')
      const response = await DELETE(request)
      expect(response.status).toBe(200)
      const responseData = await response.json()
      expect(responseData).toEqual({ success: true })
      expect(mockFetch).toHaveBeenCalledWith(
        'https://agent.sim.example.com/api/validate-key/delete',
        expect.objectContaining({
          method: 'POST',
          headers: expect.objectContaining({
            'Content-Type': 'application/json',
            'x-api-key': 'test-api-key',
          }),
          body: JSON.stringify({ userId: 'user-123', apiKeyId: 'key-123' }),
        })
      )
    })
    it('should return error when Sim Agent returns non-ok response', async () => {
      const authMocks = mockAuth()
      authMocks.setAuthenticated()
      mockFetch.mockResolvedValueOnce({
        ok: false,
        status: 404,
        json: () => Promise.resolve({ error: 'Key not found' }),
      })
      const { DELETE } = await import('@/app/api/copilot/api-keys/route')
      const request = new NextRequest('http://localhost:3000/api/copilot/api-keys?id=non-existent')
      const response = await DELETE(request)
      expect(response.status).toBe(404)
      const responseData = await response.json()
      expect(responseData).toEqual({ error: 'Failed to delete key' })
    })
    it('should return 500 when Sim Agent returns invalid response', async () => {
      const authMocks = mockAuth()
      authMocks.setAuthenticated()
      mockFetch.mockResolvedValueOnce({
        ok: true,
        json: () => Promise.resolve({ success: false }),
      })
      const { DELETE } = await import('@/app/api/copilot/api-keys/route')
      const request = new NextRequest('http://localhost:3000/api/copilot/api-keys?id=key-123')
      const response = await DELETE(request)
      expect(response.status).toBe(500)
      const responseData = await response.json()
      expect(responseData).toEqual({ error: 'Invalid response from Sim Agent' })
    })
    it('should handle network errors gracefully', async () => {
      const authMocks = mockAuth()
      authMocks.setAuthenticated()
      mockFetch.mockRejectedValueOnce(new Error('Network error'))
      const { DELETE } = await import('@/app/api/copilot/api-keys/route')
      const request = new NextRequest('http://localhost:3000/api/copilot/api-keys?id=key-123')
      const response = await DELETE(request)
      expect(response.status).toBe(500)
      const responseData = await response.json()
      expect(responseData).toEqual({ error: 'Failed to delete key' })
    })
    it('should handle JSON parsing errors from Sim Agent on delete', async () => {
      const authMocks = mockAuth()
      authMocks.setAuthenticated()
      mockFetch.mockResolvedValueOnce({
        ok: true,
        json: () => Promise.reject(new Error('Invalid JSON')),
      })
      const { DELETE } = await import('@/app/api/copilot/api-keys/route')
      const request = new NextRequest('http://localhost:3000/api/copilot/api-keys?id=key-123')
      const response = await DELETE(request)
      expect(response.status).toBe(500)
      const responseData = await response.json()
      expect(responseData).toEqual({ error: 'Invalid response from Sim Agent' })
    })
  })
})

View File

@@ -0,0 +1,189 @@
/**
* Tests for copilot chat delete API route
*
* @vitest-environment node
*/
import { NextRequest } from 'next/server'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import {
createMockRequest,
mockAuth,
mockCryptoUuid,
setupCommonApiMocks,
} from '@/app/api/__test-utils__/utils'
// Unit tests for the copilot chat delete route. The database layer and drizzle
// helpers are replaced with chainable vi.fn() mocks so each test can script
// what the delete query resolves (or rejects) with.
describe('Copilot Chat Delete API Route', () => {
  // Models the db.delete(...).where(...) chain; mockWhere's resolved value is
  // the set of deleted rows.
  const mockDelete = vi.fn()
  const mockWhere = vi.fn()
  beforeEach(() => {
    // Reset the module registry so the vi.doMock calls below apply to each
    // test's dynamic import of the route module.
    vi.resetModules()
    setupCommonApiMocks()
    mockCryptoUuid()
    mockDelete.mockReturnValue({ where: mockWhere })
    mockWhere.mockResolvedValue([])
    vi.doMock('@sim/db', () => ({
      db: {
        delete: mockDelete,
      },
    }))
    // Minimal schema stub: only the columns the route's where clause touches.
    vi.doMock('@sim/db/schema', () => ({
      copilotChats: {
        id: 'id',
        userId: 'userId',
      },
    }))
    vi.doMock('drizzle-orm', () => ({
      eq: vi.fn((field, value) => ({ field, value, type: 'eq' })),
    }))
  })
  afterEach(() => {
    vi.clearAllMocks()
    vi.restoreAllMocks()
  })
  describe('DELETE', () => {
    it('should return 401 when user is not authenticated', async () => {
      const authMocks = mockAuth()
      authMocks.setUnauthenticated()
      const req = createMockRequest('DELETE', {
        chatId: 'chat-123',
      })
      const { DELETE } = await import('@/app/api/copilot/chat/delete/route')
      const response = await DELETE(req)
      expect(response.status).toBe(401)
      const responseData = await response.json()
      expect(responseData).toEqual({ success: false, error: 'Unauthorized' })
    })
    it('should successfully delete a chat', async () => {
      const authMocks = mockAuth()
      authMocks.setAuthenticated()
      // Simulate one matching row being deleted.
      mockWhere.mockResolvedValueOnce([{ id: 'chat-123' }])
      const req = createMockRequest('DELETE', {
        chatId: 'chat-123',
      })
      const { DELETE } = await import('@/app/api/copilot/chat/delete/route')
      const response = await DELETE(req)
      expect(response.status).toBe(200)
      const responseData = await response.json()
      expect(responseData).toEqual({ success: true })
      expect(mockDelete).toHaveBeenCalled()
      expect(mockWhere).toHaveBeenCalled()
    })
    // Schema validation failures are reported as a generic 500, not a 400.
    it('should return 500 for invalid request body - missing chatId', async () => {
      const authMocks = mockAuth()
      authMocks.setAuthenticated()
      const req = createMockRequest('DELETE', {})
      const { DELETE } = await import('@/app/api/copilot/chat/delete/route')
      const response = await DELETE(req)
      expect(response.status).toBe(500)
      const responseData = await response.json()
      expect(responseData.error).toBe('Failed to delete chat')
    })
    it('should return 500 for invalid request body - chatId is not a string', async () => {
      const authMocks = mockAuth()
      authMocks.setAuthenticated()
      const req = createMockRequest('DELETE', {
        chatId: 12345,
      })
      const { DELETE } = await import('@/app/api/copilot/chat/delete/route')
      const response = await DELETE(req)
      expect(response.status).toBe(500)
      const responseData = await response.json()
      expect(responseData.error).toBe('Failed to delete chat')
    })
    it('should handle database errors gracefully', async () => {
      const authMocks = mockAuth()
      authMocks.setAuthenticated()
      mockWhere.mockRejectedValueOnce(new Error('Database connection failed'))
      const req = createMockRequest('DELETE', {
        chatId: 'chat-123',
      })
      const { DELETE } = await import('@/app/api/copilot/chat/delete/route')
      const response = await DELETE(req)
      expect(response.status).toBe(500)
      const responseData = await response.json()
      expect(responseData).toEqual({ success: false, error: 'Failed to delete chat' })
    })
    it('should handle JSON parsing errors in request body', async () => {
      const authMocks = mockAuth()
      authMocks.setAuthenticated()
      // Raw NextRequest with a malformed body, bypassing createMockRequest.
      const req = new NextRequest('http://localhost:3000/api/copilot/chat/delete', {
        method: 'DELETE',
        body: '{invalid-json',
        headers: {
          'Content-Type': 'application/json',
        },
      })
      const { DELETE } = await import('@/app/api/copilot/chat/delete/route')
      const response = await DELETE(req)
      expect(response.status).toBe(500)
      const responseData = await response.json()
      expect(responseData.error).toBe('Failed to delete chat')
    })
    // Deleting a non-existent chat still returns 200 (no affected-row check).
    it('should delete chat even if it does not exist (idempotent)', async () => {
      const authMocks = mockAuth()
      authMocks.setAuthenticated()
      mockWhere.mockResolvedValueOnce([])
      const req = createMockRequest('DELETE', {
        chatId: 'non-existent-chat',
      })
      const { DELETE } = await import('@/app/api/copilot/chat/delete/route')
      const response = await DELETE(req)
      expect(response.status).toBe(200)
      const responseData = await response.json()
      expect(responseData).toEqual({ success: true })
    })
    // NOTE(review): the name says "validation should fail", but the assertions
    // expect 200 and a delete call — i.e. an empty chatId is accepted. Confirm
    // whether the route should enforce a min-length and update name or route.
    it('should delete chat with empty string chatId (validation should fail)', async () => {
      const authMocks = mockAuth()
      authMocks.setAuthenticated()
      const req = createMockRequest('DELETE', {
        chatId: '',
      })
      const { DELETE } = await import('@/app/api/copilot/chat/delete/route')
      const response = await DELETE(req)
      expect(response.status).toBe(200)
      expect(mockDelete).toHaveBeenCalled()
    })
  })
})

View File

@@ -1066,7 +1066,6 @@ export async function GET(req: NextRequest) {
model: chat.model,
messages: Array.isArray(chat.messages) ? chat.messages : [],
messageCount: Array.isArray(chat.messages) ? chat.messages.length : 0,
previewYaml: null, // Not needed for chat list
planArtifact: chat.planArtifact || null,
config: chat.config || null,
createdAt: chat.createdAt,

View File

@@ -0,0 +1,277 @@
/**
* Tests for copilot chats list API route
*
* @vitest-environment node
*/
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import { mockCryptoUuid, setupCommonApiMocks } from '@/app/api/__test-utils__/utils'
// Unit tests for the copilot chats listing route. The db select chain and the
// copilot request-helper utilities are mocked; auth outcomes are scripted per
// test through authenticateCopilotRequestSessionOnly.
describe('Copilot Chats List API Route', () => {
  // Models db.select(...).from(...).where(...).orderBy(...); mockOrderBy's
  // resolved value is the row set the route receives.
  const mockSelect = vi.fn()
  const mockFrom = vi.fn()
  const mockWhere = vi.fn()
  const mockOrderBy = vi.fn()
  beforeEach(() => {
    // Reset modules so each test's vi.doMock applies to the dynamic import.
    vi.resetModules()
    setupCommonApiMocks()
    mockCryptoUuid()
    mockSelect.mockReturnValue({ from: mockFrom })
    mockFrom.mockReturnValue({ where: mockWhere })
    mockWhere.mockReturnValue({ orderBy: mockOrderBy })
    mockOrderBy.mockResolvedValue([])
    vi.doMock('@sim/db', () => ({
      db: {
        select: mockSelect,
      },
    }))
    // Only the columns the route selects/filters/orders by.
    vi.doMock('@sim/db/schema', () => ({
      copilotChats: {
        id: 'id',
        title: 'title',
        workflowId: 'workflowId',
        userId: 'userId',
        updatedAt: 'updatedAt',
      },
    }))
    vi.doMock('drizzle-orm', () => ({
      and: vi.fn((...conditions) => ({ conditions, type: 'and' })),
      eq: vi.fn((field, value) => ({ field, value, type: 'eq' })),
      desc: vi.fn((field) => ({ field, type: 'desc' })),
    }))
    // Response helpers return plain Response objects so status/body can be
    // asserted without pulling in the real helper implementations.
    vi.doMock('@/lib/copilot/request-helpers', () => ({
      authenticateCopilotRequestSessionOnly: vi.fn(),
      createUnauthorizedResponse: vi
        .fn()
        .mockReturnValue(new Response(JSON.stringify({ error: 'Unauthorized' }), { status: 401 })),
      createInternalServerErrorResponse: vi
        .fn()
        .mockImplementation(
          (message) => new Response(JSON.stringify({ error: message }), { status: 500 })
        ),
    }))
  })
  afterEach(() => {
    vi.clearAllMocks()
    vi.restoreAllMocks()
  })
  describe('GET', () => {
    it('should return 401 when user is not authenticated', async () => {
      const { authenticateCopilotRequestSessionOnly } = await import(
        '@/lib/copilot/request-helpers'
      )
      vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
        userId: null,
        isAuthenticated: false,
      })
      const { GET } = await import('@/app/api/copilot/chats/route')
      const request = new Request('http://localhost:3000/api/copilot/chats')
      const response = await GET(request as any)
      expect(response.status).toBe(401)
      const responseData = await response.json()
      expect(responseData).toEqual({ error: 'Unauthorized' })
    })
    it('should return empty chats array when user has no chats', async () => {
      const { authenticateCopilotRequestSessionOnly } = await import(
        '@/lib/copilot/request-helpers'
      )
      vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
        userId: 'user-123',
        isAuthenticated: true,
      })
      mockOrderBy.mockResolvedValueOnce([])
      const { GET } = await import('@/app/api/copilot/chats/route')
      const request = new Request('http://localhost:3000/api/copilot/chats')
      const response = await GET(request as any)
      expect(response.status).toBe(200)
      const responseData = await response.json()
      expect(responseData).toEqual({
        success: true,
        chats: [],
      })
    })
    it('should return list of chats for authenticated user', async () => {
      const { authenticateCopilotRequestSessionOnly } = await import(
        '@/lib/copilot/request-helpers'
      )
      vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
        userId: 'user-123',
        isAuthenticated: true,
      })
      const mockChats = [
        {
          id: 'chat-1',
          title: 'First Chat',
          workflowId: 'workflow-1',
          updatedAt: new Date('2024-01-02'),
        },
        {
          id: 'chat-2',
          title: 'Second Chat',
          workflowId: 'workflow-2',
          updatedAt: new Date('2024-01-01'),
        },
      ]
      mockOrderBy.mockResolvedValueOnce(mockChats)
      const { GET } = await import('@/app/api/copilot/chats/route')
      const request = new Request('http://localhost:3000/api/copilot/chats')
      const response = await GET(request as any)
      expect(response.status).toBe(200)
      const responseData = await response.json()
      expect(responseData.success).toBe(true)
      expect(responseData.chats).toHaveLength(2)
      expect(responseData.chats[0].id).toBe('chat-1')
      expect(responseData.chats[0].title).toBe('First Chat')
      expect(responseData.chats[1].id).toBe('chat-2')
    })
    // NOTE(review): the mock resolves an already-sorted array, so this only
    // verifies the route preserves row order — the actual desc(updatedAt)
    // ordering is delegated to the (mocked) DB and not exercised here.
    it('should return chats ordered by updatedAt descending', async () => {
      const { authenticateCopilotRequestSessionOnly } = await import(
        '@/lib/copilot/request-helpers'
      )
      vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
        userId: 'user-123',
        isAuthenticated: true,
      })
      const mockChats = [
        {
          id: 'newest-chat',
          title: 'Newest',
          workflowId: 'workflow-1',
          updatedAt: new Date('2024-01-10'),
        },
        {
          id: 'older-chat',
          title: 'Older',
          workflowId: 'workflow-2',
          updatedAt: new Date('2024-01-05'),
        },
        {
          id: 'oldest-chat',
          title: 'Oldest',
          workflowId: 'workflow-3',
          updatedAt: new Date('2024-01-01'),
        },
      ]
      mockOrderBy.mockResolvedValueOnce(mockChats)
      const { GET } = await import('@/app/api/copilot/chats/route')
      const request = new Request('http://localhost:3000/api/copilot/chats')
      const response = await GET(request as any)
      expect(response.status).toBe(200)
      const responseData = await response.json()
      expect(responseData.chats[0].id).toBe('newest-chat')
      expect(responseData.chats[2].id).toBe('oldest-chat')
    })
    it('should handle chats with null workflowId', async () => {
      const { authenticateCopilotRequestSessionOnly } = await import(
        '@/lib/copilot/request-helpers'
      )
      vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
        userId: 'user-123',
        isAuthenticated: true,
      })
      const mockChats = [
        {
          id: 'chat-no-workflow',
          title: 'Chat without workflow',
          workflowId: null,
          updatedAt: new Date('2024-01-01'),
        },
      ]
      mockOrderBy.mockResolvedValueOnce(mockChats)
      const { GET } = await import('@/app/api/copilot/chats/route')
      const request = new Request('http://localhost:3000/api/copilot/chats')
      const response = await GET(request as any)
      expect(response.status).toBe(200)
      const responseData = await response.json()
      expect(responseData.chats[0].workflowId).toBeNull()
    })
    it('should handle database errors gracefully', async () => {
      const { authenticateCopilotRequestSessionOnly } = await import(
        '@/lib/copilot/request-helpers'
      )
      vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
        userId: 'user-123',
        isAuthenticated: true,
      })
      mockOrderBy.mockRejectedValueOnce(new Error('Database connection failed'))
      const { GET } = await import('@/app/api/copilot/chats/route')
      const request = new Request('http://localhost:3000/api/copilot/chats')
      const response = await GET(request as any)
      expect(response.status).toBe(500)
      const responseData = await response.json()
      expect(responseData.error).toBe('Failed to fetch user chats')
    })
    // NOTE(review): only asserts the select/where mocks were invoked; it does
    // not inspect the where clause for the userId filter — consider asserting
    // on the eq(...) arguments for a stronger ownership guarantee.
    it('should only return chats belonging to authenticated user', async () => {
      const { authenticateCopilotRequestSessionOnly } = await import(
        '@/lib/copilot/request-helpers'
      )
      vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
        userId: 'user-123',
        isAuthenticated: true,
      })
      const mockChats = [
        {
          id: 'my-chat',
          title: 'My Chat',
          workflowId: 'workflow-1',
          updatedAt: new Date('2024-01-01'),
        },
      ]
      mockOrderBy.mockResolvedValueOnce(mockChats)
      const { GET } = await import('@/app/api/copilot/chats/route')
      const request = new Request('http://localhost:3000/api/copilot/chats')
      await GET(request as any)
      expect(mockSelect).toHaveBeenCalled()
      expect(mockWhere).toHaveBeenCalled()
    })
    // Guards against treating an authenticated-but-userless session as valid.
    it('should return 401 when userId is null despite isAuthenticated being true', async () => {
      const { authenticateCopilotRequestSessionOnly } = await import(
        '@/lib/copilot/request-helpers'
      )
      vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
        userId: null,
        isAuthenticated: true,
      })
      const { GET } = await import('@/app/api/copilot/chats/route')
      const request = new Request('http://localhost:3000/api/copilot/chats')
      const response = await GET(request as any)
      expect(response.status).toBe(401)
    })
  })
})

View File

@@ -10,9 +10,9 @@ import {
createRequestTracker,
createUnauthorizedResponse,
} from '@/lib/copilot/request-helpers'
import { validateUUID } from '@/lib/core/security/input-validation'
import { getBaseUrl } from '@/lib/core/utils/urls'
import { createLogger } from '@/lib/logs/console/logger'
import { isUuidV4 } from '@/executor/constants'
const logger = createLogger('CheckpointRevertAPI')
@@ -87,9 +87,8 @@ export async function POST(request: NextRequest) {
isDeployed: cleanedState.isDeployed,
})
const workflowIdValidation = validateUUID(checkpoint.workflowId, 'workflowId')
if (!workflowIdValidation.isValid) {
logger.error(`[${tracker.requestId}] Invalid workflow ID: ${workflowIdValidation.error}`)
if (!isUuidV4(checkpoint.workflowId)) {
logger.error(`[${tracker.requestId}] Invalid workflow ID format`)
return NextResponse.json({ error: 'Invalid workflow ID format' }, { status: 400 })
}

View File

@@ -14,6 +14,8 @@ import { generateRequestId } from '@/lib/core/utils/request'
import { getEffectiveDecryptedEnv } from '@/lib/environment/utils'
import { createLogger } from '@/lib/logs/console/logger'
import { refreshTokenIfNeeded } from '@/app/api/auth/oauth/utils'
import { REFERENCE } from '@/executor/constants'
import { createEnvVarPattern } from '@/executor/utils/reference-validation'
import { executeTool } from '@/tools'
import { getTool } from '@/tools/utils'
@@ -33,14 +35,18 @@ const ExecuteToolSchema = z.object({
function resolveEnvVarReferences(value: any, envVars: Record<string, string>): any {
if (typeof value === 'string') {
// Check for exact match: entire string is "{{VAR_NAME}}"
const exactMatch = /^\{\{([^}]+)\}\}$/.exec(value)
const exactMatchPattern = new RegExp(
`^\\${REFERENCE.ENV_VAR_START}([^}]+)\\${REFERENCE.ENV_VAR_END}$`
)
const exactMatch = exactMatchPattern.exec(value)
if (exactMatch) {
const envVarName = exactMatch[1].trim()
return envVars[envVarName] ?? value
}
// Check for embedded references: "prefix {{VAR}} suffix"
return value.replace(/\{\{([^}]+)\}\}/g, (match, varName) => {
const envVarPattern = createEnvVarPattern()
return value.replace(envVarPattern, (match, varName) => {
const trimmedName = varName.trim()
return envVars[trimmedName] ?? match
})

View File

@@ -0,0 +1,516 @@
/**
* Tests for copilot feedback API route
*
* @vitest-environment node
*/
import { NextRequest } from 'next/server'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import {
createMockRequest,
mockCryptoUuid,
setupCommonApiMocks,
} from '@/app/api/__test-utils__/utils'
// Unit tests for the copilot feedback route: POST stores a feedback record
// (insert...values...returning) and GET lists records (select...from). Both
// the db chain and the copilot request helpers are mocked per test.
describe('Copilot Feedback API Route', () => {
  // insert(...).values(...).returning(...) chain for POST.
  const mockInsert = vi.fn()
  const mockValues = vi.fn()
  const mockReturning = vi.fn()
  // select(...).from(...) chain for GET.
  const mockSelect = vi.fn()
  const mockFrom = vi.fn()
  beforeEach(() => {
    // Reset modules so each test's vi.doMock applies to the dynamic import.
    vi.resetModules()
    setupCommonApiMocks()
    mockCryptoUuid()
    mockInsert.mockReturnValue({ values: mockValues })
    mockValues.mockReturnValue({ returning: mockReturning })
    mockReturning.mockResolvedValue([])
    mockSelect.mockReturnValue({ from: mockFrom })
    mockFrom.mockResolvedValue([])
    vi.doMock('@sim/db', () => ({
      db: {
        insert: mockInsert,
        select: mockSelect,
      },
    }))
    vi.doMock('@sim/db/schema', () => ({
      copilotFeedback: {
        feedbackId: 'feedbackId',
        userId: 'userId',
        chatId: 'chatId',
        userQuery: 'userQuery',
        agentResponse: 'agentResponse',
        isPositive: 'isPositive',
        feedback: 'feedback',
        workflowYaml: 'workflowYaml',
        createdAt: 'createdAt',
      },
    }))
    vi.doMock('drizzle-orm', () => ({
      eq: vi.fn((field, value) => ({ field, value, type: 'eq' })),
    }))
    // Response helpers produce plain Response objects so tests can assert on
    // status codes and bodies without the real implementations.
    vi.doMock('@/lib/copilot/request-helpers', () => ({
      authenticateCopilotRequestSessionOnly: vi.fn(),
      createUnauthorizedResponse: vi
        .fn()
        .mockReturnValue(new Response(JSON.stringify({ error: 'Unauthorized' }), { status: 401 })),
      createBadRequestResponse: vi
        .fn()
        .mockImplementation(
          (message) => new Response(JSON.stringify({ error: message }), { status: 400 })
        ),
      createInternalServerErrorResponse: vi
        .fn()
        .mockImplementation(
          (message) => new Response(JSON.stringify({ error: message }), { status: 500 })
        ),
      createRequestTracker: vi.fn().mockReturnValue({
        requestId: 'test-request-id',
        getDuration: vi.fn().mockReturnValue(100),
      }),
    }))
  })
  afterEach(() => {
    vi.clearAllMocks()
    vi.restoreAllMocks()
  })
  describe('POST', () => {
    it('should return 401 when user is not authenticated', async () => {
      const { authenticateCopilotRequestSessionOnly } = await import(
        '@/lib/copilot/request-helpers'
      )
      vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
        userId: null,
        isAuthenticated: false,
      })
      const req = createMockRequest('POST', {
        chatId: '550e8400-e29b-41d4-a716-446655440000',
        userQuery: 'How do I create a workflow?',
        agentResponse: 'You can create a workflow by...',
        isPositiveFeedback: true,
      })
      const { POST } = await import('@/app/api/copilot/feedback/route')
      const response = await POST(req)
      expect(response.status).toBe(401)
      const responseData = await response.json()
      expect(responseData).toEqual({ error: 'Unauthorized' })
    })
    it('should successfully submit positive feedback', async () => {
      const { authenticateCopilotRequestSessionOnly } = await import(
        '@/lib/copilot/request-helpers'
      )
      vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
        userId: 'user-123',
        isAuthenticated: true,
      })
      const feedbackRecord = {
        feedbackId: 'feedback-123',
        userId: 'user-123',
        chatId: '550e8400-e29b-41d4-a716-446655440000',
        userQuery: 'How do I create a workflow?',
        agentResponse: 'You can create a workflow by...',
        isPositive: true,
        feedback: null,
        workflowYaml: null,
        createdAt: new Date('2024-01-01'),
      }
      mockReturning.mockResolvedValueOnce([feedbackRecord])
      const req = createMockRequest('POST', {
        chatId: '550e8400-e29b-41d4-a716-446655440000',
        userQuery: 'How do I create a workflow?',
        agentResponse: 'You can create a workflow by...',
        isPositiveFeedback: true,
      })
      const { POST } = await import('@/app/api/copilot/feedback/route')
      const response = await POST(req)
      expect(response.status).toBe(200)
      const responseData = await response.json()
      expect(responseData.success).toBe(true)
      expect(responseData.feedbackId).toBe('feedback-123')
      expect(responseData.message).toBe('Feedback submitted successfully')
    })
    it('should successfully submit negative feedback with text', async () => {
      const { authenticateCopilotRequestSessionOnly } = await import(
        '@/lib/copilot/request-helpers'
      )
      vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
        userId: 'user-123',
        isAuthenticated: true,
      })
      const feedbackRecord = {
        feedbackId: 'feedback-456',
        userId: 'user-123',
        chatId: '550e8400-e29b-41d4-a716-446655440000',
        userQuery: 'How do I deploy?',
        agentResponse: 'Here is how to deploy...',
        isPositive: false,
        feedback: 'The response was not helpful',
        workflowYaml: null,
        createdAt: new Date('2024-01-01'),
      }
      mockReturning.mockResolvedValueOnce([feedbackRecord])
      const req = createMockRequest('POST', {
        chatId: '550e8400-e29b-41d4-a716-446655440000',
        userQuery: 'How do I deploy?',
        agentResponse: 'Here is how to deploy...',
        isPositiveFeedback: false,
        feedback: 'The response was not helpful',
      })
      const { POST } = await import('@/app/api/copilot/feedback/route')
      const response = await POST(req)
      expect(response.status).toBe(200)
      const responseData = await response.json()
      expect(responseData.success).toBe(true)
      expect(responseData.feedbackId).toBe('feedback-456')
    })
    it('should successfully submit feedback with workflow YAML', async () => {
      const { authenticateCopilotRequestSessionOnly } = await import(
        '@/lib/copilot/request-helpers'
      )
      vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
        userId: 'user-123',
        isAuthenticated: true,
      })
      const workflowYaml = `
blocks:
- id: starter
type: starter
- id: agent
type: agent
edges:
- source: starter
target: agent
`
      const feedbackRecord = {
        feedbackId: 'feedback-789',
        userId: 'user-123',
        chatId: '550e8400-e29b-41d4-a716-446655440000',
        userQuery: 'Build a simple agent workflow',
        agentResponse: 'I created a workflow for you.',
        isPositive: true,
        feedback: null,
        workflowYaml: workflowYaml,
        createdAt: new Date('2024-01-01'),
      }
      mockReturning.mockResolvedValueOnce([feedbackRecord])
      const req = createMockRequest('POST', {
        chatId: '550e8400-e29b-41d4-a716-446655440000',
        userQuery: 'Build a simple agent workflow',
        agentResponse: 'I created a workflow for you.',
        isPositiveFeedback: true,
        workflowYaml: workflowYaml,
      })
      const { POST } = await import('@/app/api/copilot/feedback/route')
      const response = await POST(req)
      expect(response.status).toBe(200)
      const responseData = await response.json()
      expect(responseData.success).toBe(true)
      // The YAML must be persisted verbatim in the inserted row.
      expect(mockValues).toHaveBeenCalledWith(
        expect.objectContaining({
          workflowYaml: workflowYaml,
        })
      )
    })
    it('should return 400 for invalid chatId format', async () => {
      const { authenticateCopilotRequestSessionOnly } = await import(
        '@/lib/copilot/request-helpers'
      )
      vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
        userId: 'user-123',
        isAuthenticated: true,
      })
      const req = createMockRequest('POST', {
        chatId: 'not-a-uuid',
        userQuery: 'How do I create a workflow?',
        agentResponse: 'You can create a workflow by...',
        isPositiveFeedback: true,
      })
      const { POST } = await import('@/app/api/copilot/feedback/route')
      const response = await POST(req)
      expect(response.status).toBe(400)
      const responseData = await response.json()
      expect(responseData.error).toContain('Invalid request data')
    })
    it('should return 400 for empty userQuery', async () => {
      const { authenticateCopilotRequestSessionOnly } = await import(
        '@/lib/copilot/request-helpers'
      )
      vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
        userId: 'user-123',
        isAuthenticated: true,
      })
      const req = createMockRequest('POST', {
        chatId: '550e8400-e29b-41d4-a716-446655440000',
        userQuery: '',
        agentResponse: 'You can create a workflow by...',
        isPositiveFeedback: true,
      })
      const { POST } = await import('@/app/api/copilot/feedback/route')
      const response = await POST(req)
      expect(response.status).toBe(400)
      const responseData = await response.json()
      expect(responseData.error).toContain('Invalid request data')
    })
    it('should return 400 for empty agentResponse', async () => {
      const { authenticateCopilotRequestSessionOnly } = await import(
        '@/lib/copilot/request-helpers'
      )
      vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
        userId: 'user-123',
        isAuthenticated: true,
      })
      const req = createMockRequest('POST', {
        chatId: '550e8400-e29b-41d4-a716-446655440000',
        userQuery: 'How do I create a workflow?',
        agentResponse: '',
        isPositiveFeedback: true,
      })
      const { POST } = await import('@/app/api/copilot/feedback/route')
      const response = await POST(req)
      expect(response.status).toBe(400)
      const responseData = await response.json()
      expect(responseData.error).toContain('Invalid request data')
    })
    it('should return 400 for missing isPositiveFeedback', async () => {
      const { authenticateCopilotRequestSessionOnly } = await import(
        '@/lib/copilot/request-helpers'
      )
      vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
        userId: 'user-123',
        isAuthenticated: true,
      })
      const req = createMockRequest('POST', {
        chatId: '550e8400-e29b-41d4-a716-446655440000',
        userQuery: 'How do I create a workflow?',
        agentResponse: 'You can create a workflow by...',
      })
      const { POST } = await import('@/app/api/copilot/feedback/route')
      const response = await POST(req)
      expect(response.status).toBe(400)
      const responseData = await response.json()
      expect(responseData.error).toContain('Invalid request data')
    })
    it('should handle database errors gracefully', async () => {
      const { authenticateCopilotRequestSessionOnly } = await import(
        '@/lib/copilot/request-helpers'
      )
      vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
        userId: 'user-123',
        isAuthenticated: true,
      })
      mockReturning.mockRejectedValueOnce(new Error('Database connection failed'))
      const req = createMockRequest('POST', {
        chatId: '550e8400-e29b-41d4-a716-446655440000',
        userQuery: 'How do I create a workflow?',
        agentResponse: 'You can create a workflow by...',
        isPositiveFeedback: true,
      })
      const { POST } = await import('@/app/api/copilot/feedback/route')
      const response = await POST(req)
      expect(response.status).toBe(500)
      const responseData = await response.json()
      expect(responseData.error).toBe('Failed to submit feedback')
    })
    it('should handle JSON parsing errors in request body', async () => {
      const { authenticateCopilotRequestSessionOnly } = await import(
        '@/lib/copilot/request-helpers'
      )
      vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
        userId: 'user-123',
        isAuthenticated: true,
      })
      // Raw NextRequest with a malformed body, bypassing createMockRequest.
      const req = new NextRequest('http://localhost:3000/api/copilot/feedback', {
        method: 'POST',
        body: '{invalid-json',
        headers: {
          'Content-Type': 'application/json',
        },
      })
      const { POST } = await import('@/app/api/copilot/feedback/route')
      const response = await POST(req)
      expect(response.status).toBe(500)
    })
  })
  describe('GET', () => {
    it('should return 401 when user is not authenticated', async () => {
      const { authenticateCopilotRequestSessionOnly } = await import(
        '@/lib/copilot/request-helpers'
      )
      vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
        userId: null,
        isAuthenticated: false,
      })
      const { GET } = await import('@/app/api/copilot/feedback/route')
      const request = new Request('http://localhost:3000/api/copilot/feedback')
      const response = await GET(request as any)
      expect(response.status).toBe(401)
      const responseData = await response.json()
      expect(responseData).toEqual({ error: 'Unauthorized' })
    })
    it('should return empty feedback array when no feedback exists', async () => {
      const { authenticateCopilotRequestSessionOnly } = await import(
        '@/lib/copilot/request-helpers'
      )
      vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
        userId: 'user-123',
        isAuthenticated: true,
      })
      mockFrom.mockResolvedValueOnce([])
      const { GET } = await import('@/app/api/copilot/feedback/route')
      const request = new Request('http://localhost:3000/api/copilot/feedback')
      const response = await GET(request as any)
      expect(response.status).toBe(200)
      const responseData = await response.json()
      expect(responseData.success).toBe(true)
      expect(responseData.feedback).toEqual([])
    })
    // NOTE(review): the mocked chain is select().from() with no where clause,
    // and the fixture includes a row for user-456 — so GET appears to return
    // feedback across all users. Confirm this endpoint is admin-only.
    it('should return all feedback records', async () => {
      const { authenticateCopilotRequestSessionOnly } = await import(
        '@/lib/copilot/request-helpers'
      )
      vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
        userId: 'user-123',
        isAuthenticated: true,
      })
      const mockFeedback = [
        {
          feedbackId: 'feedback-1',
          userId: 'user-123',
          chatId: 'chat-1',
          userQuery: 'Query 1',
          agentResponse: 'Response 1',
          isPositive: true,
          feedback: null,
          workflowYaml: null,
          createdAt: new Date('2024-01-01'),
        },
        {
          feedbackId: 'feedback-2',
          userId: 'user-456',
          chatId: 'chat-2',
          userQuery: 'Query 2',
          agentResponse: 'Response 2',
          isPositive: false,
          feedback: 'Not helpful',
          workflowYaml: 'yaml: content',
          createdAt: new Date('2024-01-02'),
        },
      ]
      mockFrom.mockResolvedValueOnce(mockFeedback)
      const { GET } = await import('@/app/api/copilot/feedback/route')
      const request = new Request('http://localhost:3000/api/copilot/feedback')
      const response = await GET(request as any)
      expect(response.status).toBe(200)
      const responseData = await response.json()
      expect(responseData.success).toBe(true)
      expect(responseData.feedback).toHaveLength(2)
      expect(responseData.feedback[0].feedbackId).toBe('feedback-1')
      expect(responseData.feedback[1].feedbackId).toBe('feedback-2')
    })
    it('should handle database errors gracefully', async () => {
      const { authenticateCopilotRequestSessionOnly } = await import(
        '@/lib/copilot/request-helpers'
      )
      vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
        userId: 'user-123',
        isAuthenticated: true,
      })
      mockFrom.mockRejectedValueOnce(new Error('Database connection failed'))
      const { GET } = await import('@/app/api/copilot/feedback/route')
      const request = new Request('http://localhost:3000/api/copilot/feedback')
      const response = await GET(request as any)
      expect(response.status).toBe(500)
      const responseData = await response.json()
      expect(responseData.error).toBe('Failed to retrieve feedback')
    })
    // Metadata (requestId/duration) comes from the mocked request tracker.
    it('should return metadata with response', async () => {
      const { authenticateCopilotRequestSessionOnly } = await import(
        '@/lib/copilot/request-helpers'
      )
      vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
        userId: 'user-123',
        isAuthenticated: true,
      })
      mockFrom.mockResolvedValueOnce([])
      const { GET } = await import('@/app/api/copilot/feedback/route')
      const request = new Request('http://localhost:3000/api/copilot/feedback')
      const response = await GET(request as any)
      expect(response.status).toBe(200)
      const responseData = await response.json()
      expect(responseData.metadata).toBeDefined()
      expect(responseData.metadata.requestId).toBeDefined()
      expect(responseData.metadata.duration).toBeDefined()
    })
  })
})

View File

@@ -0,0 +1,367 @@
/**
* Tests for copilot stats API route
*
* @vitest-environment node
*/
import { NextRequest } from 'next/server'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import {
createMockRequest,
mockCryptoUuid,
setupCommonApiMocks,
} from '@/app/api/__test-utils__/utils'
describe('Copilot Stats API Route', () => {
// Spy installed as global.fetch so outbound calls to the Sim Agent can be
// scripted and inspected.
const mockFetch = vi.fn()
beforeEach(() => {
  // Reset modules so each test's vi.doMock applies to the dynamic import of
  // the route under test.
  vi.resetModules()
  setupCommonApiMocks()
  mockCryptoUuid()
  global.fetch = mockFetch
  // Response helpers produce plain Response objects so status/body can be
  // asserted without the real implementations.
  vi.doMock('@/lib/copilot/request-helpers', () => ({
    authenticateCopilotRequestSessionOnly: vi.fn(),
    createUnauthorizedResponse: vi
      .fn()
      .mockReturnValue(new Response(JSON.stringify({ error: 'Unauthorized' }), { status: 401 })),
    createBadRequestResponse: vi
      .fn()
      .mockImplementation(
        (message) => new Response(JSON.stringify({ error: message }), { status: 400 })
      ),
    createInternalServerErrorResponse: vi
      .fn()
      .mockImplementation(
        (message) => new Response(JSON.stringify({ error: message }), { status: 500 })
      ),
    createRequestTracker: vi.fn().mockReturnValue({
      requestId: 'test-request-id',
      getDuration: vi.fn().mockReturnValue(100),
    }),
  }))
  vi.doMock('@/lib/copilot/constants', () => ({
    SIM_AGENT_API_URL_DEFAULT: 'https://agent.sim.example.com',
  }))
  // SIM_AGENT_API_URL is null so the route falls back to the default URL.
  vi.doMock('@/lib/core/config/env', () => ({
    env: {
      SIM_AGENT_API_URL: null,
      COPILOT_API_KEY: 'test-api-key',
    },
  }))
})
afterEach(() => {
  vi.clearAllMocks()
  vi.restoreAllMocks()
})
describe('POST', () => {
it('should return 401 when user is not authenticated', async () => {
  const { authenticateCopilotRequestSessionOnly } = await import(
    '@/lib/copilot/request-helpers'
  )
  vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
    userId: null,
    isAuthenticated: false,
  })
  const req = createMockRequest('POST', {
    messageId: 'message-123',
    diffCreated: true,
    diffAccepted: false,
  })
  const { POST } = await import('@/app/api/copilot/stats/route')
  const response = await POST(req)
  expect(response.status).toBe(401)
  const responseData = await response.json()
  expect(responseData).toEqual({ error: 'Unauthorized' })
})
// Happy path: the stats payload is forwarded verbatim to the Sim Agent's
// /api/stats endpoint with the service API key.
it('should successfully forward stats to Sim Agent', async () => {
  const { authenticateCopilotRequestSessionOnly } = await import(
    '@/lib/copilot/request-helpers'
  )
  vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
    userId: 'user-123',
    isAuthenticated: true,
  })
  mockFetch.mockResolvedValueOnce({
    ok: true,
    json: () => Promise.resolve({ success: true }),
  })
  const req = createMockRequest('POST', {
    messageId: 'message-123',
    diffCreated: true,
    diffAccepted: true,
  })
  const { POST } = await import('@/app/api/copilot/stats/route')
  const response = await POST(req)
  expect(response.status).toBe(200)
  const responseData = await response.json()
  expect(responseData).toEqual({ success: true })
  expect(mockFetch).toHaveBeenCalledWith(
    'https://agent.sim.example.com/api/stats',
    expect.objectContaining({
      method: 'POST',
      headers: expect.objectContaining({
        'Content-Type': 'application/json',
        'x-api-key': 'test-api-key',
      }),
      body: JSON.stringify({
        messageId: 'message-123',
        diffCreated: true,
        diffAccepted: true,
      }),
    })
  )
})
it('should return 400 for invalid request body - missing messageId', async () => {
const { authenticateCopilotRequestSessionOnly } = await import(
'@/lib/copilot/request-helpers'
)
vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
userId: 'user-123',
isAuthenticated: true,
})
const req = createMockRequest('POST', {
diffCreated: true,
diffAccepted: false,
})
const { POST } = await import('@/app/api/copilot/stats/route')
const response = await POST(req)
expect(response.status).toBe(400)
const responseData = await response.json()
expect(responseData.error).toBe('Invalid request body for copilot stats')
})
it('should return 400 for invalid request body - missing diffCreated', async () => {
const { authenticateCopilotRequestSessionOnly } = await import(
'@/lib/copilot/request-helpers'
)
vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
userId: 'user-123',
isAuthenticated: true,
})
const req = createMockRequest('POST', {
messageId: 'message-123',
diffAccepted: false,
})
const { POST } = await import('@/app/api/copilot/stats/route')
const response = await POST(req)
expect(response.status).toBe(400)
const responseData = await response.json()
expect(responseData.error).toBe('Invalid request body for copilot stats')
})
it('should return 400 for invalid request body - missing diffAccepted', async () => {
const { authenticateCopilotRequestSessionOnly } = await import(
'@/lib/copilot/request-helpers'
)
vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
userId: 'user-123',
isAuthenticated: true,
})
const req = createMockRequest('POST', {
messageId: 'message-123',
diffCreated: true,
})
const { POST } = await import('@/app/api/copilot/stats/route')
const response = await POST(req)
expect(response.status).toBe(400)
const responseData = await response.json()
expect(responseData.error).toBe('Invalid request body for copilot stats')
})
it('should return 400 when upstream Sim Agent returns error', async () => {
const { authenticateCopilotRequestSessionOnly } = await import(
'@/lib/copilot/request-helpers'
)
vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
userId: 'user-123',
isAuthenticated: true,
})
mockFetch.mockResolvedValueOnce({
ok: false,
json: () => Promise.resolve({ error: 'Invalid message ID' }),
})
const req = createMockRequest('POST', {
messageId: 'invalid-message',
diffCreated: true,
diffAccepted: false,
})
const { POST } = await import('@/app/api/copilot/stats/route')
const response = await POST(req)
expect(response.status).toBe(400)
const responseData = await response.json()
expect(responseData).toEqual({ success: false, error: 'Invalid message ID' })
})
it('should handle upstream error with message field', async () => {
const { authenticateCopilotRequestSessionOnly } = await import(
'@/lib/copilot/request-helpers'
)
vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
userId: 'user-123',
isAuthenticated: true,
})
mockFetch.mockResolvedValueOnce({
ok: false,
json: () => Promise.resolve({ message: 'Rate limit exceeded' }),
})
const req = createMockRequest('POST', {
messageId: 'message-123',
diffCreated: true,
diffAccepted: false,
})
const { POST } = await import('@/app/api/copilot/stats/route')
const response = await POST(req)
expect(response.status).toBe(400)
const responseData = await response.json()
expect(responseData).toEqual({ success: false, error: 'Rate limit exceeded' })
})
it('should handle upstream error with no JSON response', async () => {
const { authenticateCopilotRequestSessionOnly } = await import(
'@/lib/copilot/request-helpers'
)
vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
userId: 'user-123',
isAuthenticated: true,
})
mockFetch.mockResolvedValueOnce({
ok: false,
json: () => Promise.reject(new Error('Not JSON')),
})
const req = createMockRequest('POST', {
messageId: 'message-123',
diffCreated: true,
diffAccepted: false,
})
const { POST } = await import('@/app/api/copilot/stats/route')
const response = await POST(req)
expect(response.status).toBe(400)
const responseData = await response.json()
expect(responseData).toEqual({ success: false, error: 'Upstream error' })
})
it('should handle network errors gracefully', async () => {
const { authenticateCopilotRequestSessionOnly } = await import(
'@/lib/copilot/request-helpers'
)
vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
userId: 'user-123',
isAuthenticated: true,
})
mockFetch.mockRejectedValueOnce(new Error('Network error'))
const req = createMockRequest('POST', {
messageId: 'message-123',
diffCreated: true,
diffAccepted: false,
})
const { POST } = await import('@/app/api/copilot/stats/route')
const response = await POST(req)
expect(response.status).toBe(500)
const responseData = await response.json()
expect(responseData.error).toBe('Failed to forward copilot stats')
})
it('should handle JSON parsing errors in request body', async () => {
const { authenticateCopilotRequestSessionOnly } = await import(
'@/lib/copilot/request-helpers'
)
vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
userId: 'user-123',
isAuthenticated: true,
})
const req = new NextRequest('http://localhost:3000/api/copilot/stats', {
method: 'POST',
body: '{invalid-json',
headers: {
'Content-Type': 'application/json',
},
})
const { POST } = await import('@/app/api/copilot/stats/route')
const response = await POST(req)
expect(response.status).toBe(400)
const responseData = await response.json()
expect(responseData.error).toBe('Invalid request body for copilot stats')
})
it('should forward stats with diffCreated=false and diffAccepted=false', async () => {
const { authenticateCopilotRequestSessionOnly } = await import(
'@/lib/copilot/request-helpers'
)
vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
userId: 'user-123',
isAuthenticated: true,
})
mockFetch.mockResolvedValueOnce({
ok: true,
json: () => Promise.resolve({ success: true }),
})
const req = createMockRequest('POST', {
messageId: 'message-456',
diffCreated: false,
diffAccepted: false,
})
const { POST } = await import('@/app/api/copilot/stats/route')
const response = await POST(req)
expect(response.status).toBe(200)
expect(mockFetch).toHaveBeenCalledWith(
expect.any(String),
expect.objectContaining({
body: JSON.stringify({
messageId: 'message-456',
diffCreated: false,
diffAccepted: false,
}),
})
)
})
})
})

View File

@@ -14,6 +14,7 @@ import type { StorageConfig } from '@/lib/uploads/core/storage-client'
import { getFileMetadataByKey } from '@/lib/uploads/server/metadata'
import { inferContextFromKey } from '@/lib/uploads/utils/file-utils'
import { getUserEntityPermissions } from '@/lib/workspaces/permissions/utils'
import { isUuid } from '@/executor/constants'
const logger = createLogger('FileAuthorization')
@@ -85,9 +86,7 @@ function extractWorkspaceIdFromKey(key: string): string | null {
const parts = key.split('/')
const workspaceId = parts[0]
// Validate UUID format
const UUID_PATTERN = /^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$/i
if (workspaceId && UUID_PATTERN.test(workspaceId)) {
if (workspaceId && isUuid(workspaceId)) {
return workspaceId
}

View File

@@ -1,5 +1,6 @@
import { type NextRequest, NextResponse } from 'next/server'
import { createLogger } from '@/lib/logs/console/logger'
import { sanitizeFileName } from '@/executor/constants'
import '@/lib/uploads/core/setup.server'
import { getSession } from '@/lib/auth'
import type { StorageContext } from '@/lib/uploads/config'
@@ -154,7 +155,7 @@ export async function POST(request: NextRequest) {
logger.info(`Uploading knowledge-base file: ${originalName}`)
const timestamp = Date.now()
const safeFileName = originalName.replace(/\s+/g, '-')
const safeFileName = sanitizeFileName(originalName)
const storageKey = `kb/${timestamp}-${safeFileName}`
const metadata: Record<string, string> = {
@@ -267,9 +268,8 @@ export async function POST(request: NextRequest) {
logger.info(`Uploading ${context} file: ${originalName}`)
// Generate storage key with context prefix and timestamp to ensure uniqueness
const timestamp = Date.now()
const safeFileName = originalName.replace(/\s+/g, '-')
const safeFileName = sanitizeFileName(originalName)
const storageKey = `${context}/${timestamp}-${safeFileName}`
const metadata: Record<string, string> = {

View File

@@ -5,6 +5,7 @@ import { executeInE2B } from '@/lib/execution/e2b'
import { executeInIsolatedVM } from '@/lib/execution/isolated-vm'
import { CodeLanguage, DEFAULT_CODE_LANGUAGE, isValidCodeLanguage } from '@/lib/execution/languages'
import { createLogger } from '@/lib/logs/console/logger'
import { escapeRegExp, normalizeName, REFERENCE } from '@/executor/constants'
import {
createEnvVarPattern,
createWorkflowVariablePattern,
@@ -405,7 +406,7 @@ function resolveWorkflowVariables(
// Find the variable by name (workflowVariables is indexed by ID, values are variable objects)
const foundVariable = Object.entries(workflowVariables).find(
([_, variable]) => (variable.name || '').replace(/\s+/g, '') === variableName
([_, variable]) => normalizeName(variable.name || '') === variableName
)
let variableValue: unknown = ''
@@ -513,31 +514,26 @@ function resolveTagVariables(
): string {
let resolvedCode = code
const tagMatches = resolvedCode.match(/<([a-zA-Z_][a-zA-Z0-9_.]*[a-zA-Z0-9_])>/g) || []
const tagPattern = new RegExp(
`${REFERENCE.START}([a-zA-Z_][a-zA-Z0-9_${REFERENCE.PATH_DELIMITER}]*[a-zA-Z0-9_])${REFERENCE.END}`,
'g'
)
const tagMatches = resolvedCode.match(tagPattern) || []
for (const match of tagMatches) {
const tagName = match.slice(1, -1).trim()
const tagName = match.slice(REFERENCE.START.length, -REFERENCE.END.length).trim()
// Handle nested paths like "getrecord.response.data" or "function1.response.result"
// First try params, then blockData directly, then try with block name mapping
let tagValue = getNestedValue(params, tagName) || getNestedValue(blockData, tagName) || ''
// If not found and the path starts with a block name, try mapping the block name to ID
if (!tagValue && tagName.includes('.')) {
const pathParts = tagName.split('.')
if (!tagValue && tagName.includes(REFERENCE.PATH_DELIMITER)) {
const pathParts = tagName.split(REFERENCE.PATH_DELIMITER)
const normalizedBlockName = pathParts[0] // This should already be normalized like "function1"
// Find the block ID by looking for a block name that normalizes to this value
let blockId = null
for (const [blockName, id] of Object.entries(blockNameMapping)) {
// Apply the same normalization logic as the UI: remove spaces and lowercase
const normalizedName = blockName.replace(/\s+/g, '').toLowerCase()
if (normalizedName === normalizedBlockName) {
blockId = id
break
}
}
// Direct lookup using normalized block name
const blockId = blockNameMapping[normalizedBlockName] ?? null
if (blockId) {
const remainingPath = pathParts.slice(1).join('.')
@@ -617,13 +613,6 @@ function getNestedValue(obj: any, path: string): any {
}, obj)
}
/**
* Escape special regex characters in a string
*/
function escapeRegExp(string: string): string {
return string.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')
}
/**
* Remove one trailing newline from stdout
* This handles the common case where print() or console.log() adds a trailing \n

View File

@@ -31,7 +31,7 @@ export async function GET(
const payload = run.payload as any
if (payload?.workflowId) {
const { verifyWorkflowAccess } = await import('@/socket-server/middleware/permissions')
const { verifyWorkflowAccess } = await import('@/socket/middleware/permissions')
const accessCheck = await verifyWorkflowAccess(authenticatedUserId, payload.workflowId)
if (!accessCheck.hasAccess) {
logger.warn(`[${requestId}] User ${authenticatedUserId} denied access to task ${taskId}`, {

View File

@@ -100,7 +100,12 @@ export async function PUT(
try {
const validatedData = UpdateChunkSchema.parse(body)
const updatedChunk = await updateChunk(chunkId, validatedData, requestId)
const updatedChunk = await updateChunk(
chunkId,
validatedData,
requestId,
accessCheck.knowledgeBase?.workspaceId
)
logger.info(
`[${requestId}] Chunk updated: ${chunkId} in document ${documentId} in knowledge base ${knowledgeBaseId}`

View File

@@ -184,7 +184,8 @@ export async function POST(
documentId,
docTags,
validatedData,
requestId
requestId,
accessCheck.knowledgeBase?.workspaceId
)
let cost = null

View File

@@ -183,11 +183,11 @@ export async function POST(request: NextRequest) {
)
}
// Generate query embedding only if query is provided
const workspaceId = accessChecks.find((ac) => ac?.hasAccess)?.knowledgeBase?.workspaceId
const hasQuery = validatedData.query && validatedData.query.trim().length > 0
// Start embedding generation early and await when needed
const queryEmbeddingPromise = hasQuery
? generateSearchEmbedding(validatedData.query!)
? generateSearchEmbedding(validatedData.query!, undefined, workspaceId)
: Promise.resolve(null)
// Check if any requested knowledge bases were not accessible

View File

@@ -99,7 +99,7 @@ export interface EmbeddingData {
export interface KnowledgeBaseAccessResult {
hasAccess: true
knowledgeBase: Pick<KnowledgeBaseData, 'id' | 'userId'>
knowledgeBase: Pick<KnowledgeBaseData, 'id' | 'userId' | 'workspaceId'>
}
export interface KnowledgeBaseAccessDenied {
@@ -113,7 +113,7 @@ export type KnowledgeBaseAccessCheck = KnowledgeBaseAccessResult | KnowledgeBase
export interface DocumentAccessResult {
hasAccess: true
document: DocumentData
knowledgeBase: Pick<KnowledgeBaseData, 'id' | 'userId'>
knowledgeBase: Pick<KnowledgeBaseData, 'id' | 'userId' | 'workspaceId'>
}
export interface DocumentAccessDenied {
@@ -128,7 +128,7 @@ export interface ChunkAccessResult {
hasAccess: true
chunk: EmbeddingData
document: DocumentData
knowledgeBase: Pick<KnowledgeBaseData, 'id' | 'userId'>
knowledgeBase: Pick<KnowledgeBaseData, 'id' | 'userId' | 'workspaceId'>
}
export interface ChunkAccessDenied {

View File

@@ -1,28 +1,15 @@
import { db } from '@sim/db'
import { permissions, workflow, workflowExecutionLogs } from '@sim/db/schema'
import { and, desc, eq, gte, inArray, lte, type SQL, sql } from 'drizzle-orm'
import { and, desc, eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getSession } from '@/lib/auth'
import { createLogger } from '@/lib/logs/console/logger'
import { buildFilterConditions, LogFilterParamsSchema } from '@/lib/logs/filters'
const logger = createLogger('LogsExportAPI')
export const revalidate = 0
const ExportParamsSchema = z.object({
level: z.string().optional(),
workflowIds: z.string().optional(),
folderIds: z.string().optional(),
triggers: z.string().optional(),
startDate: z.string().optional(),
endDate: z.string().optional(),
search: z.string().optional(),
workflowName: z.string().optional(),
folderName: z.string().optional(),
workspaceId: z.string(),
})
function escapeCsv(value: any): string {
if (value === null || value === undefined) return ''
const str = String(value)
@@ -41,7 +28,7 @@ export async function GET(request: NextRequest) {
const userId = session.user.id
const { searchParams } = new URL(request.url)
const params = ExportParamsSchema.parse(Object.fromEntries(searchParams.entries()))
const params = LogFilterParamsSchema.parse(Object.fromEntries(searchParams.entries()))
const selectColumns = {
id: workflowExecutionLogs.id,
@@ -57,53 +44,11 @@ export async function GET(request: NextRequest) {
workflowName: workflow.name,
}
let conditions: SQL | undefined = eq(workflowExecutionLogs.workspaceId, params.workspaceId)
if (params.level && params.level !== 'all') {
const levels = params.level.split(',').filter(Boolean)
if (levels.length === 1) {
conditions = and(conditions, eq(workflowExecutionLogs.level, levels[0]))
} else if (levels.length > 1) {
conditions = and(conditions, inArray(workflowExecutionLogs.level, levels))
}
}
if (params.workflowIds) {
const workflowIds = params.workflowIds.split(',').filter(Boolean)
if (workflowIds.length > 0) conditions = and(conditions, inArray(workflow.id, workflowIds))
}
if (params.folderIds) {
const folderIds = params.folderIds.split(',').filter(Boolean)
if (folderIds.length > 0) conditions = and(conditions, inArray(workflow.folderId, folderIds))
}
if (params.triggers) {
const triggers = params.triggers.split(',').filter(Boolean)
if (triggers.length > 0 && !triggers.includes('all')) {
conditions = and(conditions, inArray(workflowExecutionLogs.trigger, triggers))
}
}
if (params.startDate) {
conditions = and(conditions, gte(workflowExecutionLogs.startedAt, new Date(params.startDate)))
}
if (params.endDate) {
conditions = and(conditions, lte(workflowExecutionLogs.startedAt, new Date(params.endDate)))
}
if (params.search) {
const term = `%${params.search}%`
conditions = and(conditions, sql`${workflowExecutionLogs.executionId} ILIKE ${term}`)
}
if (params.workflowName) {
const nameTerm = `%${params.workflowName}%`
conditions = and(conditions, sql`${workflow.name} ILIKE ${nameTerm}`)
}
if (params.folderName) {
const folderTerm = `%${params.folderName}%`
conditions = and(conditions, sql`${workflow.name} ILIKE ${folderTerm}`)
}
const workspaceCondition = eq(workflowExecutionLogs.workspaceId, params.workspaceId)
const filterConditions = buildFilterConditions(params)
const conditions = filterConditions
? and(workspaceCondition, filterConditions)
: workspaceCondition
const header = [
'startedAt',

View File

@@ -6,51 +6,22 @@ import {
workflowDeploymentVersion,
workflowExecutionLogs,
} from '@sim/db/schema'
import {
and,
desc,
eq,
gt,
gte,
inArray,
isNotNull,
isNull,
lt,
lte,
ne,
or,
type SQL,
sql,
} from 'drizzle-orm'
import { and, desc, eq, isNotNull, isNull, or, type SQL, sql } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getSession } from '@/lib/auth'
import { generateRequestId } from '@/lib/core/utils/request'
import { createLogger } from '@/lib/logs/console/logger'
import { buildFilterConditions, LogFilterParamsSchema } from '@/lib/logs/filters'
const logger = createLogger('LogsAPI')
export const revalidate = 0
const QueryParamsSchema = z.object({
const QueryParamsSchema = LogFilterParamsSchema.extend({
details: z.enum(['basic', 'full']).optional().default('basic'),
limit: z.coerce.number().optional().default(100),
offset: z.coerce.number().optional().default(0),
level: z.string().optional(),
workflowIds: z.string().optional(),
folderIds: z.string().optional(),
triggers: z.string().optional(),
startDate: z.string().optional(),
endDate: z.string().optional(),
search: z.string().optional(),
workflowName: z.string().optional(),
folderName: z.string().optional(),
executionId: z.string().optional(),
costOperator: z.enum(['=', '>', '<', '>=', '<=', '!=']).optional(),
costValue: z.coerce.number().optional(),
durationOperator: z.enum(['=', '>', '<', '>=', '<=', '!=']).optional(),
durationValue: z.coerce.number().optional(),
workspaceId: z.string(),
})
export async function GET(request: NextRequest) {
@@ -78,6 +49,7 @@ export async function GET(request: NextRequest) {
stateSnapshotId: workflowExecutionLogs.stateSnapshotId,
deploymentVersionId: workflowExecutionLogs.deploymentVersionId,
level: workflowExecutionLogs.level,
status: workflowExecutionLogs.status,
trigger: workflowExecutionLogs.trigger,
startedAt: workflowExecutionLogs.startedAt,
endedAt: workflowExecutionLogs.endedAt,
@@ -107,6 +79,7 @@ export async function GET(request: NextRequest) {
stateSnapshotId: workflowExecutionLogs.stateSnapshotId,
deploymentVersionId: workflowExecutionLogs.deploymentVersionId,
level: workflowExecutionLogs.level,
status: workflowExecutionLogs.status,
trigger: workflowExecutionLogs.trigger,
startedAt: workflowExecutionLogs.startedAt,
endedAt: workflowExecutionLogs.endedAt,
@@ -197,102 +170,11 @@ export async function GET(request: NextRequest) {
}
}
if (params.workflowIds) {
const workflowIds = params.workflowIds.split(',').filter(Boolean)
if (workflowIds.length > 0) {
conditions = and(conditions, inArray(workflow.id, workflowIds))
}
}
if (params.folderIds) {
const folderIds = params.folderIds.split(',').filter(Boolean)
if (folderIds.length > 0) {
conditions = and(conditions, inArray(workflow.folderId, folderIds))
}
}
if (params.triggers) {
const triggers = params.triggers.split(',').filter(Boolean)
if (triggers.length > 0 && !triggers.includes('all')) {
conditions = and(conditions, inArray(workflowExecutionLogs.trigger, triggers))
}
}
if (params.startDate) {
conditions = and(
conditions,
gte(workflowExecutionLogs.startedAt, new Date(params.startDate))
)
}
if (params.endDate) {
conditions = and(conditions, lte(workflowExecutionLogs.startedAt, new Date(params.endDate)))
}
if (params.search) {
const searchTerm = `%${params.search}%`
conditions = and(conditions, sql`${workflowExecutionLogs.executionId} ILIKE ${searchTerm}`)
}
if (params.workflowName) {
const nameTerm = `%${params.workflowName}%`
conditions = and(conditions, sql`${workflow.name} ILIKE ${nameTerm}`)
}
if (params.folderName) {
const folderTerm = `%${params.folderName}%`
conditions = and(conditions, sql`${workflow.name} ILIKE ${folderTerm}`)
}
if (params.executionId) {
conditions = and(conditions, eq(workflowExecutionLogs.executionId, params.executionId))
}
if (params.costOperator && params.costValue !== undefined) {
const costField = sql`(${workflowExecutionLogs.cost}->>'total')::numeric`
switch (params.costOperator) {
case '=':
conditions = and(conditions, sql`${costField} = ${params.costValue}`)
break
case '>':
conditions = and(conditions, sql`${costField} > ${params.costValue}`)
break
case '<':
conditions = and(conditions, sql`${costField} < ${params.costValue}`)
break
case '>=':
conditions = and(conditions, sql`${costField} >= ${params.costValue}`)
break
case '<=':
conditions = and(conditions, sql`${costField} <= ${params.costValue}`)
break
case '!=':
conditions = and(conditions, sql`${costField} != ${params.costValue}`)
break
}
}
if (params.durationOperator && params.durationValue !== undefined) {
const durationField = workflowExecutionLogs.totalDurationMs
switch (params.durationOperator) {
case '=':
conditions = and(conditions, eq(durationField, params.durationValue))
break
case '>':
conditions = and(conditions, gt(durationField, params.durationValue))
break
case '<':
conditions = and(conditions, lt(durationField, params.durationValue))
break
case '>=':
conditions = and(conditions, gte(durationField, params.durationValue))
break
case '<=':
conditions = and(conditions, lte(durationField, params.durationValue))
break
case '!=':
conditions = and(conditions, ne(durationField, params.durationValue))
break
}
// Apply common filters (workflowIds, folderIds, triggers, dates, search, cost, duration)
// Level filtering is handled above with advanced running/pending state logic
const commonFilters = buildFilterConditions(params, { useSimpleLevelFilter: false })
if (commonFilters) {
conditions = and(conditions, commonFilters)
}
const logs = await baseQuery
@@ -379,15 +261,16 @@ export async function GET(request: NextRequest) {
input: 0,
output: 0,
total: 0,
tokens: { prompt: 0, completion: 0, total: 0 },
tokens: { input: 0, output: 0, total: 0 },
})
}
const modelCost = models.get(block.cost.model)
modelCost.input += Number(block.cost.input) || 0
modelCost.output += Number(block.cost.output) || 0
modelCost.total += Number(block.cost.total) || 0
modelCost.tokens.prompt += block.cost.tokens?.prompt || 0
modelCost.tokens.completion += block.cost.tokens?.completion || 0
modelCost.tokens.input += block.cost.tokens?.input || block.cost.tokens?.prompt || 0
modelCost.tokens.output +=
block.cost.tokens?.output || block.cost.tokens?.completion || 0
modelCost.tokens.total += block.cost.tokens?.total || 0
}
}
@@ -399,8 +282,8 @@ export async function GET(request: NextRequest) {
output: totalOutputCost,
tokens: {
total: totalTokens,
prompt: totalPromptTokens,
completion: totalCompletionTokens,
input: totalPromptTokens,
output: totalCompletionTokens,
},
models: Object.fromEntries(models),
}
@@ -451,6 +334,7 @@ export async function GET(request: NextRequest) {
deploymentVersion: log.deploymentVersion ?? null,
deploymentVersionName: log.deploymentVersionName ?? null,
level: log.level,
status: log.status,
duration: log.totalDurationMs ? `${log.totalDurationMs}ms` : null,
trigger: log.trigger,
createdAt: log.startedAt.toISOString(),

View File

@@ -6,6 +6,8 @@ import { getParsedBody, withMcpAuth } from '@/lib/mcp/middleware'
import type { McpServerConfig, McpTransport } from '@/lib/mcp/types'
import { validateMcpServerUrl } from '@/lib/mcp/url-validator'
import { createMcpErrorResponse, createMcpSuccessResponse } from '@/lib/mcp/utils'
import { REFERENCE } from '@/executor/constants'
import { createEnvVarPattern } from '@/executor/utils/reference-validation'
const logger = createLogger('McpServerTestAPI')
@@ -23,12 +25,13 @@ function isUrlBasedTransport(transport: McpTransport): boolean {
* Resolve environment variables in strings
*/
function resolveEnvVars(value: string, envVars: Record<string, string>): string {
const envMatches = value.match(/\{\{([^}]+)\}\}/g)
const envVarPattern = createEnvVarPattern()
const envMatches = value.match(envVarPattern)
if (!envMatches) return value
let resolvedValue = value
for (const match of envMatches) {
const envKey = match.slice(2, -2).trim()
const envKey = match.slice(REFERENCE.ENV_VAR_START.length, -REFERENCE.ENV_VAR_END.length).trim()
const envValue = envVars[envKey]
if (envValue === undefined) {

View File

@@ -1,9 +1,12 @@
import { db } from '@sim/db'
import { account } from '@sim/db/schema'
import { eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { generateRequestId } from '@/lib/core/utils/request'
import { createLogger } from '@/lib/logs/console/logger'
import { refreshTokenIfNeeded } from '@/app/api/auth/oauth/utils'
import type { StreamingExecution } from '@/executor/types'
import { executeProviderRequest } from '@/providers'
import { getApiKey } from '@/providers/utils'
const logger = createLogger('ProvidersAPI')
@@ -37,6 +40,7 @@ export async function POST(request: NextRequest) {
azureApiVersion,
vertexProject,
vertexLocation,
vertexCredential,
responseFormat,
workflowId,
workspaceId,
@@ -62,6 +66,7 @@ export async function POST(request: NextRequest) {
hasAzureApiVersion: !!azureApiVersion,
hasVertexProject: !!vertexProject,
hasVertexLocation: !!vertexLocation,
hasVertexCredential: !!vertexCredential,
hasResponseFormat: !!responseFormat,
workflowId,
stream: !!stream,
@@ -74,18 +79,20 @@ export async function POST(request: NextRequest) {
verbosity,
})
let finalApiKey: string
let finalApiKey: string | undefined = apiKey
try {
finalApiKey = getApiKey(provider, model, apiKey)
if (provider === 'vertex' && vertexCredential) {
finalApiKey = await resolveVertexCredential(requestId, vertexCredential)
}
} catch (error) {
logger.error(`[${requestId}] Failed to get API key:`, {
logger.error(`[${requestId}] Failed to resolve Vertex credential:`, {
provider,
model,
error: error instanceof Error ? error.message : String(error),
hasProvidedApiKey: !!apiKey,
hasVertexCredential: !!vertexCredential,
})
return NextResponse.json(
{ error: error instanceof Error ? error.message : 'API key error' },
{ error: error instanceof Error ? error.message : 'Credential error' },
{ status: 400 }
)
}
@@ -97,7 +104,6 @@ export async function POST(request: NextRequest) {
hasApiKey: !!finalApiKey,
})
// Execute provider request directly with the managed key
const response = await executeProviderRequest(provider, {
model,
systemPrompt,
@@ -165,8 +171,8 @@ export async function POST(request: NextRequest) {
: '',
model: executionData.output?.model,
tokens: executionData.output?.tokens || {
prompt: 0,
completion: 0,
input: 0,
output: 0,
total: 0,
},
// Sanitize any potential Unicode characters in tool calls
@@ -324,3 +330,27 @@ function sanitizeObject(obj: any): any {
return result
}
/**
 * Resolves a Vertex AI OAuth credential id to a usable access token.
 *
 * Looks up the stored account row for the credential, refreshes its token
 * when needed, and returns the resulting access token.
 *
 * @param requestId - correlation id used for log prefixes
 * @param credentialId - id of the stored OAuth account record
 * @throws Error when the credential row is missing or no access token results
 */
async function resolveVertexCredential(requestId: string, credentialId: string): Promise<string> {
  logger.info(`[${requestId}] Resolving Vertex AI credential: ${credentialId}`)

  const storedCredential = await db.query.account.findFirst({
    where: eq(account.id, credentialId),
  })
  if (!storedCredential) {
    throw new Error(`Vertex AI credential not found: ${credentialId}`)
  }

  // Refresh transparently; an expired token is replaced before we return it.
  const refreshed = await refreshTokenIfNeeded(requestId, storedCredential, credentialId)
  if (!refreshed.accessToken) {
    throw new Error('Failed to get Vertex AI access token')
  }

  logger.info(`[${requestId}] Successfully resolved Vertex AI credential`)
  return refreshed.accessToken
}

View File

@@ -0,0 +1,652 @@
/**
* Tests for schedule reactivate PUT API route
*
* @vitest-environment node
*/
import { NextRequest } from 'next/server'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
const { mockGetSession, mockGetUserEntityPermissions, mockDbSelect, mockDbUpdate } = vi.hoisted(
() => ({
mockGetSession: vi.fn(),
mockGetUserEntityPermissions: vi.fn(),
mockDbSelect: vi.fn(),
mockDbUpdate: vi.fn(),
})
)
vi.mock('@/lib/auth', () => ({
getSession: mockGetSession,
}))
vi.mock('@/lib/workspaces/permissions/utils', () => ({
getUserEntityPermissions: mockGetUserEntityPermissions,
}))
vi.mock('@sim/db', () => ({
db: {
select: mockDbSelect,
update: mockDbUpdate,
},
}))
vi.mock('@sim/db/schema', () => ({
workflow: { id: 'id', userId: 'userId', workspaceId: 'workspaceId' },
workflowSchedule: { id: 'id', workflowId: 'workflowId', status: 'status' },
}))
vi.mock('drizzle-orm', () => ({
eq: vi.fn(),
}))
vi.mock('@/lib/core/utils/request', () => ({
generateRequestId: () => 'test-request-id',
}))
vi.mock('@/lib/logs/console/logger', () => ({
createLogger: () => ({
info: vi.fn(),
warn: vi.fn(),
error: vi.fn(),
debug: vi.fn(),
}),
}))
import { PUT } from './route'
/** Builds a JSON PUT request aimed at the schedule route under test. */
function createRequest(body: Record<string, unknown>): NextRequest {
  const payload = JSON.stringify(body)
  const target = new URL('http://test/api/schedules/sched-1')
  return new NextRequest(target, {
    method: 'PUT',
    body: payload,
    headers: { 'Content-Type': 'application/json' },
  })
}
/** Wraps a schedule id in the async route-params shape Next.js passes to handlers. */
function createParams(id: string): { params: Promise<{ id: string }> } {
  const params = Promise.resolve({ id })
  return { params }
}
/**
 * Wires the mocked db.select / db.update builder chains.
 *
 * Each successive db.select().from().where().limit() call resolves to the
 * next entry of `queuedResults`, falling back to an empty array once the
 * queue is exhausted. db.update().set().where() resolves to an empty object.
 */
function mockDbChain(queuedResults: unknown[][]) {
  let nextSelect = 0
  mockDbSelect.mockImplementation(() => {
    const limit = () => queuedResults[nextSelect++] || []
    const where = () => ({ limit })
    return { from: () => ({ where }) }
  })
  mockDbUpdate.mockImplementation(() => {
    const set = () => ({ where: vi.fn().mockResolvedValue({}) })
    return { set }
  })
}
describe('Schedule PUT API (Reactivate)', () => {
beforeEach(() => {
vi.clearAllMocks()
mockGetSession.mockResolvedValue({ user: { id: 'user-1' } })
mockGetUserEntityPermissions.mockResolvedValue('write')
})
afterEach(() => {
vi.clearAllMocks()
})
// Unauthenticated requests must be rejected before any DB access.
describe('Authentication', () => {
it('returns 401 when user is not authenticated', async () => {
mockGetSession.mockResolvedValue(null)
const res = await PUT(createRequest({ action: 'reactivate' }), createParams('sched-1'))
expect(res.status).toBe(401)
const data = await res.json()
expect(data.error).toBe('Unauthorized')
})
})
// The PUT route only accepts { action: 'reactivate' }; anything else is a 400.
describe('Request Validation', () => {
it('returns 400 when action is not reactivate', async () => {
mockDbChain([
[{ id: 'sched-1', workflowId: 'wf-1', status: 'disabled' }],
[{ userId: 'user-1', workspaceId: null }],
])
const res = await PUT(createRequest({ action: 'disable' }), createParams('sched-1'))
expect(res.status).toBe(400)
const data = await res.json()
expect(data.error).toBe('Invalid request body')
})
it('returns 400 when action is missing', async () => {
mockDbChain([
[{ id: 'sched-1', workflowId: 'wf-1', status: 'disabled' }],
[{ userId: 'user-1', workspaceId: null }],
])
const res = await PUT(createRequest({}), createParams('sched-1'))
expect(res.status).toBe(400)
const data = await res.json()
expect(data.error).toBe('Invalid request body')
})
})
// Missing schedule or missing parent workflow both surface as 404s.
describe('Schedule Not Found', () => {
it('returns 404 when schedule does not exist', async () => {
// First select (schedule lookup) yields no rows.
mockDbChain([[]])
const res = await PUT(createRequest({ action: 'reactivate' }), createParams('sched-999'))
expect(res.status).toBe(404)
const data = await res.json()
expect(data.error).toBe('Schedule not found')
})
it('returns 404 when workflow does not exist for schedule', async () => {
// Schedule exists but the second select (workflow lookup) is empty.
mockDbChain([[{ id: 'sched-1', workflowId: 'wf-1', status: 'disabled' }], []])
const res = await PUT(createRequest({ action: 'reactivate' }), createParams('sched-1'))
expect(res.status).toBe(404)
const data = await res.json()
expect(data.error).toBe('Workflow not found')
})
})
// Access rules: the workflow owner can always reactivate; workspace members
// need write or admin permission, read-only members are rejected.
describe('Authorization', () => {
it('returns 403 when user is not workflow owner', async () => {
mockDbChain([
[{ id: 'sched-1', workflowId: 'wf-1', status: 'disabled' }],
[{ userId: 'other-user', workspaceId: null }],
])
const res = await PUT(createRequest({ action: 'reactivate' }), createParams('sched-1'))
expect(res.status).toBe(403)
const data = await res.json()
expect(data.error).toBe('Not authorized to modify this schedule')
})
it('returns 403 for workspace member with only read permission', async () => {
mockGetUserEntityPermissions.mockResolvedValue('read')
mockDbChain([
[{ id: 'sched-1', workflowId: 'wf-1', status: 'disabled' }],
[{ userId: 'other-user', workspaceId: 'ws-1' }],
])
const res = await PUT(createRequest({ action: 'reactivate' }), createParams('sched-1'))
expect(res.status).toBe(403)
})
it('allows workflow owner to reactivate', async () => {
mockDbChain([
[
{
id: 'sched-1',
workflowId: 'wf-1',
status: 'disabled',
cronExpression: '*/5 * * * *',
timezone: 'UTC',
},
],
[{ userId: 'user-1', workspaceId: null }],
])
const res = await PUT(createRequest({ action: 'reactivate' }), createParams('sched-1'))
expect(res.status).toBe(200)
const data = await res.json()
expect(data.message).toBe('Schedule activated successfully')
})
it('allows workspace member with write permission to reactivate', async () => {
mockGetUserEntityPermissions.mockResolvedValue('write')
mockDbChain([
[
{
id: 'sched-1',
workflowId: 'wf-1',
status: 'disabled',
cronExpression: '*/5 * * * *',
timezone: 'UTC',
},
],
[{ userId: 'other-user', workspaceId: 'ws-1' }],
])
const res = await PUT(createRequest({ action: 'reactivate' }), createParams('sched-1'))
expect(res.status).toBe(200)
})
it('allows workspace admin to reactivate', async () => {
mockGetUserEntityPermissions.mockResolvedValue('admin')
mockDbChain([
[
{
id: 'sched-1',
workflowId: 'wf-1',
status: 'disabled',
cronExpression: '*/5 * * * *',
timezone: 'UTC',
},
],
[{ userId: 'other-user', workspaceId: 'ws-1' }],
])
const res = await PUT(createRequest({ action: 'reactivate' }), createParams('sched-1'))
expect(res.status).toBe(200)
})
})
// Reactivation semantics: already-active is a no-op 200, missing/invalid cron
// is a 400, and a successful reactivation recomputes nextRunAt from the
// stored cron expression.
describe('Schedule State Handling', () => {
it('returns success message when schedule is already active', async () => {
mockDbChain([
[{ id: 'sched-1', workflowId: 'wf-1', status: 'active' }],
[{ userId: 'user-1', workspaceId: null }],
])
const res = await PUT(createRequest({ action: 'reactivate' }), createParams('sched-1'))
expect(res.status).toBe(200)
const data = await res.json()
expect(data.message).toBe('Schedule is already active')
// Idempotent path must not touch the database.
expect(mockDbUpdate).not.toHaveBeenCalled()
})
it('successfully reactivates disabled schedule', async () => {
mockDbChain([
[
{
id: 'sched-1',
workflowId: 'wf-1',
status: 'disabled',
cronExpression: '*/5 * * * *',
timezone: 'UTC',
},
],
[{ userId: 'user-1', workspaceId: null }],
])
const res = await PUT(createRequest({ action: 'reactivate' }), createParams('sched-1'))
expect(res.status).toBe(200)
const data = await res.json()
expect(data.message).toBe('Schedule activated successfully')
expect(data.nextRunAt).toBeDefined()
expect(mockDbUpdate).toHaveBeenCalled()
})
it('returns 400 when schedule has no cron expression', async () => {
mockDbChain([
[
{
id: 'sched-1',
workflowId: 'wf-1',
status: 'disabled',
cronExpression: null,
timezone: 'UTC',
},
],
[{ userId: 'user-1', workspaceId: null }],
])
const res = await PUT(createRequest({ action: 'reactivate' }), createParams('sched-1'))
expect(res.status).toBe(400)
const data = await res.json()
expect(data.error).toBe('Schedule has no cron expression')
})
it('returns 400 when schedule has invalid cron expression', async () => {
mockDbChain([
[
{
id: 'sched-1',
workflowId: 'wf-1',
status: 'disabled',
cronExpression: 'invalid-cron',
timezone: 'UTC',
},
],
[{ userId: 'user-1', workspaceId: null }],
])
const res = await PUT(createRequest({ action: 'reactivate' }), createParams('sched-1'))
expect(res.status).toBe(400)
const data = await res.json()
expect(data.error).toBe('Schedule has invalid cron expression')
})
it('calculates nextRunAt from stored cron expression (every 5 minutes)', async () => {
mockDbChain([
[
{
id: 'sched-1',
workflowId: 'wf-1',
status: 'disabled',
cronExpression: '*/5 * * * *',
timezone: 'UTC',
},
],
[{ userId: 'user-1', workspaceId: null }],
])
const beforeCall = Date.now()
const res = await PUT(createRequest({ action: 'reactivate' }), createParams('sched-1'))
const afterCall = Date.now()
expect(res.status).toBe(200)
const data = await res.json()
const nextRunAt = new Date(data.nextRunAt).getTime()
// nextRunAt should be within 0-5 minutes in the future
expect(nextRunAt).toBeGreaterThan(beforeCall)
expect(nextRunAt).toBeLessThanOrEqual(afterCall + 5 * 60 * 1000 + 1000)
// Should align with 5-minute intervals (minute divisible by 5)
// NOTE(review): getMinutes() is runner-local time; assumes the runner's UTC
// offset is a multiple of 5 minutes (true for all real zones) — confirm.
expect(new Date(nextRunAt).getMinutes() % 5).toBe(0)
})
it('calculates nextRunAt from daily cron expression', async () => {
mockDbChain([
[
{
id: 'sched-1',
workflowId: 'wf-1',
status: 'disabled',
cronExpression: '30 14 * * *', // 2:30 PM daily
timezone: 'UTC',
},
],
[{ userId: 'user-1', workspaceId: null }],
])
const beforeCall = Date.now()
const res = await PUT(createRequest({ action: 'reactivate' }), createParams('sched-1'))
expect(res.status).toBe(200)
const data = await res.json()
const nextRunAt = new Date(data.nextRunAt)
// Should be a future date at 14:30 UTC
expect(nextRunAt.getTime()).toBeGreaterThan(beforeCall)
expect(nextRunAt.getUTCHours()).toBe(14)
expect(nextRunAt.getUTCMinutes()).toBe(30)
expect(nextRunAt.getUTCSeconds()).toBe(0)
})
it('calculates nextRunAt from weekly cron expression', async () => {
mockDbChain([
[
{
id: 'sched-1',
workflowId: 'wf-1',
status: 'disabled',
cronExpression: '0 9 * * 1', // Monday at 9:00 AM
timezone: 'UTC',
},
],
[{ userId: 'user-1', workspaceId: null }],
])
const beforeCall = Date.now()
const res = await PUT(createRequest({ action: 'reactivate' }), createParams('sched-1'))
expect(res.status).toBe(200)
const data = await res.json()
const nextRunAt = new Date(data.nextRunAt)
// Should be a future date on Monday at 09:00 UTC
expect(nextRunAt.getTime()).toBeGreaterThan(beforeCall)
expect(nextRunAt.getUTCDay()).toBe(1) // Monday
expect(nextRunAt.getUTCHours()).toBe(9)
expect(nextRunAt.getUTCMinutes()).toBe(0)
})
it('calculates nextRunAt from monthly cron expression', async () => {
mockDbChain([
[
{
id: 'sched-1',
workflowId: 'wf-1',
status: 'disabled',
cronExpression: '0 10 15 * *', // 15th of month at 10:00 AM
timezone: 'UTC',
},
],
[{ userId: 'user-1', workspaceId: null }],
])
const beforeCall = Date.now()
const res = await PUT(createRequest({ action: 'reactivate' }), createParams('sched-1'))
expect(res.status).toBe(200)
const data = await res.json()
const nextRunAt = new Date(data.nextRunAt)
// Should be a future date on the 15th at 10:00 UTC
expect(nextRunAt.getTime()).toBeGreaterThan(beforeCall)
expect(nextRunAt.getUTCDate()).toBe(15)
expect(nextRunAt.getUTCHours()).toBe(10)
expect(nextRunAt.getUTCMinutes()).toBe(0)
})
})
// nextRunAt must be computed in the schedule's stored IANA timezone; these
// cases assert the resulting UTC instant (with DST-tolerant hour ranges).
describe('Timezone Handling in Reactivation', () => {
it('calculates nextRunAt with America/New_York timezone', async () => {
mockDbChain([
[
{
id: 'sched-1',
workflowId: 'wf-1',
status: 'disabled',
cronExpression: '0 9 * * *', // 9:00 AM Eastern
timezone: 'America/New_York',
},
],
[{ userId: 'user-1', workspaceId: null }],
])
const beforeCall = Date.now()
const res = await PUT(createRequest({ action: 'reactivate' }), createParams('sched-1'))
expect(res.status).toBe(200)
const data = await res.json()
const nextRunAt = new Date(data.nextRunAt)
// Should be a future date
expect(nextRunAt.getTime()).toBeGreaterThan(beforeCall)
// The exact UTC hour will depend on DST, but it should be 13:00 or 14:00 UTC
const utcHour = nextRunAt.getUTCHours()
expect([13, 14]).toContain(utcHour) // 9 AM ET = 1-2 PM UTC depending on DST
expect(nextRunAt.getUTCMinutes()).toBe(0)
})
it('calculates nextRunAt with Asia/Tokyo timezone', async () => {
mockDbChain([
[
{
id: 'sched-1',
workflowId: 'wf-1',
status: 'disabled',
cronExpression: '30 15 * * *', // 3:30 PM Japan Time
timezone: 'Asia/Tokyo',
},
],
[{ userId: 'user-1', workspaceId: null }],
])
const beforeCall = Date.now()
const res = await PUT(createRequest({ action: 'reactivate' }), createParams('sched-1'))
expect(res.status).toBe(200)
const data = await res.json()
const nextRunAt = new Date(data.nextRunAt)
// Should be a future date
expect(nextRunAt.getTime()).toBeGreaterThan(beforeCall)
// 3:30 PM JST (UTC+9) = 6:30 AM UTC
expect(nextRunAt.getUTCHours()).toBe(6)
expect(nextRunAt.getUTCMinutes()).toBe(30)
})
it('calculates nextRunAt with Europe/London timezone', async () => {
mockDbChain([
[
{
id: 'sched-1',
workflowId: 'wf-1',
status: 'disabled',
cronExpression: '0 12 * * 5', // Friday at noon London time
timezone: 'Europe/London',
},
],
[{ userId: 'user-1', workspaceId: null }],
])
const beforeCall = Date.now()
const res = await PUT(createRequest({ action: 'reactivate' }), createParams('sched-1'))
expect(res.status).toBe(200)
const data = await res.json()
const nextRunAt = new Date(data.nextRunAt)
// Should be a future date on Friday
expect(nextRunAt.getTime()).toBeGreaterThan(beforeCall)
expect(nextRunAt.getUTCDay()).toBe(5) // Friday
// UTC hour depends on BST/GMT (11:00 or 12:00 UTC)
const utcHour = nextRunAt.getUTCHours()
expect([11, 12]).toContain(utcHour)
expect(nextRunAt.getUTCMinutes()).toBe(0)
})
it('uses UTC as default timezone when timezone is not set', async () => {
mockDbChain([
[
{
id: 'sched-1',
workflowId: 'wf-1',
status: 'disabled',
cronExpression: '0 10 * * *', // 10:00 AM
timezone: null,
},
],
[{ userId: 'user-1', workspaceId: null }],
])
const beforeCall = Date.now()
const res = await PUT(createRequest({ action: 'reactivate' }), createParams('sched-1'))
expect(res.status).toBe(200)
const data = await res.json()
const nextRunAt = new Date(data.nextRunAt)
// Should be a future date at 10:00 UTC
expect(nextRunAt.getTime()).toBeGreaterThan(beforeCall)
expect(nextRunAt.getUTCHours()).toBe(10)
expect(nextRunAt.getUTCMinutes()).toBe(0)
})
it('handles minutely schedules with timezone correctly', async () => {
mockDbChain([
[
{
id: 'sched-1',
workflowId: 'wf-1',
status: 'disabled',
cronExpression: '*/10 * * * *', // Every 10 minutes
timezone: 'America/Los_Angeles',
},
],
[{ userId: 'user-1', workspaceId: null }],
])
const beforeCall = Date.now()
const res = await PUT(createRequest({ action: 'reactivate' }), createParams('sched-1'))
expect(res.status).toBe(200)
const data = await res.json()
const nextRunAt = new Date(data.nextRunAt)
// Should be a future date within the next 10 minutes
expect(nextRunAt.getTime()).toBeGreaterThan(beforeCall)
expect(nextRunAt.getTime()).toBeLessThanOrEqual(beforeCall + 10 * 60 * 1000 + 1000)
// Should align with 10-minute intervals
// NOTE(review): getMinutes() is runner-local; a runner in a zone with a
// :45-minute UTC offset would break this assertion — confirm acceptable.
expect(nextRunAt.getMinutes() % 10).toBe(0)
})
it('handles hourly schedules with timezone correctly', async () => {
mockDbChain([
[
{
id: 'sched-1',
workflowId: 'wf-1',
status: 'disabled',
cronExpression: '15 * * * *', // At minute 15 of every hour
timezone: 'America/Chicago',
},
],
[{ userId: 'user-1', workspaceId: null }],
])
const beforeCall = Date.now()
const res = await PUT(createRequest({ action: 'reactivate' }), createParams('sched-1'))
expect(res.status).toBe(200)
const data = await res.json()
const nextRunAt = new Date(data.nextRunAt)
// Should be a future date at minute 15
expect(nextRunAt.getTime()).toBeGreaterThan(beforeCall)
// NOTE(review): local-time minute assertion; assumes the runner's UTC offset
// is whole-hour or :30 — a :30/:45-offset runner could shift this — confirm.
expect(nextRunAt.getMinutes()).toBe(15)
expect(nextRunAt.getSeconds()).toBe(0)
})
it('handles custom cron expressions with complex patterns and timezone', async () => {
mockDbChain([
[
{
id: 'sched-1',
workflowId: 'wf-1',
status: 'disabled',
cronExpression: '0 9 * * 1-5', // Weekdays at 9 AM
timezone: 'America/New_York',
},
],
[{ userId: 'user-1', workspaceId: null }],
])
const beforeCall = Date.now()
const res = await PUT(createRequest({ action: 'reactivate' }), createParams('sched-1'))
expect(res.status).toBe(200)
const data = await res.json()
const nextRunAt = new Date(data.nextRunAt)
// Should be a future date on a weekday (1-5)
expect(nextRunAt.getTime()).toBeGreaterThan(beforeCall)
const dayOfWeek = nextRunAt.getUTCDay()
expect([1, 2, 3, 4, 5]).toContain(dayOfWeek)
})
})
// Any thrown error inside the route must surface as a generic 500.
describe('Error Handling', () => {
it('returns 500 when database operation fails', async () => {
// Make the very first select throw synchronously.
mockDbSelect.mockImplementation(() => {
throw new Error('Database connection failed')
})
const res = await PUT(createRequest({ action: 'reactivate' }), createParams('sched-1'))
expect(res.status).toBe(500)
const data = await res.json()
expect(data.error).toBe('Failed to update schedule')
})
})
})

View File

@@ -6,104 +6,26 @@ import { z } from 'zod'
import { getSession } from '@/lib/auth'
import { generateRequestId } from '@/lib/core/utils/request'
import { createLogger } from '@/lib/logs/console/logger'
import { validateCronExpression } from '@/lib/workflows/schedules/utils'
import { getUserEntityPermissions } from '@/lib/workspaces/permissions/utils'
const logger = createLogger('ScheduleAPI')
export const dynamic = 'force-dynamic'
const scheduleActionEnum = z.enum(['reactivate', 'disable'])
const scheduleStatusEnum = z.enum(['active', 'disabled'])
const scheduleUpdateSchema = z
.object({
action: scheduleActionEnum.optional(),
status: scheduleStatusEnum.optional(),
})
.refine((data) => data.action || data.status, {
message: 'Either action or status must be provided',
})
const scheduleUpdateSchema = z.object({
action: z.literal('reactivate'),
})
/**
 * DELETE /api/schedules/[id]
 *
 * Deletes a schedule. The caller must own the schedule's workflow or hold
 * write/admin permission on the workflow's workspace.
 * Responses: 200 { success: true } · 401 unauthenticated · 403 unauthorized ·
 * 404 schedule missing · 500 unexpected error.
 */
export async function DELETE(
request: NextRequest,
{ params }: { params: Promise<{ id: string }> }
) {
const requestId = generateRequestId()
try {
const { id } = await params
logger.debug(`[${requestId}] Deleting schedule with ID: ${id}`)
const session = await getSession()
if (!session?.user?.id) {
logger.warn(`[${requestId}] Unauthorized schedule deletion attempt`)
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
}
// Fetch the schedule joined with its workflow so ownership/workspace can be
// checked in a single query.
const schedules = await db
.select({
schedule: workflowSchedule,
workflow: {
id: workflow.id,
userId: workflow.userId,
workspaceId: workflow.workspaceId,
},
})
.from(workflowSchedule)
.innerJoin(workflow, eq(workflowSchedule.workflowId, workflow.id))
.where(eq(workflowSchedule.id, id))
.limit(1)
if (schedules.length === 0) {
logger.warn(`[${requestId}] Schedule not found: ${id}`)
return NextResponse.json({ error: 'Schedule not found' }, { status: 404 })
}
const workflowRecord = schedules[0].workflow
// Check authorization - either the user owns the workflow or has write/admin workspace permissions
let isAuthorized = workflowRecord.userId === session.user.id
// If not authorized by ownership and the workflow belongs to a workspace, check workspace permissions
if (!isAuthorized && workflowRecord.workspaceId) {
const userPermission = await getUserEntityPermissions(
session.user.id,
'workspace',
workflowRecord.workspaceId
)
// Read-only members may not delete; only write/admin qualify.
isAuthorized = userPermission === 'write' || userPermission === 'admin'
}
if (!isAuthorized) {
logger.warn(`[${requestId}] Unauthorized schedule deletion attempt for schedule: ${id}`)
return NextResponse.json({ error: 'Unauthorized' }, { status: 403 })
}
// Delete the schedule
await db.delete(workflowSchedule).where(eq(workflowSchedule.id, id))
logger.info(`[${requestId}] Successfully deleted schedule: ${id}`)
return NextResponse.json({ success: true }, { status: 200 })
} catch (error) {
logger.error(`[${requestId}] Error deleting schedule`, error)
return NextResponse.json({ error: 'Internal server error' }, { status: 500 })
}
}
/**
* Update a schedule - can be used to reactivate a disabled schedule
* Reactivate a disabled schedule
*/
export async function PUT(request: NextRequest, { params }: { params: Promise<{ id: string }> }) {
const requestId = generateRequestId()
try {
const { id } = await params
const scheduleId = id
logger.debug(`[${requestId}] Updating schedule with ID: ${scheduleId}`)
const { id: scheduleId } = await params
logger.debug(`[${requestId}] Reactivating schedule with ID: ${scheduleId}`)
const session = await getSession()
if (!session?.user?.id) {
@@ -115,18 +37,16 @@ export async function PUT(request: NextRequest, { params }: { params: Promise<{
const validation = scheduleUpdateSchema.safeParse(body)
if (!validation.success) {
const firstError = validation.error.errors[0]
logger.warn(`[${requestId}] Validation error:`, firstError)
return NextResponse.json({ error: firstError.message }, { status: 400 })
return NextResponse.json({ error: 'Invalid request body' }, { status: 400 })
}
const { action, status: requestedStatus } = validation.data
const [schedule] = await db
.select({
id: workflowSchedule.id,
workflowId: workflowSchedule.workflowId,
status: workflowSchedule.status,
cronExpression: workflowSchedule.cronExpression,
timezone: workflowSchedule.timezone,
})
.from(workflowSchedule)
.where(eq(workflowSchedule.id, scheduleId))
@@ -164,57 +84,40 @@ export async function PUT(request: NextRequest, { params }: { params: Promise<{
return NextResponse.json({ error: 'Not authorized to modify this schedule' }, { status: 403 })
}
if (action === 'reactivate' || (requestedStatus && requestedStatus === 'active')) {
if (schedule.status === 'active') {
return NextResponse.json({ message: 'Schedule is already active' }, { status: 200 })
}
if (schedule.status === 'active') {
return NextResponse.json({ message: 'Schedule is already active' }, { status: 200 })
}
const now = new Date()
const nextRunAt = new Date(now.getTime() + 60 * 1000) // Schedule to run in 1 minute
if (!schedule.cronExpression) {
logger.error(`[${requestId}] Schedule has no cron expression: ${scheduleId}`)
return NextResponse.json({ error: 'Schedule has no cron expression' }, { status: 400 })
}
await db
.update(workflowSchedule)
.set({
status: 'active',
failedCount: 0,
updatedAt: now,
nextRunAt,
})
.where(eq(workflowSchedule.id, scheduleId))
const cronResult = validateCronExpression(schedule.cronExpression, schedule.timezone || 'UTC')
if (!cronResult.isValid || !cronResult.nextRun) {
logger.error(`[${requestId}] Invalid cron expression for schedule: ${scheduleId}`)
return NextResponse.json({ error: 'Schedule has invalid cron expression' }, { status: 400 })
}
logger.info(`[${requestId}] Reactivated schedule: ${scheduleId}`)
const now = new Date()
const nextRunAt = cronResult.nextRun
return NextResponse.json({
message: 'Schedule activated successfully',
await db
.update(workflowSchedule)
.set({
status: 'active',
failedCount: 0,
updatedAt: now,
nextRunAt,
})
}
.where(eq(workflowSchedule.id, scheduleId))
if (action === 'disable' || (requestedStatus && requestedStatus === 'disabled')) {
if (schedule.status === 'disabled') {
return NextResponse.json({ message: 'Schedule is already disabled' }, { status: 200 })
}
logger.info(`[${requestId}] Reactivated schedule: ${scheduleId}`)
const now = new Date()
await db
.update(workflowSchedule)
.set({
status: 'disabled',
updatedAt: now,
nextRunAt: null, // Clear next run time when disabled
})
.where(eq(workflowSchedule.id, scheduleId))
logger.info(`[${requestId}] Disabled schedule: ${scheduleId}`)
return NextResponse.json({
message: 'Schedule disabled successfully',
})
}
logger.warn(`[${requestId}] Unsupported update action for schedule: ${scheduleId}`)
return NextResponse.json({ error: 'Unsupported update action' }, { status: 400 })
return NextResponse.json({
message: 'Schedule activated successfully',
nextRunAt,
})
} catch (error) {
logger.error(`[${requestId}] Error updating schedule`, error)
return NextResponse.json({ error: 'Failed to update schedule' }, { status: 500 })

View File

@@ -1,143 +0,0 @@
/**
* Integration tests for schedule status API route
*
* @vitest-environment node
*/
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import { createMockRequest, mockScheduleStatusDb } from '@/app/api/__test-utils__/utils'
// Common mocks
// Baseline schedule row returned by the mocked DB; individual tests override
// fields (e.g. status) as needed.
const mockSchedule = {
id: 'schedule-id',
workflowId: 'workflow-id',
status: 'active',
failedCount: 0,
lastRanAt: new Date('2024-01-01T00:00:00.000Z'),
lastFailedAt: null,
nextRunAt: new Date('2024-01-02T00:00:00.000Z'),
}
beforeEach(() => {
// resetModules forces each test's dynamic import of the route to pick up
// that test's vi.doMock registrations.
vi.resetModules()
vi.doMock('@/lib/logs/console/logger', () => ({
createLogger: () => ({ info: vi.fn(), warn: vi.fn(), error: vi.fn(), debug: vi.fn() }),
}))
vi.doMock('crypto', () => ({
randomUUID: vi.fn(() => 'test-uuid'),
default: { randomUUID: vi.fn(() => 'test-uuid') },
}))
})
afterEach(() => {
vi.clearAllMocks()
})
// Each test registers its own auth mock via vi.doMock and then dynamically
// imports the route so the mock is in effect (modules were reset in beforeEach).
describe('Schedule Status API Route', () => {
it('returns schedule status successfully', async () => {
mockScheduleStatusDb({}) // default mocks
vi.doMock('@/lib/auth', () => ({
getSession: vi.fn().mockResolvedValue({ user: { id: 'user-id' } }),
}))
const req = createMockRequest('GET')
const { GET } = await import('@/app/api/schedules/[id]/status/route')
const res = await GET(req, { params: Promise.resolve({ id: 'schedule-id' }) })
expect(res.status).toBe(200)
const data = await res.json()
expect(data).toMatchObject({
status: 'active',
failedCount: 0,
nextRunAt: mockSchedule.nextRunAt.toISOString(),
isDisabled: false,
})
})
it('marks disabled schedules with isDisabled = true', async () => {
mockScheduleStatusDb({ schedule: [{ ...mockSchedule, status: 'disabled' }] })
vi.doMock('@/lib/auth', () => ({
getSession: vi.fn().mockResolvedValue({ user: { id: 'user-id' } }),
}))
const req = createMockRequest('GET')
const { GET } = await import('@/app/api/schedules/[id]/status/route')
const res = await GET(req, { params: Promise.resolve({ id: 'schedule-id' }) })
expect(res.status).toBe(200)
const data = await res.json()
expect(data).toHaveProperty('status', 'disabled')
expect(data).toHaveProperty('isDisabled', true)
expect(data).toHaveProperty('lastFailedAt')
})
it('returns 404 if schedule not found', async () => {
// Empty schedule result set simulates a missing row.
mockScheduleStatusDb({ schedule: [] })
vi.doMock('@/lib/auth', () => ({
getSession: vi.fn().mockResolvedValue({ user: { id: 'user-id' } }),
}))
const req = createMockRequest('GET')
const { GET } = await import('@/app/api/schedules/[id]/status/route')
const res = await GET(req, { params: Promise.resolve({ id: 'missing-id' }) })
expect(res.status).toBe(404)
const data = await res.json()
expect(data).toHaveProperty('error', 'Schedule not found')
})
it('returns 404 if related workflow not found', async () => {
mockScheduleStatusDb({ workflow: [] })
vi.doMock('@/lib/auth', () => ({
getSession: vi.fn().mockResolvedValue({ user: { id: 'user-id' } }),
}))
const req = createMockRequest('GET')
const { GET } = await import('@/app/api/schedules/[id]/status/route')
const res = await GET(req, { params: Promise.resolve({ id: 'schedule-id' }) })
expect(res.status).toBe(404)
const data = await res.json()
expect(data).toHaveProperty('error', 'Workflow not found')
})
it('returns 403 when user is not owner of workflow', async () => {
// Workflow owned by someone else and no workspace permission path mocked.
mockScheduleStatusDb({ workflow: [{ userId: 'another-user' }] })
vi.doMock('@/lib/auth', () => ({
getSession: vi.fn().mockResolvedValue({ user: { id: 'user-id' } }),
}))
const req = createMockRequest('GET')
const { GET } = await import('@/app/api/schedules/[id]/status/route')
const res = await GET(req, { params: Promise.resolve({ id: 'schedule-id' }) })
expect(res.status).toBe(403)
const data = await res.json()
expect(data).toHaveProperty('error', 'Not authorized to view this schedule')
})
it('returns 401 when user is not authenticated', async () => {
mockScheduleStatusDb({})
vi.doMock('@/lib/auth', () => ({
getSession: vi.fn().mockResolvedValue(null),
}))
const req = createMockRequest('GET')
const { GET } = await import('@/app/api/schedules/[id]/status/route')
const res = await GET(req, { params: Promise.resolve({ id: 'schedule-id' }) })
expect(res.status).toBe(401)
const data = await res.json()
expect(data).toHaveProperty('error', 'Unauthorized')
})
})

View File

@@ -1,84 +0,0 @@
import { db } from '@sim/db'
import { workflow, workflowSchedule } from '@sim/db/schema'
import { eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { getSession } from '@/lib/auth'
import { generateRequestId } from '@/lib/core/utils/request'
import { createLogger } from '@/lib/logs/console/logger'
import { getUserEntityPermissions } from '@/lib/workspaces/permissions/utils'
const logger = createLogger('ScheduleStatusAPI')
/**
 * GET /api/schedules/[id]/status
 *
 * Returns the execution status of a schedule (status, failure count,
 * last/next run timestamps, and an isDisabled convenience flag).
 * The caller must own the schedule's workflow or hold any permission on
 * the workflow's workspace.
 */
export async function GET(req: NextRequest, { params }: { params: Promise<{ id: string }> }) {
  const requestId = generateRequestId()
  const { id: scheduleId } = await params
  try {
    const session = await getSession()
    const userId = session?.user?.id
    if (!userId) {
      logger.warn(`[${requestId}] Unauthorized schedule status request`)
      return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
    }
    const scheduleRows = await db
      .select({
        id: workflowSchedule.id,
        workflowId: workflowSchedule.workflowId,
        status: workflowSchedule.status,
        failedCount: workflowSchedule.failedCount,
        lastRanAt: workflowSchedule.lastRanAt,
        lastFailedAt: workflowSchedule.lastFailedAt,
        nextRunAt: workflowSchedule.nextRunAt,
      })
      .from(workflowSchedule)
      .where(eq(workflowSchedule.id, scheduleId))
      .limit(1)
    const schedule = scheduleRows[0]
    if (!schedule) {
      logger.warn(`[${requestId}] Schedule not found: ${scheduleId}`)
      return NextResponse.json({ error: 'Schedule not found' }, { status: 404 })
    }
    const ownerRows = await db
      .select({ userId: workflow.userId, workspaceId: workflow.workspaceId })
      .from(workflow)
      .where(eq(workflow.id, schedule.workflowId))
      .limit(1)
    const workflowRecord = ownerRows[0]
    if (!workflowRecord) {
      logger.warn(`[${requestId}] Workflow not found for schedule: ${scheduleId}`)
      return NextResponse.json({ error: 'Workflow not found' }, { status: 404 })
    }
    // The workflow owner always passes; otherwise any workspace-level
    // permission (read included) suffices for this read-only endpoint.
    let isAuthorized = workflowRecord.userId === userId
    if (!isAuthorized && workflowRecord.workspaceId) {
      const permission = await getUserEntityPermissions(
        userId,
        'workspace',
        workflowRecord.workspaceId
      )
      isAuthorized = permission !== null
    }
    if (!isAuthorized) {
      logger.warn(`[${requestId}] User not authorized to view this schedule: ${scheduleId}`)
      return NextResponse.json({ error: 'Not authorized to view this schedule' }, { status: 403 })
    }
    const { status, failedCount, lastRanAt, lastFailedAt, nextRunAt } = schedule
    return NextResponse.json({
      status,
      failedCount,
      lastRanAt,
      lastFailedAt,
      nextRunAt,
      isDisabled: status === 'disabled',
    })
  } catch (error) {
    logger.error(`[${requestId}] Error retrieving schedule status: ${scheduleId}`, error)
    return NextResponse.json({ error: 'Failed to retrieve schedule status' }, { status: 500 })
  }
}

View File

@@ -1,43 +1,15 @@
/**
* Integration tests for schedule configuration API route
* Tests for schedule GET API route
*
* @vitest-environment node
*/
import { NextRequest } from 'next/server'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import { createMockRequest, mockExecutionDependencies } from '@/app/api/__test-utils__/utils'
const {
mockGetSession,
mockGetUserEntityPermissions,
mockSelectLimit,
mockInsertValues,
mockOnConflictDoUpdate,
mockInsert,
mockUpdate,
mockDelete,
mockTransaction,
mockRandomUUID,
mockGetScheduleTimeValues,
mockGetSubBlockValue,
mockGenerateCronExpression,
mockCalculateNextRunTime,
mockValidateCronExpression,
} = vi.hoisted(() => ({
const { mockGetSession, mockGetUserEntityPermissions, mockDbSelect } = vi.hoisted(() => ({
mockGetSession: vi.fn(),
mockGetUserEntityPermissions: vi.fn(),
mockSelectLimit: vi.fn(),
mockInsertValues: vi.fn(),
mockOnConflictDoUpdate: vi.fn(),
mockInsert: vi.fn(),
mockUpdate: vi.fn(),
mockDelete: vi.fn(),
mockTransaction: vi.fn(),
mockRandomUUID: vi.fn(),
mockGetScheduleTimeValues: vi.fn(),
mockGetSubBlockValue: vi.fn(),
mockGenerateCronExpression: vi.fn(),
mockCalculateNextRunTime: vi.fn(),
mockValidateCronExpression: vi.fn(),
mockDbSelect: vi.fn(),
}))
vi.mock('@/lib/auth', () => ({
@@ -50,231 +22,136 @@ vi.mock('@/lib/workspaces/permissions/utils', () => ({
vi.mock('@sim/db', () => ({
db: {
select: vi.fn().mockReturnValue({
from: vi.fn().mockReturnValue({
where: vi.fn().mockReturnValue({
limit: mockSelectLimit,
}),
}),
}),
insert: mockInsert,
update: mockUpdate,
delete: mockDelete,
select: mockDbSelect,
},
}))
vi.mock('@sim/db/schema', () => ({
workflow: {
id: 'workflow_id',
userId: 'user_id',
workspaceId: 'workspace_id',
},
workflowSchedule: {
id: 'schedule_id',
workflowId: 'workflow_id',
blockId: 'block_id',
cronExpression: 'cron_expression',
nextRunAt: 'next_run_at',
status: 'status',
},
workflow: { id: 'id', userId: 'userId', workspaceId: 'workspaceId' },
workflowSchedule: { workflowId: 'workflowId', blockId: 'blockId' },
}))
vi.mock('drizzle-orm', () => ({
eq: vi.fn((...args) => ({ type: 'eq', args })),
and: vi.fn((...args) => ({ type: 'and', args })),
}))
vi.mock('crypto', () => ({
randomUUID: mockRandomUUID,
default: {
randomUUID: mockRandomUUID,
},
}))
vi.mock('@/lib/workflows/schedules/utils', () => ({
getScheduleTimeValues: mockGetScheduleTimeValues,
getSubBlockValue: mockGetSubBlockValue,
generateCronExpression: mockGenerateCronExpression,
calculateNextRunTime: mockCalculateNextRunTime,
validateCronExpression: mockValidateCronExpression,
BlockState: {},
eq: vi.fn(),
and: vi.fn(),
}))
vi.mock('@/lib/core/utils/request', () => ({
generateRequestId: vi.fn(() => 'test-request-id'),
generateRequestId: () => 'test-request-id',
}))
vi.mock('@/lib/logs/console/logger', () => ({
createLogger: vi.fn(() => ({
createLogger: () => ({
info: vi.fn(),
warn: vi.fn(),
error: vi.fn(),
debug: vi.fn(),
})),
}),
}))
vi.mock('@/lib/core/telemetry', () => ({
trackPlatformEvent: vi.fn(),
}))
import { GET } from '@/app/api/schedules/route'
import { db } from '@sim/db'
import { POST } from '@/app/api/schedules/route'
/** Builds a GET NextRequest targeting the given absolute URL string. */
function createRequest(url: string): NextRequest {
  const target = new URL(url)
  return new NextRequest(target, { method: 'GET' })
}
describe('Schedule Configuration API Route', () => {
/**
 * Stubs the mocked db.select() fluent chain so each
 * `.from().where().limit()` call yields the next entry from `results`
 * (an empty array once the queue is exhausted).
 */
function mockDbChain(results: any[]) {
  let callIndex = 0
  const nextResult = () => results[callIndex++] || []
  mockDbSelect.mockImplementation(() => ({
    from: () => ({
      where: () => ({
        limit: nextResult,
      }),
    }),
  }))
}
describe('Schedule GET API', () => {
beforeEach(() => {
vi.clearAllMocks()
;(db as any).transaction = mockTransaction
mockExecutionDependencies()
mockGetSession.mockResolvedValue({
user: {
id: 'user-id',
email: 'test@example.com',
},
})
mockGetUserEntityPermissions.mockResolvedValue('admin')
mockSelectLimit.mockReturnValue([
{
id: 'workflow-id',
userId: 'user-id',
workspaceId: null,
},
])
mockInsertValues.mockImplementation(() => ({
onConflictDoUpdate: mockOnConflictDoUpdate,
}))
mockOnConflictDoUpdate.mockResolvedValue({})
mockInsert.mockReturnValue({
values: mockInsertValues,
})
mockUpdate.mockImplementation(() => ({
set: vi.fn().mockImplementation(() => ({
where: vi.fn().mockResolvedValue([]),
})),
}))
mockDelete.mockImplementation(() => ({
where: vi.fn().mockResolvedValue([]),
}))
mockTransaction.mockImplementation(async (callback) => {
const tx = {
insert: vi.fn().mockReturnValue({
values: mockInsertValues,
}),
}
return callback(tx)
})
mockRandomUUID.mockReturnValue('test-uuid')
mockGetScheduleTimeValues.mockReturnValue({
scheduleTime: '09:30',
minutesInterval: 15,
hourlyMinute: 0,
dailyTime: [9, 30],
weeklyDay: 1,
weeklyTime: [9, 30],
monthlyDay: 1,
monthlyTime: [9, 30],
})
mockGetSubBlockValue.mockImplementation((block: any, id: string) => {
const subBlocks = {
startWorkflow: 'schedule',
scheduleType: 'daily',
scheduleTime: '09:30',
dailyTime: '09:30',
}
return subBlocks[id as keyof typeof subBlocks] || ''
})
mockGenerateCronExpression.mockReturnValue('0 9 * * *')
mockCalculateNextRunTime.mockReturnValue(new Date())
mockValidateCronExpression.mockReturnValue({ isValid: true })
mockGetSession.mockResolvedValue({ user: { id: 'user-1' } })
mockGetUserEntityPermissions.mockResolvedValue('read')
})
afterEach(() => {
vi.clearAllMocks()
})
it('should create a new schedule successfully', async () => {
const req = createMockRequest('POST', {
workflowId: 'workflow-id',
state: {
blocks: {
'starter-id': {
type: 'starter',
subBlocks: {
startWorkflow: { value: 'schedule' },
scheduleType: { value: 'daily' },
scheduleTime: { value: '09:30' },
dailyTime: { value: '09:30' },
},
},
},
edges: [],
loops: {},
},
})
it('returns schedule data for authorized user', async () => {
mockDbChain([
[{ userId: 'user-1', workspaceId: null }],
[{ id: 'sched-1', cronExpression: '0 9 * * *', status: 'active', failedCount: 0 }],
])
const response = await POST(req)
const res = await GET(createRequest('http://test/api/schedules?workflowId=wf-1'))
const data = await res.json()
expect(response).toBeDefined()
expect(response.status).toBe(200)
const responseData = await response.json()
expect(responseData).toHaveProperty('message', 'Schedule updated')
expect(responseData).toHaveProperty('cronExpression', '0 9 * * *')
expect(responseData).toHaveProperty('nextRunAt')
expect(res.status).toBe(200)
expect(data.schedule.cronExpression).toBe('0 9 * * *')
expect(data.isDisabled).toBe(false)
})
it('should handle errors gracefully', async () => {
mockSelectLimit.mockReturnValue([])
it('returns null when no schedule exists', async () => {
mockDbChain([[{ userId: 'user-1', workspaceId: null }], []])
const req = createMockRequest('POST', {
workflowId: 'workflow-id',
state: { blocks: {}, edges: [], loops: {} },
})
const res = await GET(createRequest('http://test/api/schedules?workflowId=wf-1'))
const data = await res.json()
const response = await POST(req)
expect(response.status).toBeGreaterThanOrEqual(400)
const data = await response.json()
expect(data).toHaveProperty('error')
expect(res.status).toBe(200)
expect(data.schedule).toBeNull()
})
it('should require authentication', async () => {
it('requires authentication', async () => {
mockGetSession.mockResolvedValue(null)
const req = createMockRequest('POST', {
workflowId: 'workflow-id',
state: { blocks: {}, edges: [], loops: {} },
})
const res = await GET(createRequest('http://test/api/schedules?workflowId=wf-1'))
const response = await POST(req)
expect(response.status).toBe(401)
const data = await response.json()
expect(data).toHaveProperty('error', 'Unauthorized')
expect(res.status).toBe(401)
})
it('should validate input data', async () => {
const req = createMockRequest('POST', {
workflowId: 'workflow-id',
})
it('requires workflowId parameter', async () => {
const res = await GET(createRequest('http://test/api/schedules'))
const response = await POST(req)
expect(res.status).toBe(400)
})
expect(response.status).toBe(400)
const data = await response.json()
expect(data).toHaveProperty('error', 'Invalid request data')
it('returns 404 for non-existent workflow', async () => {
mockDbChain([[]])
const res = await GET(createRequest('http://test/api/schedules?workflowId=wf-1'))
expect(res.status).toBe(404)
})
it('denies access for unauthorized user', async () => {
mockDbChain([[{ userId: 'other-user', workspaceId: null }]])
const res = await GET(createRequest('http://test/api/schedules?workflowId=wf-1'))
expect(res.status).toBe(403)
})
it('allows workspace members to view', async () => {
mockDbChain([
[{ userId: 'other-user', workspaceId: 'ws-1' }],
[{ id: 'sched-1', status: 'active', failedCount: 0 }],
])
const res = await GET(createRequest('http://test/api/schedules?workflowId=wf-1'))
expect(res.status).toBe(200)
})
it('indicates disabled schedule with failures', async () => {
mockDbChain([
[{ userId: 'user-1', workspaceId: null }],
[{ id: 'sched-1', status: 'disabled', failedCount: 100 }],
])
const res = await GET(createRequest('http://test/api/schedules?workflowId=wf-1'))
const data = await res.json()
expect(res.status).toBe(200)
expect(data.isDisabled).toBe(true)
expect(data.hasFailures).toBe(true)
})
})

View File

@@ -2,61 +2,13 @@ import { db } from '@sim/db'
import { workflow, workflowSchedule } from '@sim/db/schema'
import { and, eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getSession } from '@/lib/auth'
import { generateRequestId } from '@/lib/core/utils/request'
import { createLogger } from '@/lib/logs/console/logger'
import {
type BlockState,
calculateNextRunTime,
generateCronExpression,
getScheduleTimeValues,
getSubBlockValue,
validateCronExpression,
} from '@/lib/workflows/schedules/utils'
import { getUserEntityPermissions } from '@/lib/workspaces/permissions/utils'
const logger = createLogger('ScheduledAPI')
// Shape of the POST body: the workflow to (re)schedule plus its serialized state.
const ScheduleRequestSchema = z.object({
  workflowId: z.string(),
  // Optional: target a specific schedule block; otherwise the starter block is located by type.
  blockId: z.string().optional(),
  // Serialized workflow graph; block sub-values carry the schedule configuration.
  state: z.object({
    blocks: z.record(z.any()),
    edges: z.array(z.any()),
    loops: z.record(z.any()),
  }),
})
/**
 * Returns true when the starter/schedule block carries enough configuration
 * to build a schedule of the requested type.
 *
 * Time components are considered present when they hold a real value rather
 * than being truthy: the previous `!!time[0] || !!time[1]` checks rejected a
 * legitimate midnight configuration ([0, 0]) because both parts are falsy.
 *
 * @param scheduleType  schedule kind ('minutes' | 'hourly' | 'daily' | ...).
 * @param scheduleValues parsed time values from the block's sub-blocks.
 * @param starterBlock  the block whose sub-values are inspected (used for 'custom').
 */
function hasValidScheduleConfig(
  scheduleType: string | undefined,
  scheduleValues: ReturnType<typeof getScheduleTimeValues>,
  starterBlock: BlockState
): boolean {
  // A time component counts as configured when it is not undefined/null/''/NaN.
  // Zero is accepted so midnight (hour 0, minute 0) validates.
  // NOTE(review): assumes unset values surface as undefined/null/''/NaN rather
  // than literal 0 — confirm against getScheduleTimeValues.
  const isSet = (v: unknown): boolean =>
    v !== undefined && v !== null && v !== '' && !Number.isNaN(v as number)
  const hasTime = (time: readonly unknown[]): boolean => isSet(time[0]) || isSet(time[1])

  switch (scheduleType) {
    case 'minutes':
      return !!scheduleValues.minutesInterval
    case 'hourly':
      return scheduleValues.hourlyMinute !== undefined
    case 'daily':
      return hasTime(scheduleValues.dailyTime)
    case 'weekly':
      return !!scheduleValues.weeklyDay && hasTime(scheduleValues.weeklyTime)
    case 'monthly':
      return !!scheduleValues.monthlyDay && hasTime(scheduleValues.monthlyTime)
    case 'custom':
      // Custom schedules only need a cron expression; it is validated later.
      return !!getSubBlockValue(starterBlock, 'cronExpression')
    default:
      // Unknown or absent schedule type: nothing to schedule.
      return false
  }
}
/**
* Get schedule information for a workflow
*/
@@ -65,11 +17,6 @@ export async function GET(req: NextRequest) {
const url = new URL(req.url)
const workflowId = url.searchParams.get('workflowId')
const blockId = url.searchParams.get('blockId')
const mode = url.searchParams.get('mode')
if (mode && mode !== 'schedule') {
return NextResponse.json({ schedule: null })
}
try {
const session = await getSession()
@@ -145,262 +92,3 @@ export async function GET(req: NextRequest) {
return NextResponse.json({ error: 'Failed to retrieve workflow schedule' }, { status: 500 })
}
}
// Per-user fixed-window rate limiting for schedule saves (in-process only).
// NOTE(review): entries are overwritten but never evicted, so the map grows
// with the number of distinct users over the process lifetime — confirm this
// is acceptable or add periodic cleanup.
const saveAttempts = new Map<string, { count: number; resetAt: number }>()
const RATE_LIMIT_WINDOW = 60000 // 1 minute
const RATE_LIMIT_MAX = 10 // 10 saves per minute
/**
* Create or update a schedule for a workflow
*/
export async function POST(req: NextRequest) {
  // Correlates every log line emitted while handling this request.
  const requestId = generateRequestId()

  try {
    const session = await getSession()
    if (!session?.user?.id) {
      logger.warn(`[${requestId}] Unauthorized schedule update attempt`)
      return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
    }

    // Fixed-window rate limit keyed by user id, backed by the module-level
    // saveAttempts map. Only protects a single process instance.
    const now = Date.now()
    const userKey = session.user.id
    const limit = saveAttempts.get(userKey)

    if (limit && limit.resetAt > now) {
      if (limit.count >= RATE_LIMIT_MAX) {
        logger.warn(`[${requestId}] Rate limit exceeded for user: ${userKey}`)
        return NextResponse.json(
          { error: 'Too many save attempts. Please wait a moment and try again.' },
          { status: 429 }
        )
      }
      limit.count++
    } else {
      // Window expired (or first save): start a fresh counting window.
      saveAttempts.set(userKey, { count: 1, resetAt: now + RATE_LIMIT_WINDOW })
    }

    // Validate the request body shape; ZodError is handled in the catch below.
    const body = await req.json()
    const { workflowId, blockId, state } = ScheduleRequestSchema.parse(body)

    logger.info(`[${requestId}] Processing schedule update for workflow ${workflowId}`)

    // Load the owning user/workspace so we can authorize the caller.
    const [workflowRecord] = await db
      .select({ userId: workflow.userId, workspaceId: workflow.workspaceId })
      .from(workflow)
      .where(eq(workflow.id, workflowId))
      .limit(1)

    if (!workflowRecord) {
      logger.warn(`[${requestId}] Workflow not found: ${workflowId}`)
      return NextResponse.json({ error: 'Workflow not found' }, { status: 404 })
    }

    // The owner may always modify; otherwise workspace 'write'/'admin' is required.
    let isAuthorized = workflowRecord.userId === session.user.id

    if (!isAuthorized && workflowRecord.workspaceId) {
      const userPermission = await getUserEntityPermissions(
        session.user.id,
        'workspace',
        workflowRecord.workspaceId
      )
      isAuthorized = userPermission === 'write' || userPermission === 'admin'
    }

    if (!isAuthorized) {
      logger.warn(
        `[${requestId}] User not authorized to modify schedule for workflow: ${workflowId}`
      )
      return NextResponse.json({ error: 'Not authorized to modify this workflow' }, { status: 403 })
    }

    // Resolve the block that carries the schedule configuration: an explicit
    // blockId if given, otherwise the first starter/schedule block in the state.
    let targetBlock: BlockState | undefined

    if (blockId) {
      targetBlock = Object.values(state.blocks).find((block: any) => block.id === blockId) as
        | BlockState
        | undefined
    } else {
      targetBlock = Object.values(state.blocks).find(
        (block: any) => block.type === 'starter' || block.type === 'schedule'
      ) as BlockState | undefined
    }

    if (!targetBlock) {
      logger.warn(`[${requestId}] No starter or schedule block found in workflow ${workflowId}`)
      return NextResponse.json(
        { error: 'No starter or schedule block found in workflow' },
        { status: 400 }
      )
    }

    const startWorkflow = getSubBlockValue(targetBlock, 'startWorkflow')
    const scheduleType = getSubBlockValue(targetBlock, 'scheduleType')
    const scheduleValues = getScheduleTimeValues(targetBlock)

    const hasScheduleConfig = hasValidScheduleConfig(scheduleType, scheduleValues, targetBlock)

    // Dedicated schedule blocks are always scheduled; starter blocks only when
    // the user opted into 'schedule' mode AND provided a valid configuration.
    const isScheduleBlock = targetBlock.type === 'schedule'
    const hasValidConfig = isScheduleBlock || (startWorkflow === 'schedule' && hasScheduleConfig)

    logger.info(`[${requestId}] Schedule validation debug:`, {
      workflowId,
      blockId,
      blockType: targetBlock.type,
      isScheduleBlock,
      startWorkflow,
      scheduleType,
      hasScheduleConfig,
      hasValidConfig,
      scheduleValues: {
        minutesInterval: scheduleValues.minutesInterval,
        dailyTime: scheduleValues.dailyTime,
        cronExpression: scheduleValues.cronExpression,
      },
    })

    // No valid configuration means the user turned scheduling off: remove any
    // existing schedule row(s) for this workflow (scoped to blockId if given).
    if (!hasValidConfig) {
      logger.info(
        `[${requestId}] Removing schedule for workflow ${workflowId} - no valid configuration found`
      )

      const deleteConditions = [eq(workflowSchedule.workflowId, workflowId)]
      if (blockId) {
        deleteConditions.push(eq(workflowSchedule.blockId, blockId))
      }

      await db
        .delete(workflowSchedule)
        .where(deleteConditions.length > 1 ? and(...deleteConditions) : deleteConditions[0])

      return NextResponse.json({ message: 'Schedule removed' })
    }

    if (isScheduleBlock) {
      logger.info(`[${requestId}] Processing schedule trigger block for workflow ${workflowId}`)
    } else if (startWorkflow !== 'schedule') {
      logger.info(
        `[${requestId}] Setting workflow to scheduled mode based on schedule configuration`
      )
    }

    logger.debug(`[${requestId}] Schedule type for workflow ${workflowId}: ${scheduleType}`)

    let cronExpression: string | null = null
    let nextRunAt: Date | undefined
    const timezone = getSubBlockValue(targetBlock, 'timezone') || 'UTC'

    try {
      const defaultScheduleType = scheduleType || 'daily'
      const scheduleStartAt = getSubBlockValue(targetBlock, 'scheduleStartAt')
      const scheduleTime = getSubBlockValue(targetBlock, 'scheduleTime')

      logger.debug(`[${requestId}] Schedule configuration:`, {
        type: defaultScheduleType,
        timezone,
        startDate: scheduleStartAt || 'not specified',
        time: scheduleTime || 'not specified',
      })

      // A stale cronExpression sub-value must not leak into non-custom types,
      // so it is cleared before generating the expression.
      const sanitizedScheduleValues =
        defaultScheduleType !== 'custom'
          ? { ...scheduleValues, cronExpression: null }
          : scheduleValues

      cronExpression = generateCronExpression(defaultScheduleType, sanitizedScheduleValues)

      if (cronExpression) {
        const validation = validateCronExpression(cronExpression, timezone)
        if (!validation.isValid) {
          logger.error(`[${requestId}] Invalid cron expression: ${validation.error}`, {
            scheduleType: defaultScheduleType,
            cronExpression,
          })
          return NextResponse.json(
            { error: `Invalid schedule configuration: ${validation.error}` },
            { status: 400 }
          )
        }
      }

      nextRunAt = calculateNextRunTime(defaultScheduleType, sanitizedScheduleValues)

      logger.debug(
        `[${requestId}] Generated cron: ${cronExpression}, next run at: ${nextRunAt.toISOString()}`
      )
    } catch (error: any) {
      // Generation/validation failures are user configuration errors → 400.
      logger.error(`[${requestId}] Error generating schedule: ${error}`)
      const errorMessage = error?.message || 'Failed to generate schedule'
      return NextResponse.json({ error: errorMessage }, { status: 400 })
    }

    // Row to insert when no schedule exists yet for (workflowId, blockId).
    const values = {
      id: crypto.randomUUID(),
      workflowId,
      blockId,
      cronExpression,
      triggerType: 'schedule',
      createdAt: new Date(),
      updatedAt: new Date(),
      nextRunAt,
      timezone,
      status: 'active', // Ensure new schedules are active
      failedCount: 0, // Reset failure count for new schedules
    }

    // Columns applied on conflict: reconfiguring reactivates a disabled
    // schedule and resets its failure counter.
    const setValues = {
      blockId,
      cronExpression,
      updatedAt: new Date(),
      nextRunAt,
      timezone,
      status: 'active', // Reactivate if previously disabled
      failedCount: 0, // Reset failure count on reconfiguration
    }

    // Upsert keyed on (workflowId, blockId) inside a transaction.
    await db.transaction(async (tx) => {
      await tx
        .insert(workflowSchedule)
        .values(values)
        .onConflictDoUpdate({
          target: [workflowSchedule.workflowId, workflowSchedule.blockId],
          set: setValues,
        })
    })

    logger.info(`[${requestId}] Schedule updated for workflow ${workflowId}`, {
      nextRunAt: nextRunAt?.toISOString(),
      cronExpression,
    })

    // Best-effort telemetry; failures never affect the API response.
    try {
      const { trackPlatformEvent } = await import('@/lib/core/telemetry')
      trackPlatformEvent('platform.schedule.created', {
        'workflow.id': workflowId,
        'schedule.type': scheduleType || 'daily',
        'schedule.timezone': timezone,
        'schedule.is_custom': scheduleType === 'custom',
      })
    } catch (_e) {
      // Silently fail
    }

    return NextResponse.json({
      message: 'Schedule updated',
      schedule: { id: values.id },
      nextRunAt,
      cronExpression,
    })
  } catch (error: any) {
    logger.error(`[${requestId}] Error updating workflow schedule`, error)

    // Body-shape violations surface as 400 with Zod's issue details.
    if (error instanceof z.ZodError) {
      return NextResponse.json(
        { error: 'Invalid request data', details: error.errors },
        { status: 400 }
      )
    }

    const errorMessage = error?.message || 'Failed to update workflow schedule'
    return NextResponse.json({ error: errorMessage }, { status: 500 })
  }
}

View File

@@ -169,7 +169,7 @@ export async function PUT(request: NextRequest, { params }: { params: Promise<{
if (creatorId !== undefined) updateData.creatorId = creatorId
if (updateState && template.workflowId) {
const { verifyWorkflowAccess } = await import('@/socket-server/middleware/permissions')
const { verifyWorkflowAccess } = await import('@/socket/middleware/permissions')
const { hasAccess: hasWorkflowAccess } = await verifyWorkflowAccess(
session.user.id,
template.workflowId

View File

@@ -1,9 +1,9 @@
import { type NextRequest, NextResponse } from 'next/server'
import { authorizeCredentialUse } from '@/lib/auth/credential-access'
import { validateUUID } from '@/lib/core/security/input-validation'
import { generateRequestId } from '@/lib/core/utils/request'
import { createLogger } from '@/lib/logs/console/logger'
import { refreshAccessTokenIfNeeded } from '@/app/api/auth/oauth/utils'
import { isUuidV4 } from '@/executor/constants'
export const dynamic = 'force-dynamic'
const logger = createLogger('GoogleCalendarAPI')
@@ -35,18 +35,14 @@ export async function GET(request: NextRequest) {
return NextResponse.json({ error: 'Credential ID is required' }, { status: 400 })
}
const credentialValidation = validateUUID(credentialId, 'credentialId')
if (!credentialValidation.isValid) {
if (!isUuidV4(credentialId)) {
logger.warn(`[${requestId}] Invalid credentialId format`, { credentialId })
return NextResponse.json({ error: credentialValidation.error }, { status: 400 })
return NextResponse.json({ error: 'Invalid credential ID format' }, { status: 400 })
}
if (workflowId) {
const workflowValidation = validateUUID(workflowId, 'workflowId')
if (!workflowValidation.isValid) {
logger.warn(`[${requestId}] Invalid workflowId format`, { workflowId })
return NextResponse.json({ error: workflowValidation.error }, { status: 400 })
}
if (workflowId && !isUuidV4(workflowId)) {
logger.warn(`[${requestId}] Invalid workflowId format`, { workflowId })
return NextResponse.json({ error: 'Invalid workflow ID format' }, { status: 400 })
}
const authz = await authorizeCredentialUse(request, { credentialId, workflowId })
if (!authz.ok || !authz.credentialOwnerUserId) {

View File

@@ -39,8 +39,10 @@ export async function POST(request: NextRequest) {
const body = await request.json()
const validated = SearchRequestSchema.parse(body)
if (!env.EXA_API_KEY) {
logger.error(`[${requestId}] EXA_API_KEY not configured`)
const exaApiKey = env.EXA_API_KEY
if (!exaApiKey) {
logger.error(`[${requestId}] No Exa API key available`)
return NextResponse.json(
{ success: false, error: 'Search service not configured' },
{ status: 503 }
@@ -57,7 +59,7 @@ export async function POST(request: NextRequest) {
type: 'auto',
useAutoprompt: true,
highlights: true,
apiKey: env.EXA_API_KEY,
apiKey: exaApiKey,
})
if (!result.success) {
@@ -87,8 +89,8 @@ export async function POST(request: NextRequest) {
output: 0,
total: SEARCH_TOOL_COST,
tokens: {
prompt: 0,
completion: 0,
input: 0,
output: 0,
total: 0,
},
model: 'search-exa',

View File

@@ -3,6 +3,7 @@ import { userStats, workflow } from '@sim/db/schema'
import { eq, sql } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import OpenAI, { AzureOpenAI } from 'openai'
import { getBYOKKey } from '@/lib/api-key/byok'
import { getSession } from '@/lib/auth'
import { logModelUsage } from '@/lib/billing/core/usage-log'
import { checkAndBillOverageThreshold } from '@/lib/billing/threshold-billing'
@@ -75,7 +76,8 @@ async function updateUserStatsForWand(
completion_tokens?: number
total_tokens?: number
},
requestId: string
requestId: string,
isBYOK = false
): Promise<void> {
if (!isBillingEnabled) {
logger.debug(`[${requestId}] Billing is disabled, skipping wand usage cost update`)
@@ -93,21 +95,24 @@ async function updateUserStatsForWand(
const completionTokens = usage.completion_tokens || 0
const modelName = useWandAzure ? wandModelName : 'gpt-4o'
const pricing = getModelPricing(modelName)
let costToStore = 0
const costMultiplier = getCostMultiplier()
let modelCost = 0
if (!isBYOK) {
const pricing = getModelPricing(modelName)
const costMultiplier = getCostMultiplier()
let modelCost = 0
if (pricing) {
const inputCost = (promptTokens / 1000000) * pricing.input
const outputCost = (completionTokens / 1000000) * pricing.output
modelCost = inputCost + outputCost
} else {
modelCost = (promptTokens / 1000000) * 0.005 + (completionTokens / 1000000) * 0.015
if (pricing) {
const inputCost = (promptTokens / 1000000) * pricing.input
const outputCost = (completionTokens / 1000000) * pricing.output
modelCost = inputCost + outputCost
} else {
modelCost = (promptTokens / 1000000) * 0.005 + (completionTokens / 1000000) * 0.015
}
costToStore = modelCost * costMultiplier
}
const costToStore = modelCost * costMultiplier
await db
.update(userStats)
.set({
@@ -122,6 +127,7 @@ async function updateUserStatsForWand(
userId,
tokensUsed: totalTokens,
costAdded: costToStore,
isBYOK,
})
await logModelUsage({
@@ -149,14 +155,6 @@ export async function POST(req: NextRequest) {
return NextResponse.json({ success: false, error: 'Unauthorized' }, { status: 401 })
}
if (!client) {
logger.error(`[${requestId}] AI client not initialized. Missing API key.`)
return NextResponse.json(
{ success: false, error: 'Wand generation service is not configured.' },
{ status: 503 }
)
}
try {
const body = (await req.json()) as RequestBody
@@ -170,6 +168,7 @@ export async function POST(req: NextRequest) {
)
}
let workspaceId: string | null = null
if (workflowId) {
const [workflowRecord] = await db
.select({ workspaceId: workflow.workspaceId, userId: workflow.userId })
@@ -182,6 +181,8 @@ export async function POST(req: NextRequest) {
return NextResponse.json({ success: false, error: 'Workflow not found' }, { status: 404 })
}
workspaceId = workflowRecord.workspaceId
if (workflowRecord.workspaceId) {
const permission = await verifyWorkspaceMembership(
session.user.id,
@@ -199,6 +200,28 @@ export async function POST(req: NextRequest) {
}
}
let isBYOK = false
let activeClient = client
let byokApiKey: string | null = null
if (workspaceId && !useWandAzure) {
const byokResult = await getBYOKKey(workspaceId, 'openai')
if (byokResult) {
isBYOK = true
byokApiKey = byokResult.apiKey
activeClient = new OpenAI({ apiKey: byokResult.apiKey })
logger.info(`[${requestId}] Using BYOK OpenAI key for wand generation`)
}
}
if (!activeClient) {
logger.error(`[${requestId}] AI client not initialized. Missing API key.`)
return NextResponse.json(
{ success: false, error: 'Wand generation service is not configured.' },
{ status: 503 }
)
}
const finalSystemPrompt =
systemPrompt ||
'You are a helpful AI assistant. Generate content exactly as requested by the user.'
@@ -241,7 +264,7 @@ export async function POST(req: NextRequest) {
if (useWandAzure) {
headers['api-key'] = azureApiKey!
} else {
headers.Authorization = `Bearer ${openaiApiKey}`
headers.Authorization = `Bearer ${byokApiKey || openaiApiKey}`
}
logger.debug(`[${requestId}] Making streaming request to: ${apiUrl}`)
@@ -310,7 +333,7 @@ export async function POST(req: NextRequest) {
logger.info(`[${requestId}] Received [DONE] signal`)
if (finalUsage) {
await updateUserStatsForWand(session.user.id, finalUsage, requestId)
await updateUserStatsForWand(session.user.id, finalUsage, requestId, isBYOK)
}
controller.enqueue(
@@ -395,7 +418,7 @@ export async function POST(req: NextRequest) {
}
}
const completion = await client.chat.completions.create({
const completion = await activeClient.chat.completions.create({
model: useWandAzure ? wandModelName : 'gpt-4o',
messages: messages,
temperature: 0.3,
@@ -417,7 +440,7 @@ export async function POST(req: NextRequest) {
logger.info(`[${requestId}] Wand generation successful`)
if (completion.usage) {
await updateUserStatsForWand(session.user.id, completion.usage, requestId)
await updateUserStatsForWand(session.user.id, completion.usage, requestId, isBYOK)
}
return NextResponse.json({ success: true, content: generatedContent })

View File

@@ -3,6 +3,8 @@
*
* @vitest-environment node
*/
import { loggerMock } from '@sim/testing'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import {
createMockRequest,
@@ -176,6 +178,8 @@ vi.mock('drizzle-orm/postgres-js', () => ({
vi.mock('postgres', () => vi.fn().mockReturnValue({}))
vi.mock('@/lib/logs/console/logger', () => loggerMock)
process.env.DATABASE_URL = 'postgresql://test:test@localhost:5432/test'
import { POST } from '@/app/api/webhooks/trigger/[path]/route'
@@ -257,9 +261,6 @@ describe('Webhook Trigger API Route', () => {
expect(data.message).toBe('Webhook processed')
})
/**
* Test generic webhook with Bearer token authentication
*/
it('should authenticate with Bearer token when no custom header is configured', async () => {
globalMockData.webhooks.push({
id: 'generic-webhook-id',
@@ -489,7 +490,7 @@ describe('Webhook Trigger API Route', () => {
const headers = {
'Content-Type': 'application/json',
Authorization: 'Bearer exclusive-token', // Correct token but wrong header type
Authorization: 'Bearer exclusive-token',
}
const req = createMockRequest('POST', { event: 'exclusivity.test' }, headers)
const params = Promise.resolve({ path: 'test-path' })
@@ -517,7 +518,7 @@ describe('Webhook Trigger API Route', () => {
const headers = {
'Content-Type': 'application/json',
'X-Wrong-Header': 'correct-token', // Correct token but wrong header name
'X-Wrong-Header': 'correct-token',
}
const req = createMockRequest('POST', { event: 'wrong.header.name.test' }, headers)
const params = Promise.resolve({ path: 'test-path' })

View File

@@ -3,7 +3,12 @@ import { and, desc, eq } from 'drizzle-orm'
import type { NextRequest } from 'next/server'
import { generateRequestId } from '@/lib/core/utils/request'
import { createLogger } from '@/lib/logs/console/logger'
import { deployWorkflow } from '@/lib/workflows/persistence/utils'
import { deployWorkflow, loadWorkflowFromNormalizedTables } from '@/lib/workflows/persistence/utils'
import {
createSchedulesForDeploy,
deleteSchedulesForWorkflow,
validateWorkflowSchedules,
} from '@/lib/workflows/schedules'
import { validateWorkflowPermissions } from '@/lib/workflows/utils'
import { createErrorResponse, createSuccessResponse } from '@/app/api/workflows/utils'
@@ -55,13 +60,20 @@ export async function GET(request: NextRequest, { params }: { params: Promise<{
const { loadWorkflowFromNormalizedTables } = await import('@/lib/workflows/persistence/utils')
const normalizedData = await loadWorkflowFromNormalizedTables(id)
if (normalizedData) {
const [workflowRecord] = await db
.select({ variables: workflow.variables })
.from(workflow)
.where(eq(workflow.id, id))
.limit(1)
const currentState = {
blocks: normalizedData.blocks,
edges: normalizedData.edges,
loops: normalizedData.loops,
parallels: normalizedData.parallels,
variables: workflowRecord?.variables || {},
}
const { hasWorkflowChanged } = await import('@/lib/workflows/utils')
const { hasWorkflowChanged } = await import('@/lib/workflows/comparison')
needsRedeployment = hasWorkflowChanged(currentState as any, active.state as any)
}
}
@@ -98,13 +110,25 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
return createErrorResponse(error.message, error.status)
}
// Attribution: this route is UI-only; require session user as actor
const actorUserId: string | null = session?.user?.id ?? null
if (!actorUserId) {
logger.warn(`[${requestId}] Unable to resolve actor user for workflow deployment: ${id}`)
return createErrorResponse('Unable to determine deploying user', 400)
}
const normalizedData = await loadWorkflowFromNormalizedTables(id)
if (!normalizedData) {
return createErrorResponse('Failed to load workflow state', 500)
}
const scheduleValidation = validateWorkflowSchedules(normalizedData.blocks)
if (!scheduleValidation.isValid) {
logger.warn(
`[${requestId}] Schedule validation failed for workflow ${id}: ${scheduleValidation.error}`
)
return createErrorResponse(`Invalid schedule configuration: ${scheduleValidation.error}`, 400)
}
const deployResult = await deployWorkflow({
workflowId: id,
deployedBy: actorUserId,
@@ -117,6 +141,23 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
const deployedAt = deployResult.deployedAt!
let scheduleInfo: { scheduleId?: string; cronExpression?: string; nextRunAt?: Date } = {}
const scheduleResult = await createSchedulesForDeploy(id, normalizedData.blocks, db)
if (!scheduleResult.success) {
logger.error(
`[${requestId}] Failed to create schedule for workflow ${id}: ${scheduleResult.error}`
)
} else if (scheduleResult.scheduleId) {
scheduleInfo = {
scheduleId: scheduleResult.scheduleId,
cronExpression: scheduleResult.cronExpression,
nextRunAt: scheduleResult.nextRunAt,
}
logger.info(
`[${requestId}] Schedule created for workflow ${id}: ${scheduleResult.scheduleId}`
)
}
logger.info(`[${requestId}] Workflow deployed successfully: ${id}`)
const responseApiKeyInfo = workflowData!.workspaceId
@@ -127,6 +168,13 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
apiKey: responseApiKeyInfo,
isDeployed: true,
deployedAt,
schedule: scheduleInfo.scheduleId
? {
id: scheduleInfo.scheduleId,
cronExpression: scheduleInfo.cronExpression,
nextRunAt: scheduleInfo.nextRunAt,
}
: undefined,
})
} catch (error: any) {
logger.error(`[${requestId}] Error deploying workflow: ${id}`, {
@@ -156,6 +204,8 @@ export async function DELETE(
}
await db.transaction(async (tx) => {
await deleteSchedulesForWorkflow(id, tx)
await tx
.update(workflowDeploymentVersion)
.set({ isActive: false })
@@ -169,7 +219,6 @@ export async function DELETE(
logger.info(`[${requestId}] Workflow undeployed successfully: ${id}`)
// Track workflow undeployment
try {
const { trackPlatformEvent } = await import('@/lib/core/telemetry')
trackPlatformEvent('platform.workflow.undeployed', {

View File

@@ -7,6 +7,7 @@ import { isTriggerDevEnabled } from '@/lib/core/config/feature-flags'
import { generateRequestId } from '@/lib/core/utils/request'
import { SSE_HEADERS } from '@/lib/core/utils/sse'
import { getBaseUrl } from '@/lib/core/utils/urls'
import { markExecutionCancelled } from '@/lib/execution/cancellation'
import { processInputFileFields } from '@/lib/execution/files'
import { preprocessExecution } from '@/lib/execution/preprocessing'
import { createLogger } from '@/lib/logs/console/logger'
@@ -22,6 +23,7 @@ import {
import { createStreamingResponse } from '@/lib/workflows/streaming/streaming'
import { createHttpResponseFromBlock, workflowHasResponseBlock } from '@/lib/workflows/utils'
import type { WorkflowExecutionPayload } from '@/background/workflow-execution'
import { normalizeName } from '@/executor/constants'
import { type ExecutionMetadata, ExecutionSnapshot } from '@/executor/execution/snapshot'
import type { StreamingExecution } from '@/executor/types'
import { Serializer } from '@/serializer'
@@ -86,10 +88,9 @@ function resolveOutputIds(
const blockName = outputId.substring(0, dotIndex)
const path = outputId.substring(dotIndex + 1)
const normalizedBlockName = blockName.toLowerCase().replace(/\s+/g, '')
const normalizedBlockName = normalizeName(blockName)
const block = Object.values(blocks).find((b: any) => {
const normalized = (b.name || '').toLowerCase().replace(/\s+/g, '')
return normalized === normalizedBlockName
return normalizeName(b.name || '') === normalizedBlockName
})
if (!block) {
@@ -317,6 +318,7 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
loops: Record<string, any>
parallels: Record<string, any>
deploymentVersionId?: string
variables?: Record<string, any>
} | null = null
let processedInput = input
@@ -326,6 +328,11 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
: await loadDeployedWorkflowState(workflowId)
if (workflowData) {
const deployedVariables =
!shouldUseDraftState && 'variables' in workflowData
? (workflowData as any).variables
: undefined
cachedWorkflowData = {
blocks: workflowData.blocks,
edges: workflowData.edges,
@@ -335,6 +342,7 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
!shouldUseDraftState && 'deploymentVersionId' in workflowData
? (workflowData.deploymentVersionId as string)
: undefined,
variables: deployedVariables,
}
const serializedWorkflow = new Serializer().serializeWorkflow(
@@ -404,11 +412,13 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
workflowStateOverride: effectiveWorkflowStateOverride,
}
const executionVariables = cachedWorkflowData?.variables ?? workflow.variables ?? {}
const snapshot = new ExecutionSnapshot(
metadata,
workflow,
processedInput,
workflow.variables || {},
executionVariables,
selectedOutputs
)
@@ -470,6 +480,8 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
selectedOutputs,
cachedWorkflowData?.blocks || {}
)
const streamVariables = cachedWorkflowData?.variables ?? (workflow as any).variables
const stream = await createStreamingResponse({
requestId,
workflow: {
@@ -477,7 +489,7 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
userId: actorUserId,
workspaceId,
isDeployed: workflow.isDeployed,
variables: (workflow as any).variables,
variables: streamVariables,
},
input: processedInput,
executingUserId: actorUserId,
@@ -496,7 +508,7 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
}
const encoder = new TextEncoder()
let executorInstance: any = null
const abortController = new AbortController()
let isStreamClosed = false
const stream = new ReadableStream<Uint8Array>({
@@ -674,11 +686,13 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
workflowStateOverride: effectiveWorkflowStateOverride,
}
const sseExecutionVariables = cachedWorkflowData?.variables ?? workflow.variables ?? {}
const snapshot = new ExecutionSnapshot(
metadata,
workflow,
processedInput,
workflow.variables || {},
sseExecutionVariables,
selectedOutputs
)
@@ -688,11 +702,9 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
onBlockStart,
onBlockComplete,
onStream,
onExecutorCreated: (executor) => {
executorInstance = executor
},
},
loggingSession,
abortSignal: abortController.signal,
})
if (result.status === 'paused') {
@@ -713,7 +725,7 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
await PauseResumeManager.processQueuedResumes(executionId)
}
if (result.error === 'Workflow execution was cancelled') {
if (result.status === 'cancelled') {
logger.info(`[${requestId}] Workflow execution was cancelled`)
sendEvent({
type: 'execution:cancelled',
@@ -769,11 +781,9 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
},
cancel() {
isStreamClosed = true
logger.info(`[${requestId}] Client aborted SSE stream, cancelling executor`)
if (executorInstance && typeof executorInstance.cancel === 'function') {
executorInstance.cancel()
}
logger.info(`[${requestId}] Client aborted SSE stream, signalling cancellation`)
abortController.abort()
markExecutionCancelled(executionId).catch(() => {})
},
})

View File

@@ -0,0 +1,47 @@
import { type NextRequest, NextResponse } from 'next/server'
import { checkHybridAuth } from '@/lib/auth/hybrid'
import { markExecutionCancelled } from '@/lib/execution/cancellation'
import { createLogger } from '@/lib/logs/console/logger'
const logger = createLogger('CancelExecutionAPI')
export const runtime = 'nodejs'
export const dynamic = 'force-dynamic'
/**
 * POST /api/workflows/[id]/executions/[executionId]/cancel
 *
 * Flags a running workflow execution as cancelled. The cancellation flag is
 * Redis-backed; when Redis is unavailable the caller must rely on the client
 * closing its streaming connection instead.
 */
export async function POST(
  req: NextRequest,
  { params }: { params: Promise<{ id: string; executionId: string }> }
) {
  const { id: workflowId, executionId } = await params
  try {
    // Accept both session and API-key auth; the workflow id comes from the path.
    const auth = await checkHybridAuth(req, { requireWorkflowId: false })
    if (!auth.success || !auth.userId) {
      return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 })
    }
    logger.info('Cancel execution requested', { workflowId, executionId, userId: auth.userId })
    const redisAvailable = await markExecutionCancelled(executionId)
    logger.info(
      redisAvailable
        ? 'Execution marked as cancelled in Redis'
        : 'Redis not available, cancellation will rely on connection close',
      { executionId }
    )
    return NextResponse.json({ success: true, executionId, redisAvailable })
  } catch (error: any) {
    const message = error.message
    logger.error('Failed to cancel execution', { workflowId, executionId, error: message })
    return NextResponse.json({ error: message || 'Failed to cancel execution' }, { status: 500 })
  }
}

View File

@@ -1,5 +1,5 @@
import { db } from '@sim/db'
import { webhook, workflow, workflowSchedule } from '@sim/db/schema'
import { webhook, workflow } from '@sim/db/schema'
import { eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
@@ -10,12 +10,6 @@ import { createLogger } from '@/lib/logs/console/logger'
import { extractAndPersistCustomTools } from '@/lib/workflows/persistence/custom-tools-persistence'
import { saveWorkflowToNormalizedTables } from '@/lib/workflows/persistence/utils'
import { sanitizeAgentToolsInBlocks } from '@/lib/workflows/sanitization/validation'
import {
calculateNextRunTime,
generateCronExpression,
getScheduleTimeValues,
validateCronExpression,
} from '@/lib/workflows/schedules/utils'
import { getWorkflowAccessContext } from '@/lib/workflows/utils'
import type { BlockState } from '@/stores/workflows/workflow/types'
import { generateLoopBlocks, generateParallelBlocks } from '@/stores/workflows/workflow/utils'
@@ -210,7 +204,6 @@ export async function PUT(request: NextRequest, { params }: { params: Promise<{
}
await syncWorkflowWebhooks(workflowId, workflowState.blocks)
await syncWorkflowSchedules(workflowId, workflowState.blocks)
// Extract and persist custom tools to database
try {
@@ -318,79 +311,6 @@ async function syncWorkflowWebhooks(
})
}
type ScheduleBlockInput = Parameters<typeof getScheduleTimeValues>[0]
/**
 * Reconciles `workflowSchedule` rows with the schedule blocks present in a
 * saved workflow, delegating the block-scan/diff mechanics to the generic
 * `syncBlockResources` helper.
 *
 * @param workflowId - Workflow whose schedule blocks should be synced.
 * @param blocks - Block map from the saved workflow state.
 */
async function syncWorkflowSchedules(
  workflowId: string,
  blocks: Record<string, any>
): Promise<void> {
  await syncBlockResources(workflowId, blocks, {
    resourceName: 'schedule',
    // Sub-block holding the persisted schedule record id on each block.
    subBlockId: 'scheduleId',
    buildMetadata: buildScheduleMetadata,
    applyMetadata: upsertScheduleRecord,
  })
}
interface ScheduleMetadata {
cronExpression: string | null
nextRunAt: Date | null
timezone: string
}
/**
 * Derives cron/next-run metadata for a schedule block.
 *
 * @param block - The schedule block from the saved workflow state.
 * @returns Metadata for persisting the schedule, or `null` when the block's
 *   configuration cannot produce a valid schedule (invalid cron, or a thrown
 *   error during generation).
 */
function buildScheduleMetadata(block: BlockState): ScheduleMetadata | null {
  const scheduleType = getSubBlockValue<string>(block, 'scheduleType') || 'daily'
  const scheduleBlock = convertToScheduleBlock(block)
  const scheduleValues = getScheduleTimeValues(scheduleBlock)
  // A user-supplied cron expression only applies to the 'custom' schedule
  // type; clear it for preset types so it cannot leak into generation.
  const sanitizedValues =
    scheduleType !== 'custom' ? { ...scheduleValues, cronExpression: null } : scheduleValues
  try {
    const cronExpression = generateCronExpression(scheduleType, sanitizedValues)
    const timezone = scheduleValues.timezone || 'UTC'
    if (cronExpression) {
      const validation = validateCronExpression(cronExpression, timezone)
      if (!validation.isValid) {
        // Skip syncing this block rather than persist a bad expression.
        logger.warn('Invalid cron expression while syncing schedule', {
          blockId: block.id,
          cronExpression,
          error: validation.error,
        })
        return null
      }
    }
    const nextRunAt = calculateNextRunTime(scheduleType, sanitizedValues)
    return {
      cronExpression,
      timezone,
      nextRunAt,
    }
  } catch (error) {
    // Generation helpers may throw on malformed values; treat as "no schedule".
    logger.error('Failed to build schedule metadata during sync', {
      blockId: block.id,
      error,
    })
    return null
  }
}
/**
 * Projects a full workflow block down to the minimal shape expected by the
 * schedule utilities: its type plus string-valued sub-blocks.
 */
function convertToScheduleBlock(block: BlockState): ScheduleBlockInput {
  const subBlocks: ScheduleBlockInput['subBlocks'] = {}
  for (const [id, subBlock] of Object.entries(block.subBlocks || {})) {
    subBlocks[id] = { value: stringifySubBlockValue(subBlock?.value) }
  }
  return { type: block.type, subBlocks }
}
interface WebhookMetadata {
triggerPath: string
provider: string | null
@@ -473,58 +393,6 @@ async function upsertWebhookRecord(
})
}
/**
 * Creates or updates the `workflowSchedule` row backing a schedule block.
 *
 * @param workflowId - Owning workflow id.
 * @param block - The schedule block (its id is stored on the row).
 * @param scheduleId - Primary key of the schedule row referenced by the block.
 * @param metadata - Freshly computed cron/next-run/timezone values.
 */
async function upsertScheduleRecord(
  workflowId: string,
  block: BlockState,
  scheduleId: string,
  metadata: ScheduleMetadata
): Promise<void> {
  const now = new Date()
  // Fetch any existing row first so its nextRunAt can be preserved when the
  // new metadata does not provide one.
  const [existing] = await db
    .select({
      id: workflowSchedule.id,
      nextRunAt: workflowSchedule.nextRunAt,
    })
    .from(workflowSchedule)
    .where(eq(workflowSchedule.id, scheduleId))
    .limit(1)
  if (existing) {
    await db
      .update(workflowSchedule)
      .set({
        workflowId,
        blockId: block.id,
        cronExpression: metadata.cronExpression,
        // Keep the previously scheduled run when no new one was computed.
        nextRunAt: metadata.nextRunAt ?? existing.nextRunAt,
        timezone: metadata.timezone,
        updatedAt: now,
      })
      .where(eq(workflowSchedule.id, scheduleId))
    return
  }
  // No row found: the block still references scheduleId, so recreate it as an
  // active schedule with a reset failure count.
  await db.insert(workflowSchedule).values({
    id: scheduleId,
    workflowId,
    blockId: block.id,
    cronExpression: metadata.cronExpression,
    nextRunAt: metadata.nextRunAt ?? null,
    triggerType: 'schedule',
    timezone: metadata.timezone,
    status: 'active',
    failedCount: 0,
    createdAt: now,
    updatedAt: now,
  })
  logger.info('Recreated missing schedule after workflow save', {
    workflowId,
    blockId: block.id,
    scheduleId,
  })
}
interface BlockResourceSyncConfig<T> {
resourceName: string
subBlockId: string
@@ -573,27 +441,3 @@ async function syncBlockResources<T>(
}
}
}
/**
 * Coerces an arbitrary sub-block value to its canonical string form.
 *
 * - `null`/`undefined` → empty string
 * - strings pass through unchanged
 * - numbers/booleans via `String()`
 * - `Date` via ISO-8601
 * - everything else via `JSON.stringify`, falling back to `String()` when
 *   serialization throws (e.g. circular refs, BigInt) or yields `undefined`
 *   (functions and symbols) — without the fallback this function could
 *   return `undefined` despite its declared `string` return type.
 */
function stringifySubBlockValue(value: unknown): string {
  if (value === undefined || value === null) {
    return ''
  }
  if (typeof value === 'string') {
    return value
  }
  if (typeof value === 'number' || typeof value === 'boolean') {
    return String(value)
  }
  if (value instanceof Date) {
    return value.toISOString()
  }
  try {
    const serialized = JSON.stringify(value)
    // JSON.stringify returns undefined (not a string) for functions/symbols.
    return serialized !== undefined ? serialized : String(value)
  } catch {
    return String(value)
  }
}

View File

@@ -1,10 +1,10 @@
import { db, workflowDeploymentVersion } from '@sim/db'
import { db, workflow, workflowDeploymentVersion } from '@sim/db'
import { and, desc, eq } from 'drizzle-orm'
import type { NextRequest } from 'next/server'
import { generateRequestId } from '@/lib/core/utils/request'
import { createLogger } from '@/lib/logs/console/logger'
import { hasWorkflowChanged } from '@/lib/workflows/comparison'
import { loadWorkflowFromNormalizedTables } from '@/lib/workflows/persistence/utils'
import { hasWorkflowChanged } from '@/lib/workflows/utils'
import { validateWorkflowAccess } from '@/app/api/workflows/middleware'
import { createErrorResponse, createSuccessResponse } from '@/app/api/workflows/utils'
@@ -22,17 +22,12 @@ export async function GET(request: NextRequest, { params }: { params: Promise<{
return createErrorResponse(validation.error.message, validation.error.status)
}
// Check if the workflow has meaningful changes that would require redeployment
let needsRedeployment = false
if (validation.workflow.isDeployed) {
// Get current state from normalized tables (same logic as deployment API)
// Load current state from normalized tables using centralized helper
const normalizedData = await loadWorkflowFromNormalizedTables(id)
if (!normalizedData) {
// Workflow exists but has no blocks in normalized tables (empty workflow or not migrated)
// This is valid state - return success with no redeployment needed
return createSuccessResponse({
isDeployed: validation.workflow.isDeployed,
deployedAt: validation.workflow.deployedAt,
@@ -41,11 +36,18 @@ export async function GET(request: NextRequest, { params }: { params: Promise<{
})
}
const [workflowRecord] = await db
.select({ variables: workflow.variables })
.from(workflow)
.where(eq(workflow.id, id))
.limit(1)
const currentState = {
blocks: normalizedData.blocks,
edges: normalizedData.edges,
loops: normalizedData.loops,
parallels: normalizedData.parallels,
variables: workflowRecord?.variables || {},
lastSaved: Date.now(),
}
@@ -69,6 +71,7 @@ export async function GET(request: NextRequest, { params }: { params: Promise<{
return createSuccessResponse({
isDeployed: validation.workflow.isDeployed,
deployedAt: validation.workflow.deployedAt,
isPublished: validation.workflow.isPublished,
needsRedeployment,
})
} catch (error) {

View File

@@ -1,117 +0,0 @@
import { type NextRequest, NextResponse } from 'next/server'
import { simAgentClient } from '@/lib/copilot/client'
import { generateRequestId } from '@/lib/core/utils/request'
import { createLogger } from '@/lib/logs/console/logger'
import { getAllBlocks } from '@/blocks/registry'
import type { BlockConfig } from '@/blocks/types'
import { resolveOutputType } from '@/blocks/utils'
import { generateLoopBlocks, generateParallelBlocks } from '@/stores/workflows/workflow/utils'
const logger = createLogger('WorkflowYamlAPI')
/**
 * Converts an in-memory workflow state (JSON) to YAML by delegating to the
 * sim-agent service.
 *
 * Request body: `{ workflowState, subBlockValues?, includeMetadata? }`.
 * Responds `{ success: true, yaml }` on success, or `{ success: false, error }`
 * with an appropriate status on failure.
 */
export async function POST(request: NextRequest) {
  const requestId = generateRequestId()
  try {
    logger.info(`[${requestId}] Converting workflow JSON to YAML`)
    const body = await request.json()
    // NOTE(review): includeMetadata is destructured but never used below —
    // confirm whether sim-agent expects it in the payload.
    const { workflowState, subBlockValues, includeMetadata = false } = body
    if (!workflowState) {
      return NextResponse.json(
        { success: false, error: 'workflowState is required' },
        { status: 400 }
      )
    }
    // Ensure loop blocks have their data populated with defaults
    // (mutates the caller-supplied workflowState in place before forwarding).
    if (workflowState.blocks) {
      Object.entries(workflowState.blocks).forEach(([blockId, block]: [string, any]) => {
        if (block.type === 'loop') {
          // Ensure data field exists
          if (!block.data) {
            block.data = {}
          }
          // Apply defaults if not set
          if (!block.data.loopType) {
            block.data.loopType = 'for'
          }
          // Keep an explicit count of 0; only default when absent/falsy otherwise.
          if (!block.data.count && block.data.count !== 0) {
            block.data.count = 5
          }
          if (!block.data.collection) {
            block.data.collection = ''
          }
          if (!block.data.maxConcurrency) {
            block.data.maxConcurrency = 1
          }
          logger.debug(`[${requestId}] Applied defaults to loop block ${blockId}:`, {
            loopType: block.data.loopType,
            count: block.data.count,
          })
        }
      })
    }
    // Gather block registry and utilities for sim-agent
    const blocks = getAllBlocks()
    const blockRegistry = blocks.reduce(
      (acc, block) => {
        const blockType = block.type
        acc[blockType] = {
          ...block,
          id: blockType,
          subBlocks: block.subBlocks || [],
          outputs: block.outputs || {},
        } as any
        return acc
      },
      {} as Record<string, BlockConfig>
    )
    // Call sim-agent directly; helper functions are shipped as source strings
    // so the agent can evaluate them remotely.
    const result = await simAgentClient.makeRequest('/api/workflow/to-yaml', {
      body: {
        workflowState,
        subBlockValues,
        blockRegistry,
        utilities: {
          generateLoopBlocks: generateLoopBlocks.toString(),
          generateParallelBlocks: generateParallelBlocks.toString(),
          resolveOutputType: resolveOutputType.toString(),
        },
      },
    })
    if (!result.success || !result.data?.yaml) {
      return NextResponse.json(
        {
          success: false,
          error: result.error || 'Failed to generate YAML',
        },
        { status: result.status || 500 }
      )
    }
    logger.info(`[${requestId}] Successfully generated YAML`, {
      yamlLength: result.data.yaml.length,
    })
    return NextResponse.json({
      success: true,
      yaml: result.data.yaml,
    })
  } catch (error) {
    logger.error(`[${requestId}] YAML generation failed`, error)
    return NextResponse.json(
      {
        success: false,
        error: `Failed to generate YAML: ${error instanceof Error ? error.message : 'Unknown error'}`,
      },
      { status: 500 }
    )
  }
}

View File

@@ -1,210 +0,0 @@
import { db } from '@sim/db'
import { workflow } from '@sim/db/schema'
import { eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { getSession } from '@/lib/auth'
import { simAgentClient } from '@/lib/copilot/client'
import { generateRequestId } from '@/lib/core/utils/request'
import { createLogger } from '@/lib/logs/console/logger'
import { loadWorkflowFromNormalizedTables } from '@/lib/workflows/persistence/utils'
import { getUserEntityPermissions } from '@/lib/workspaces/permissions/utils'
import { getAllBlocks } from '@/blocks/registry'
import type { BlockConfig } from '@/blocks/types'
import { resolveOutputType } from '@/blocks/utils'
import { generateLoopBlocks, generateParallelBlocks } from '@/stores/workflows/workflow/utils'
const logger = createLogger('WorkflowYamlExportAPI')
/**
 * Exports a stored workflow as YAML.
 *
 * Flow: authenticate session → authorize (owner or any workspace permission)
 * → load state from the normalized tables → apply loop-block defaults →
 * forward to sim-agent for YAML generation.
 *
 * Query params: `workflowId` (required).
 */
export async function GET(request: NextRequest) {
  const requestId = generateRequestId()
  const url = new URL(request.url)
  const workflowId = url.searchParams.get('workflowId')
  try {
    logger.info(`[${requestId}] Exporting workflow YAML from database: ${workflowId}`)
    if (!workflowId) {
      return NextResponse.json({ success: false, error: 'workflowId is required' }, { status: 400 })
    }
    // Get the session for authentication
    const session = await getSession()
    if (!session?.user?.id) {
      logger.warn(`[${requestId}] Unauthorized access attempt for workflow ${workflowId}`)
      return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
    }
    const userId = session.user.id
    // Fetch the workflow from database
    const workflowData = await db
      .select()
      .from(workflow)
      .where(eq(workflow.id, workflowId))
      .then((rows) => rows[0])
    if (!workflowData) {
      logger.warn(`[${requestId}] Workflow ${workflowId} not found`)
      return NextResponse.json({ error: 'Workflow not found' }, { status: 404 })
    }
    // Check if user has access to this workflow
    let hasAccess = false
    // Case 1: User owns the workflow
    if (workflowData.userId === userId) {
      hasAccess = true
    }
    // Case 2: Workflow belongs to a workspace the user has permissions for
    // (any non-null permission level is sufficient for a read-only export).
    if (!hasAccess && workflowData.workspaceId) {
      const userPermission = await getUserEntityPermissions(
        userId,
        'workspace',
        workflowData.workspaceId
      )
      if (userPermission !== null) {
        hasAccess = true
      }
    }
    if (!hasAccess) {
      logger.warn(`[${requestId}] User ${userId} denied access to workflow ${workflowId}`)
      return NextResponse.json({ error: 'Access denied' }, { status: 403 })
    }
    // Try to load from normalized tables first
    logger.debug(`[${requestId}] Attempting to load workflow ${workflowId} from normalized tables`)
    const normalizedData = await loadWorkflowFromNormalizedTables(workflowId)
    let workflowState: any
    const subBlockValues: Record<string, Record<string, any>> = {}
    if (normalizedData) {
      logger.debug(`[${requestId}] Found normalized data for workflow ${workflowId}:`, {
        blocksCount: Object.keys(normalizedData.blocks).length,
        edgesCount: normalizedData.edges.length,
      })
      // Use normalized table data - construct state from normalized tables
      workflowState = {
        deploymentStatuses: {},
        blocks: normalizedData.blocks,
        edges: normalizedData.edges,
        loops: normalizedData.loops,
        parallels: normalizedData.parallels,
        lastSaved: Date.now(),
        isDeployed: workflowData.isDeployed || false,
        deployedAt: workflowData.deployedAt,
      }
      // Extract subblock values from the normalized blocks
      Object.entries(normalizedData.blocks).forEach(([blockId, block]: [string, any]) => {
        subBlockValues[blockId] = {}
        if (block.subBlocks) {
          Object.entries(block.subBlocks).forEach(([subBlockId, subBlock]: [string, any]) => {
            if (subBlock && typeof subBlock === 'object' && 'value' in subBlock) {
              subBlockValues[blockId][subBlockId] = subBlock.value
            }
          })
        }
      })
      logger.info(`[${requestId}] Loaded workflow ${workflowId} from normalized tables`)
    } else {
      // No fallback path: a workflow without normalized data cannot be exported.
      return NextResponse.json(
        { success: false, error: 'Workflow has no normalized data' },
        { status: 400 }
      )
    }
    // Ensure loop blocks have their data populated with defaults
    if (workflowState.blocks) {
      Object.entries(workflowState.blocks).forEach(([blockId, block]: [string, any]) => {
        if (block.type === 'loop') {
          // Ensure data field exists
          if (!block.data) {
            block.data = {}
          }
          // Apply defaults if not set
          if (!block.data.loopType) {
            block.data.loopType = 'for'
          }
          // Keep an explicit count of 0; only default when absent/falsy otherwise.
          if (!block.data.count && block.data.count !== 0) {
            block.data.count = 5
          }
          if (!block.data.collection) {
            block.data.collection = ''
          }
          if (!block.data.maxConcurrency) {
            block.data.maxConcurrency = 1
          }
          logger.debug(`[${requestId}] Applied defaults to loop block ${blockId}:`, {
            loopType: block.data.loopType,
            count: block.data.count,
          })
        }
      })
    }
    // Gather block registry and utilities for sim-agent
    const blocks = getAllBlocks()
    const blockRegistry = blocks.reduce(
      (acc, block) => {
        const blockType = block.type
        acc[blockType] = {
          ...block,
          id: blockType,
          subBlocks: block.subBlocks || [],
          outputs: block.outputs || {},
        } as any
        return acc
      },
      {} as Record<string, BlockConfig>
    )
    // Call sim-agent directly; helper functions are shipped as source strings
    // so the agent can evaluate them remotely.
    const result = await simAgentClient.makeRequest('/api/workflow/to-yaml', {
      body: {
        workflowState,
        subBlockValues,
        blockRegistry,
        utilities: {
          generateLoopBlocks: generateLoopBlocks.toString(),
          generateParallelBlocks: generateParallelBlocks.toString(),
          resolveOutputType: resolveOutputType.toString(),
        },
      },
    })
    if (!result.success || !result.data?.yaml) {
      return NextResponse.json(
        {
          success: false,
          error: result.error || 'Failed to generate YAML',
        },
        { status: result.status || 500 }
      )
    }
    logger.info(`[${requestId}] Successfully generated YAML from database`, {
      yamlLength: result.data.yaml.length,
    })
    return NextResponse.json({
      success: true,
      yaml: result.data.yaml,
    })
  } catch (error) {
    logger.error(`[${requestId}] YAML export failed`, error)
    return NextResponse.json(
      {
        success: false,
        error: `Failed to export YAML: ${error instanceof Error ? error.message : 'Unknown error'}`,
      },
      { status: 500 }
    )
  }
}

View File

@@ -0,0 +1,256 @@
import { db } from '@sim/db'
import { workspace, workspaceBYOKKeys } from '@sim/db/schema'
import { and, eq } from 'drizzle-orm'
import { nanoid } from 'nanoid'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getSession } from '@/lib/auth'
import { decryptSecret, encryptSecret } from '@/lib/core/security/encryption'
import { generateRequestId } from '@/lib/core/utils/request'
import { createLogger } from '@/lib/logs/console/logger'
import { getUserEntityPermissions } from '@/lib/workspaces/permissions/utils'
const logger = createLogger('WorkspaceBYOKKeysAPI')
const VALID_PROVIDERS = ['openai', 'anthropic', 'google', 'mistral'] as const
const UpsertKeySchema = z.object({
providerId: z.enum(VALID_PROVIDERS),
apiKey: z.string().min(1, 'API key is required'),
})
const DeleteKeySchema = z.object({
providerId: z.enum(VALID_PROVIDERS),
})
/**
 * Produces a display-safe preview of an API key: a short prefix and the last
 * four characters, with very short keys replaced entirely by bullets.
 */
function maskApiKey(key: string): string {
  // Too short to reveal anything meaningful — fully mask.
  if (key.length <= 8) {
    return '•'.repeat(8)
  }
  // Show a 4-char prefix for keys up to 12 chars, 6 chars for longer ones.
  const prefixLength = key.length <= 12 ? 4 : 6
  return `${key.slice(0, prefixLength)}...${key.slice(-4)}`
}
/**
 * GET /api/workspaces/[id]/byok-keys
 *
 * Lists the workspace's BYOK provider keys with masked previews. Any
 * workspace permission level may read; plaintext keys never leave the server.
 */
export async function GET(request: NextRequest, { params }: { params: Promise<{ id: string }> }) {
  const requestId = generateRequestId()
  const workspaceId = (await params).id
  try {
    const session = await getSession()
    if (!session?.user?.id) {
      logger.warn(`[${requestId}] Unauthorized BYOK keys access attempt`)
      return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
    }
    const userId = session.user.id
    // 404 before the permission check so callers can distinguish a missing workspace.
    const ws = await db.select().from(workspace).where(eq(workspace.id, workspaceId)).limit(1)
    if (!ws.length) {
      return NextResponse.json({ error: 'Workspace not found' }, { status: 404 })
    }
    const permission = await getUserEntityPermissions(userId, 'workspace', workspaceId)
    if (!permission) {
      return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
    }
    const byokKeys = await db
      .select({
        id: workspaceBYOKKeys.id,
        providerId: workspaceBYOKKeys.providerId,
        encryptedApiKey: workspaceBYOKKeys.encryptedApiKey,
        createdBy: workspaceBYOKKeys.createdBy,
        createdAt: workspaceBYOKKeys.createdAt,
        updatedAt: workspaceBYOKKeys.updatedAt,
      })
      .from(workspaceBYOKKeys)
      .where(eq(workspaceBYOKKeys.workspaceId, workspaceId))
      .orderBy(workspaceBYOKKeys.providerId)
    // Decrypt each key only to build the masked preview.
    const formattedKeys = await Promise.all(
      byokKeys.map(async (key) => {
        try {
          const { decrypted } = await decryptSecret(key.encryptedApiKey)
          return {
            id: key.id,
            providerId: key.providerId,
            maskedKey: maskApiKey(decrypted),
            createdBy: key.createdBy,
            createdAt: key.createdAt,
            updatedAt: key.updatedAt,
          }
        } catch (error) {
          // Decryption failure is non-fatal: log and fall back to a fully masked placeholder.
          logger.error(`[${requestId}] Failed to decrypt BYOK key for provider ${key.providerId}`, {
            error,
          })
          return {
            id: key.id,
            providerId: key.providerId,
            maskedKey: '••••••••',
            createdBy: key.createdBy,
            createdAt: key.createdAt,
            updatedAt: key.updatedAt,
          }
        }
      })
    )
    return NextResponse.json({ keys: formattedKeys })
  } catch (error: unknown) {
    logger.error(`[${requestId}] BYOK keys GET error`, error)
    return NextResponse.json(
      { error: error instanceof Error ? error.message : 'Failed to load BYOK keys' },
      { status: 500 }
    )
  }
}
/**
 * POST /api/workspaces/[id]/byok-keys
 *
 * Creates or replaces (upserts) a workspace BYOK key for a provider. Only
 * workspace admins may write. The key is encrypted at rest; responses expose
 * only a masked preview of the plaintext just submitted.
 */
export async function POST(request: NextRequest, { params }: { params: Promise<{ id: string }> }) {
  const requestId = generateRequestId()
  const workspaceId = (await params).id
  try {
    const session = await getSession()
    if (!session?.user?.id) {
      logger.warn(`[${requestId}] Unauthorized BYOK key creation attempt`)
      return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
    }
    const userId = session.user.id
    const permission = await getUserEntityPermissions(userId, 'workspace', workspaceId)
    if (permission !== 'admin') {
      return NextResponse.json(
        { error: 'Only workspace admins can manage BYOK keys' },
        { status: 403 }
      )
    }
    // Zod errors thrown here are mapped to 400 in the catch block below.
    const body = await request.json()
    const { providerId, apiKey } = UpsertKeySchema.parse(body)
    const { encrypted } = await encryptSecret(apiKey)
    // Select-then-insert/update upsert.
    // NOTE(review): not atomic — concurrent POSTs for the same provider could
    // both take the insert path; confirm a unique (workspaceId, providerId)
    // constraint exists at the DB level.
    const existingKey = await db
      .select()
      .from(workspaceBYOKKeys)
      .where(
        and(
          eq(workspaceBYOKKeys.workspaceId, workspaceId),
          eq(workspaceBYOKKeys.providerId, providerId)
        )
      )
      .limit(1)
    if (existingKey.length > 0) {
      await db
        .update(workspaceBYOKKeys)
        .set({
          encryptedApiKey: encrypted,
          updatedAt: new Date(),
        })
        .where(eq(workspaceBYOKKeys.id, existingKey[0].id))
      logger.info(`[${requestId}] Updated BYOK key for ${providerId} in workspace ${workspaceId}`)
      return NextResponse.json({
        success: true,
        key: {
          id: existingKey[0].id,
          providerId,
          maskedKey: maskApiKey(apiKey),
          updatedAt: new Date(),
        },
      })
    }
    const [newKey] = await db
      .insert(workspaceBYOKKeys)
      .values({
        id: nanoid(),
        workspaceId,
        providerId,
        encryptedApiKey: encrypted,
        createdBy: userId,
        createdAt: new Date(),
        updatedAt: new Date(),
      })
      .returning({
        id: workspaceBYOKKeys.id,
        providerId: workspaceBYOKKeys.providerId,
        createdAt: workspaceBYOKKeys.createdAt,
      })
    logger.info(`[${requestId}] Created BYOK key for ${providerId} in workspace ${workspaceId}`)
    return NextResponse.json({
      success: true,
      key: {
        ...newKey,
        maskedKey: maskApiKey(apiKey),
      },
    })
  } catch (error: unknown) {
    logger.error(`[${requestId}] BYOK key POST error`, error)
    if (error instanceof z.ZodError) {
      return NextResponse.json({ error: error.errors[0].message }, { status: 400 })
    }
    return NextResponse.json(
      { error: error instanceof Error ? error.message : 'Failed to save BYOK key' },
      { status: 500 }
    )
  }
}
/**
 * DELETE /api/workspaces/[id]/byok-keys
 *
 * Removes the workspace's BYOK key for a given provider. Admin-only.
 * Body: `{ providerId }` (validated by DeleteKeySchema; Zod errors map to 400).
 */
export async function DELETE(
  request: NextRequest,
  { params }: { params: Promise<{ id: string }> }
) {
  const requestId = generateRequestId()
  const workspaceId = (await params).id
  try {
    const session = await getSession()
    const userId = session?.user?.id
    if (!userId) {
      logger.warn(`[${requestId}] Unauthorized BYOK key deletion attempt`)
      return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
    }
    const role = await getUserEntityPermissions(userId, 'workspace', workspaceId)
    if (role !== 'admin') {
      return NextResponse.json(
        { error: 'Only workspace admins can manage BYOK keys' },
        { status: 403 }
      )
    }
    const { providerId } = DeleteKeySchema.parse(await request.json())
    await db
      .delete(workspaceBYOKKeys)
      .where(
        and(
          eq(workspaceBYOKKeys.workspaceId, workspaceId),
          eq(workspaceBYOKKeys.providerId, providerId)
        )
      )
    logger.info(`[${requestId}] Deleted BYOK key for ${providerId} from workspace ${workspaceId}`)
    return NextResponse.json({ success: true })
  } catch (error: unknown) {
    logger.error(`[${requestId}] BYOK key DELETE error`, error)
    if (error instanceof z.ZodError) {
      return NextResponse.json({ error: error.errors[0].message }, { status: 400 })
    }
    return NextResponse.json(
      { error: error instanceof Error ? error.message : 'Failed to delete BYOK key' },
      { status: 500 }
    )
  }
}

View File

@@ -53,7 +53,7 @@ function buildTestPayload(subscription: typeof workspaceNotificationSubscription
totalDurationMs: 5000,
cost: {
total: 0.00123,
tokens: { prompt: 100, completion: 50, total: 150 },
tokens: { input: 100, output: 50, total: 150 },
},
},
links: {

View File

@@ -1,3 +1,4 @@
import { createSession, createWorkspaceRecord, loggerMock } from '@sim/testing'
import { NextRequest } from 'next/server'
import { beforeEach, describe, expect, it, vi } from 'vitest'
@@ -59,14 +60,7 @@ vi.mock('@/lib/workspaces/permissions/utils', () => ({
mockHasWorkspaceAdminAccess(userId, workspaceId),
}))
vi.mock('@/lib/logs/console/logger', () => ({
createLogger: vi.fn().mockReturnValue({
debug: vi.fn(),
info: vi.fn(),
warn: vi.fn(),
error: vi.fn(),
}),
}))
vi.mock('@/lib/logs/console/logger', () => loggerMock)
vi.mock('@/lib/core/utils/urls', () => ({
getBaseUrl: vi.fn().mockReturnValue('https://test.sim.ai'),
@@ -127,9 +121,14 @@ const mockUser = {
name: 'Test User',
}
const mockWorkspace = {
const mockWorkspaceData = createWorkspaceRecord({
id: 'workspace-456',
name: 'Test Workspace',
})
const mockWorkspace = {
id: mockWorkspaceData.id,
name: mockWorkspaceData.name,
}
const mockInvitation = {
@@ -140,7 +139,7 @@ const mockInvitation = {
status: 'pending',
token: 'token-abc123',
permissions: 'read',
expiresAt: new Date(Date.now() + 86400000), // 1 day from now
expiresAt: new Date(Date.now() + 86400000),
createdAt: new Date(),
updatedAt: new Date(),
}
@@ -154,7 +153,8 @@ describe('Workspace Invitation [invitationId] API Route', () => {
describe('GET /api/workspaces/invitations/[invitationId]', () => {
it('should return invitation details when called without token', async () => {
mockGetSession.mockResolvedValue({ user: mockUser })
const session = createSession({ userId: mockUser.id, email: mockUser.email })
mockGetSession.mockResolvedValue(session)
dbSelectResults = [[mockInvitation], [mockWorkspace]]
const request = new NextRequest('http://localhost/api/workspaces/invitations/invitation-789')
@@ -202,15 +202,18 @@ describe('Workspace Invitation [invitationId] API Route', () => {
})
it('should accept invitation when called with valid token', async () => {
mockGetSession.mockResolvedValue({
user: { ...mockUser, email: 'invited@example.com' },
const session = createSession({
userId: mockUser.id,
email: 'invited@example.com',
name: mockUser.name,
})
mockGetSession.mockResolvedValue(session)
dbSelectResults = [
[mockInvitation], // invitation lookup
[mockWorkspace], // workspace lookup
[{ ...mockUser, email: 'invited@example.com' }], // user lookup
[], // existing permission check (empty = no existing)
[mockInvitation],
[mockWorkspace],
[{ ...mockUser, email: 'invited@example.com' }],
[],
]
const request = new NextRequest(
@@ -225,13 +228,16 @@ describe('Workspace Invitation [invitationId] API Route', () => {
})
it('should redirect to error page when invitation expired', async () => {
mockGetSession.mockResolvedValue({
user: { ...mockUser, email: 'invited@example.com' },
const session = createSession({
userId: mockUser.id,
email: 'invited@example.com',
name: mockUser.name,
})
mockGetSession.mockResolvedValue(session)
const expiredInvitation = {
...mockInvitation,
expiresAt: new Date(Date.now() - 86400000), // 1 day ago
expiresAt: new Date(Date.now() - 86400000),
}
dbSelectResults = [[expiredInvitation], [mockWorkspace]]
@@ -250,9 +256,12 @@ describe('Workspace Invitation [invitationId] API Route', () => {
})
it('should redirect to error page when email mismatch', async () => {
mockGetSession.mockResolvedValue({
user: { ...mockUser, email: 'wrong@example.com' },
const session = createSession({
userId: mockUser.id,
email: 'wrong@example.com',
name: mockUser.name,
})
mockGetSession.mockResolvedValue(session)
dbSelectResults = [
[mockInvitation],
@@ -274,8 +283,9 @@ describe('Workspace Invitation [invitationId] API Route', () => {
})
it('should return 404 when invitation not found', async () => {
mockGetSession.mockResolvedValue({ user: mockUser })
dbSelectResults = [[]] // Empty result
const session = createSession({ userId: mockUser.id, email: mockUser.email })
mockGetSession.mockResolvedValue(session)
dbSelectResults = [[]]
const request = new NextRequest('http://localhost/api/workspaces/invitations/non-existent')
const params = Promise.resolve({ invitationId: 'non-existent' })
@@ -306,7 +316,8 @@ describe('Workspace Invitation [invitationId] API Route', () => {
})
it('should return 404 when invitation does not exist', async () => {
mockGetSession.mockResolvedValue({ user: mockUser })
const session = createSession({ userId: mockUser.id, email: mockUser.email })
mockGetSession.mockResolvedValue(session)
dbSelectResults = [[]]
const request = new NextRequest('http://localhost/api/workspaces/invitations/non-existent', {
@@ -322,7 +333,8 @@ describe('Workspace Invitation [invitationId] API Route', () => {
})
it('should return 403 when user lacks admin access', async () => {
mockGetSession.mockResolvedValue({ user: mockUser })
const session = createSession({ userId: mockUser.id, email: mockUser.email })
mockGetSession.mockResolvedValue(session)
mockHasWorkspaceAdminAccess.mockResolvedValue(false)
dbSelectResults = [[mockInvitation]]
@@ -341,7 +353,8 @@ describe('Workspace Invitation [invitationId] API Route', () => {
})
it('should return 400 when trying to delete non-pending invitation', async () => {
mockGetSession.mockResolvedValue({ user: mockUser })
const session = createSession({ userId: mockUser.id, email: mockUser.email })
mockGetSession.mockResolvedValue(session)
mockHasWorkspaceAdminAccess.mockResolvedValue(true)
const acceptedInvitation = { ...mockInvitation, status: 'accepted' }
@@ -361,7 +374,8 @@ describe('Workspace Invitation [invitationId] API Route', () => {
})
it('should successfully delete pending invitation when user has admin access', async () => {
mockGetSession.mockResolvedValue({ user: mockUser })
const session = createSession({ userId: mockUser.id, email: mockUser.email })
mockGetSession.mockResolvedValue(session)
mockHasWorkspaceAdminAccess.mockResolvedValue(true)
dbSelectResults = [[mockInvitation]]

View File

@@ -117,7 +117,7 @@ export default function ChatClient({ identifier }: { identifier: string }) {
const [error, setError] = useState<string | null>(null)
const messagesEndRef = useRef<HTMLDivElement>(null)
const messagesContainerRef = useRef<HTMLDivElement>(null)
const [starCount, setStarCount] = useState('19.4k')
const [starCount, setStarCount] = useState('24.4k')
const [conversationId, setConversationId] = useState('')
const [showScrollButton, setShowScrollButton] = useState(false)

Some files were not shown because too many files have changed in this diff Show More