Compare commits

...

17 Commits

Author SHA1 Message Date
waleed
e6c7bd3534 feat(kb): added tags information to kb docs table 2025-12-26 02:06:50 -08:00
Waleed
b7f6bab282 feat(tests): added testing package, overhauled tests (#2586)
* feat(tests): added testing package, overhauled tests

* fix build
2025-12-25 16:06:47 -08:00
Waleed
61e7213425 feat(i18n): update translations (#2585)
Co-authored-by: waleedlatif1 <waleedlatif1@users.noreply.github.com>
2025-12-25 13:33:14 -08:00
Waleed
3201abab56 improvement(schedules): use tanstack query to fetch schedule data, cleanup ui on schedule info component (#2584)
* improvement(schedules): use tanstack query to fetch schedule data, cleanup ui on schedule info component

* update trigger-save UI, increase auto disable to 100 consecutive from 10

* updated docs

* consolidate consts
2025-12-25 12:09:58 -08:00
Waleed
d79696beae feat(docs): added vector search (#2583)
* feat(docs): added vector search

* ack comments
2025-12-25 11:00:57 -08:00
Waleed
f604ca39a5 feat(chat-otp): added db fallback for chat otp (#2582)
* feat(chat-otp): added db fallback for chat otp

* ack PR comments
2025-12-25 09:37:20 -08:00
Waleed
26ec12599f improvement(byok): updated styling for byok page (#2581) 2025-12-25 08:36:55 -08:00
Waleed
97372533ec feat(i18n): update translations (#2578) 2025-12-24 23:37:35 -08:00
Vikhyath Mondreti
66766a9d81 improvement(byok): remove web search block exa (#2579)
* remove exa from byok

* improvement(byok): remove web search block exa

* fix autolayout

* fix type
2025-12-24 19:26:48 -08:00
Vikhyath Mondreti
47a259b428 feat(byok): byok for hosted model capabilities (#2574)
* feat(byok): byok for hosted model capabilities

* fix type

* add ignore lint

* accidentally added feature flags

* centralize byok fetch for LLM calls

* remove feature flags ts

* fix tests

* update docs
2025-12-24 18:20:54 -08:00
Waleed
40a6bf5c8c improvement(variables): update workflows to use deployed variables, not local ones to align with the rest of the canvas components (#2577)
* improvement(variables): update workflows to use deployed variables, not local ones to align with the rest of the canvas components

* update change detection to ignore trigger id since it is runtime metadata and not actually required to be redeployed
2025-12-24 17:40:23 -08:00
Waleed
da7eca9590 fix(change-detection): move change detection logic to client-side to prevent unnecessary API calls, consolidate utils (#2576)
* fix(change-detection): move change detection logic to client-side to prevent unnecessary API calls, consolidate utils

* added tests

* ack PR comments

* added isPublished to API response
2025-12-24 17:16:35 -08:00
Waleed
92b2e34d25 feat(autolayout): add fitToView on autolayout and reduce horizontal spacing between blocks (#2575)
* feat(autolayout): add fitToView on autolayout and reduce horizontal spacing between blocks

* remove additional yaml code
2025-12-24 16:19:29 -08:00
Vikhyath Mondreti
77521a3a57 fix(cancel-workflow-exec): move cancellation tracking for multi-task envs to redis (#2573)
* fix(cancel-workflow-exec): move cancellation tracking for multi-task envs to redis

* cleanup cancellation keys after execution
2025-12-24 11:51:09 -08:00
Waleed
cb8b9c547a fix(router): update router to handle azure creds the same way the agent block does (#2572)
* fix(router): update router to handle azure creds the same way the agent block does

* cleanup
2025-12-24 10:22:47 -08:00
Vikhyath Mondreti
b1cd8d151d fix(executor): workflow abort has to send abort signal to route for correct state update (#2571) 2025-12-24 02:50:58 -08:00
Waleed
1145f5c043 fix(shortcut): fixed global keyboard commands provider to follow latest ref pattern (#2569)
* fix(shortcut): fixed global commands provider to follow best practices

* cleanup

* ack PR comment
2025-12-24 00:25:15 -08:00
228 changed files with 36059 additions and 5303 deletions

View File

@@ -1,16 +1,126 @@
import { createFromSource } from 'fumadocs-core/search/server'
import { source } from '@/lib/source'
import { sql } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { db, docsEmbeddings } from '@/lib/db'
import { generateSearchEmbedding } from '@/lib/embeddings'
// Route segment config for this App Router endpoint.
// Node runtime (not Edge): the handler below uses the Drizzle DB client and
// the embedding service, which run on Node APIs.
export const runtime = 'nodejs'
// Never cache: search results must always reflect the latest indexed docs.
export const revalidate = 0
/**
* Hybrid search API endpoint
* - English: Vector embeddings + keyword search
* - Other languages: Keyword search only
*/
/**
 * Hybrid docs search endpoint (App Router GET handler).
 *
 * - English locale: pgvector cosine-similarity search merged with Postgres
 *   full-text keyword search.
 * - Other locales: keyword (full-text) search only — embeddings exist only
 *   for English content.
 *
 * Query params:
 * - `query` (or `q`): search text; empty/missing returns `[]`.
 * - `locale`: selects the Postgres text-search configuration (default `en`).
 * - `limit`: maximum number of results (default 10; invalid or non-positive
 *   values fall back to 10).
 *
 * Always responds with a JSON array. Errors are logged and degrade to `[]`
 * so the docs UI never breaks on a search failure.
 */
export async function GET(request: NextRequest) {
  try {
    const searchParams = request.nextUrl.searchParams
    const query = searchParams.get('query') || searchParams.get('q') || ''
    const locale = searchParams.get('locale') || 'en'
    // Guard against NaN / non-positive limits (e.g. ?limit=abc or ?limit=-5):
    // unguarded, NaN would propagate into `candidateLimit` and the SQL LIMIT
    // clause and fail the query.
    const parsedLimit = Number.parseInt(searchParams.get('limit') || '10', 10)
    const limit = Number.isFinite(parsedLimit) && parsedLimit > 0 ? parsedLimit : 10

    if (!query || query.trim().length === 0) {
      return NextResponse.json([])
    }

    // Over-fetch from each source so the de-duplicating merge below still has
    // enough unique candidates to fill `limit`.
    const candidateLimit = limit * 3
    // Minimum cosine similarity for a vector hit to count as relevant.
    const similarityThreshold = 0.6

    // Locale → Postgres text-search configuration used for stemming.
    const localeMap: Record<string, string> = {
      en: 'english',
      es: 'spanish',
      fr: 'french',
      de: 'german',
      ja: 'simple', // PostgreSQL doesn't have Japanese support, use simple
      zh: 'simple', // PostgreSQL doesn't have Chinese support, use simple
    }
    const tsConfig = localeMap[locale] || 'simple'

    // Vector search is restricted to English — see endpoint doc above.
    const useVectorSearch = locale === 'en'

    let vectorResults: Array<{
      chunkId: string
      chunkText: string
      sourceDocument: string
      sourceLink: string
      headerText: string
      headerLevel: number
      similarity: number
      searchType: string
    }> = []

    if (useVectorSearch) {
      const queryEmbedding = await generateSearchEmbedding(query)
      // `<=>` is pgvector's cosine-distance operator; similarity = 1 - distance.
      vectorResults = await db
        .select({
          chunkId: docsEmbeddings.chunkId,
          chunkText: docsEmbeddings.chunkText,
          sourceDocument: docsEmbeddings.sourceDocument,
          sourceLink: docsEmbeddings.sourceLink,
          headerText: docsEmbeddings.headerText,
          headerLevel: docsEmbeddings.headerLevel,
          similarity: sql<number>`1 - (${docsEmbeddings.embedding} <=> ${JSON.stringify(queryEmbedding)}::vector)`,
          searchType: sql<string>`'vector'`,
        })
        .from(docsEmbeddings)
        .where(
          sql`1 - (${docsEmbeddings.embedding} <=> ${JSON.stringify(queryEmbedding)}::vector) >= ${similarityThreshold}`
        )
        .orderBy(sql`${docsEmbeddings.embedding} <=> ${JSON.stringify(queryEmbedding)}::vector`)
        .limit(candidateLimit)
    }

    // Keyword (full-text) search runs for every locale.
    const keywordResults = await db
      .select({
        chunkId: docsEmbeddings.chunkId,
        chunkText: docsEmbeddings.chunkText,
        sourceDocument: docsEmbeddings.sourceDocument,
        sourceLink: docsEmbeddings.sourceLink,
        headerText: docsEmbeddings.headerText,
        headerLevel: docsEmbeddings.headerLevel,
        similarity: sql<number>`ts_rank(${docsEmbeddings.chunkTextTsv}, plainto_tsquery(${tsConfig}, ${query}))`,
        searchType: sql<string>`'keyword'`,
      })
      .from(docsEmbeddings)
      .where(sql`${docsEmbeddings.chunkTextTsv} @@ plainto_tsquery(${tsConfig}, ${query})`)
      .orderBy(
        sql`ts_rank(${docsEmbeddings.chunkTextTsv}, plainto_tsquery(${tsConfig}, ${query})) DESC`
      )
      .limit(candidateLimit)

    // Round-robin interleave of vector and keyword hits, de-duplicated by
    // chunk id, so a chunk found by both searches appears once at its best rank.
    const seenIds = new Set<string>()
    const mergedResults: typeof vectorResults = []
    for (let i = 0; i < Math.max(vectorResults.length, keywordResults.length); i++) {
      if (i < vectorResults.length && !seenIds.has(vectorResults[i].chunkId)) {
        mergedResults.push(vectorResults[i])
        seenIds.add(vectorResults[i].chunkId)
      }
      if (i < keywordResults.length && !seenIds.has(keywordResults[i].chunkId)) {
        mergedResults.push(keywordResults[i])
        seenIds.add(keywordResults[i].chunkId)
      }
    }

    const filteredResults = mergedResults.slice(0, limit)

    // Shape results for the search UI: title from the nearest header (falling
    // back to the document name) and breadcrumbs from the capitalized doc path.
    const searchResults = filteredResults.map((result) => {
      const title = result.headerText || result.sourceDocument.replace('.mdx', '')
      const pathParts = result.sourceDocument
        .replace('.mdx', '')
        .split('/')
        .map((part) => part.charAt(0).toUpperCase() + part.slice(1))
      return {
        id: result.chunkId,
        type: 'page' as const,
        url: result.sourceLink,
        content: title,
        breadcrumbs: pathParts,
      }
    })

    return NextResponse.json(searchResults)
  } catch (error) {
    // Degrade gracefully: log with context and return an empty result set
    // rather than surfacing a 5xx to the docs UI.
    console.error('Semantic search error:', error)
    return NextResponse.json([])
  }
}

View File

@@ -105,28 +105,32 @@ Die Modellaufschlüsselung zeigt:
Die angezeigten Preise entsprechen den Tarifen vom 10. September 2025. Überprüfen Sie die Dokumentation der Anbieter für aktuelle Preise.
</Callout>
## Bring Your Own Key (BYOK)
Sie können Ihre eigenen API-Schlüssel für gehostete Modelle (OpenAI, Anthropic, Google, Mistral) unter **Einstellungen → BYOK** verwenden, um Basispreise zu zahlen. Schlüssel werden verschlüsselt und gelten arbeitsbereichsweit.
## Strategien zur Kostenoptimierung
- **Modellauswahl**: Wählen Sie Modelle basierend auf der Komplexität der Aufgabe. Einfache Aufgaben können GPT-4.1-nano verwenden, während komplexes Denken möglicherweise o1 oder Claude Opus erfordert.
- **Prompt-Engineering**: Gut strukturierte, präzise Prompts reduzieren den Token-Verbrauch ohne Qualitätseinbußen.
- **Modellauswahl**: Wählen Sie Modelle basierend auf der Aufgabenkomplexität. Einfache Aufgaben können GPT-4.1-nano verwenden, während komplexes Reasoning o1 oder Claude Opus erfordern könnte.
- **Prompt Engineering**: Gut strukturierte, prägnante Prompts reduzieren den Token-Verbrauch ohne Qualitätsverlust.
- **Lokale Modelle**: Verwenden Sie Ollama oder VLLM für unkritische Aufgaben, um API-Kosten vollständig zu eliminieren.
- **Caching und Wiederverwendung**: Speichern Sie häufig verwendete Ergebnisse in Variablen oder Dateien, um wiederholte KI-Modellaufrufe zu vermeiden.
- **Batch-Verarbeitung**: Verarbeiten Sie mehrere Elemente in einer einzigen KI-Anfrage anstatt einzelne Aufrufe zu tätigen.
- **Caching und Wiederverwendung**: Speichern Sie häufig verwendete Ergebnisse in Variablen oder Dateien, um wiederholte AI-Modellaufrufe zu vermeiden.
- **Batch-Verarbeitung**: Verarbeiten Sie mehrere Elemente in einer einzigen AI-Anfrage, anstatt einzelne Aufrufe zu tätigen.
## Nutzungsüberwachung
Überwachen Sie Ihre Nutzung und Abrechnung unter Einstellungen → Abonnement:
- **Aktuelle Nutzung**: Echtzeit-Nutzung und -Kosten für den aktuellen Zeitraum
- **Nutzungslimits**: Plangrenzen mit visuellen Fortschrittsanzeigen
- **Aktuelle Nutzung**: Echtzeit-Nutzung und Kosten für den aktuellen Zeitraum
- **Nutzungslimits**: Plan-Limits mit visuellen Fortschrittsindikatoren
- **Abrechnungsdetails**: Prognostizierte Gebühren und Mindestverpflichtungen
- **Planverwaltung**: Upgrade-Optionen und Abrechnungsverlauf
- **Plan-Verwaltung**: Upgrade-Optionen und Abrechnungsverlauf
### Programmatische Nutzungsverfolgung
### Programmatisches Nutzungs-Tracking
Sie können Ihre aktuelle Nutzung und Limits programmatisch über die API abfragen:
**Endpunkt:**
**Endpoint:**
```text
GET /api/users/me/usage-limits
@@ -172,69 +176,69 @@ curl -X GET -H "X-API-Key: YOUR_API_KEY" -H "Content-Type: application/json" htt
```
**Rate-Limit-Felder:**
- `requestsPerMinute`: Dauerhafte Rate-Begrenzung (Tokens werden mit dieser Rate aufgefüllt)
- `maxBurst`: Maximale Tokens, die Sie ansammeln können (Burst-Kapazität)
- `remaining`: Aktuell verfügbare Tokens (können bis zu `maxBurst` sein)
- `requestsPerMinute`: Dauerhaftes Rate-Limit (Tokens werden mit dieser Rate aufgefüllt)
- `maxBurst`: Maximale Tokens, die Sie akkumulieren können (Burst-Kapazität)
- `remaining`: Aktuell verfügbare Tokens (kann bis zu `maxBurst` betragen)
**Antwortfelder:**
- `currentPeriodCost` spiegelt die Nutzung in der aktuellen Abrechnungsperiode wider
- `limit` wird von individuellen Limits (Free/Pro) oder gepoolten Organisationslimits (Team/Enterprise) abgeleitet
- `plan` ist der aktive Plan mit der höchsten Priorität, der mit Ihrem Benutzer verknüpft ist
- `currentPeriodCost` spiegelt die Nutzung im aktuellen Abrechnungszeitraum wider
- `limit` wird aus individuellen Limits (Free/Pro) oder gepoolten Organisationslimits (Team/Enterprise) abgeleitet
- `plan` ist der Plan mit der höchsten Priorität, der Ihrem Benutzer zugeordnet ist
## Plan-Limits
Verschiedene Abonnementpläne haben unterschiedliche Nutzungslimits:
Verschiedene Abonnement-Pläne haben unterschiedliche Nutzungslimits:
| Plan | Monatliches Nutzungslimit | Ratenlimits (pro Minute) |
|------|-------------------|-------------------------|
| **Free** | 20 $ | 5 synchron, 10 asynchron |
| **Pro** | 100 $ | 10 synchron, 50 asynchron |
| **Team** | 500 $ (gepoolt) | 50 synchron, 100 asynchron |
| **Free** | 20 $ | 5 sync, 10 async |
| **Pro** | 100 $ | 10 sync, 50 async |
| **Team** | 500 $ (gemeinsam) | 50 sync, 100 async |
| **Enterprise** | Individuell | Individuell |
## Abrechnungsmodell
Sim verwendet ein **Basisabonnement + Mehrverbrauch**-Abrechnungsmodell:
Sim verwendet ein **Basis-Abonnement + Mehrverbrauch**-Abrechnungsmodell:
### Wie es funktioniert
### So funktioniert es
**Pro-Plan ($20/Monat):**
- Monatliches Abonnement beinhaltet $20 Nutzung
- Nutzung unter $20 → Keine zusätzlichen Kosten
- Nutzung über $20 → Zahlen Sie den Mehrverbrauch am Monatsende
- Beispiel: $35 Nutzung = $20 (Abonnement) + $15 (Mehrverbrauch)
**Pro-Plan (20 $/Monat):**
- Monatsabonnement beinhaltet 20 $ Nutzung
- Nutzung unter 20 $ → Keine zusätzlichen Gebühren
- Nutzung über 20 $ → Mehrverbrauch am Monatsende zahlen
- Beispiel: 35 $ Nutzung = 20 $ (Abonnement) + 15 $ (Mehrverbrauch)
**Team-Plan ($40/Benutzer/Monat):**
- Gepoolte Nutzung für alle Teammitglieder
- Mehrverbrauch wird aus der Gesamtnutzung des Teams berechnet
**Team-Plan (40 $/Platz/Monat):**
- Gemeinsame Nutzung über alle Teammitglieder
- Mehrverbrauch wird aus der gesamten Team-Nutzung berechnet
- Organisationsinhaber erhält eine Rechnung
**Enterprise-Pläne:**
- Fester monatlicher Preis, kein Mehrverbrauch
- Fester Monatspreis, kein Mehrverbrauch
- Individuelle Nutzungslimits gemäß Vereinbarung
### Schwellenwert-Abrechnung
Wenn der nicht abgerechnete Mehrverbrauch $50 erreicht, berechnet Sim automatisch den gesamten nicht abgerechneten Betrag.
Wenn der nicht abgerechnete Mehrverbrauch 50 $ erreicht, rechnet Sim automatisch den gesamten nicht abgerechneten Betrag ab.
**Beispiel:**
- Tag 10: $70 Mehrverbrauch → Sofortige Abrechnung von $70
- Tag 15: Zusätzliche $35 Nutzung ($105 insgesamt) → Bereits abgerechnet, keine Aktion
- Tag 20: Weitere $50 Nutzung ($155 insgesamt, $85 nicht abgerechnet) → Sofortige Abrechnung von $85
- Tag 10: 70 $ Mehrverbrauch → 70 $ sofort abrechnen
- Tag 15: Zusätzliche 35 $ Nutzung (105 $ gesamt) → Bereits abgerechnet, keine Aktion
- Tag 20: Weitere 50 $ Nutzung (155 $ gesamt, 85 $ nicht abgerechnet) → 85 $ sofort abrechnen
Dies verteilt große Überziehungsgebühren über den Monat, anstatt eine große Rechnung am Ende des Abrechnungszeitraums zu erhalten.
Dies verteilt große Mehrverbrauchsgebühren über den Monat, anstatt einer großen Rechnung am Periodenende.
## Best Practices für Kostenmanagement
1. **Regelmäßig überwachen**: Überprüfen Sie Ihr Nutzungs-Dashboard häufig, um Überraschungen zu vermeiden
2. **Budgets festlegen**: Nutzen Sie Planlimits als Leitplanken für Ihre Ausgaben
2. **Budgets festlegen**: Nutzen Sie Plan-Limits als Leitplanken für Ihre Ausgaben
3. **Workflows optimieren**: Überprüfen Sie kostenintensive Ausführungen und optimieren Sie Prompts oder Modellauswahl
4. **Passende Modelle verwenden**: Passen Sie die Modellkomplexität an die Aufgabenanforderungen an
5. **Ähnliche Aufgaben bündeln**: Kombinieren Sie wenn möglich mehrere Anfragen, um den Overhead zu reduzieren
5. **Ähnliche Aufgaben bündeln**: Kombinieren Sie mehrere Anfragen, wenn möglich, um Overhead zu reduzieren
## Nächste Schritte
- Überprüfen Sie Ihre aktuelle Nutzung unter [Einstellungen → Abonnement](https://sim.ai/settings/subscription)
- Erfahren Sie mehr über [Protokollierung](/execution/logging), um Ausführungsdetails zu verfolgen
- Erkunden Sie die [Externe API](/execution/api) für programmatische Kostenüberwachung
- Entdecken Sie die [externe API](/execution/api) für programmatische Kostenüberwachung
- Sehen Sie sich [Workflow-Optimierungstechniken](/blocks) an, um Kosten zu reduzieren

View File

@@ -56,7 +56,7 @@ Sie müssen Ihren Workflow bereitstellen, damit der Zeitplan mit der Ausführung
## Automatische Deaktivierung
Zeitpläne werden nach **10 aufeinanderfolgenden Fehlschlägen** automatisch deaktiviert, um unkontrollierte Fehler zu verhindern. Bei Deaktivierung:
Zeitpläne werden nach **100 aufeinanderfolgenden Fehlern** automatisch deaktiviert, um unkontrollierte Fehler zu verhindern. Bei Deaktivierung:
- Erscheint ein Warnhinweis auf dem Zeitplan-Block
- Die Ausführung des Zeitplans wird gestoppt

View File

@@ -104,6 +104,10 @@ The model breakdown shows:
Pricing shown reflects rates as of September 10, 2025. Check provider documentation for current pricing.
</Callout>
## Bring Your Own Key (BYOK)
You can use your own API keys for hosted models (OpenAI, Anthropic, Google, Mistral) in **Settings → BYOK** to pay base prices. Keys are encrypted and apply workspace-wide.
## Cost Optimization Strategies
- **Model Selection**: Choose models based on task complexity. Simple tasks can use GPT-4.1-nano while complex reasoning might need o1 or Claude Opus.

View File

@@ -56,7 +56,7 @@ You must deploy your workflow for the schedule to start running. Configure the s
## Automatic Disabling
Schedules automatically disable after **10 consecutive failures** to prevent runaway errors. When disabled:
Schedules automatically disable after **100 consecutive failures** to prevent runaway errors. When disabled:
- A warning badge appears on the schedule block
- The schedule stops executing

View File

@@ -105,26 +105,30 @@ El desglose del modelo muestra:
Los precios mostrados reflejan las tarifas a partir del 10 de septiembre de 2025. Consulta la documentación del proveedor para conocer los precios actuales.
</Callout>
## Trae tu propia clave (BYOK)
Puedes usar tus propias claves API para modelos alojados (OpenAI, Anthropic, Google, Mistral) en **Configuración → BYOK** para pagar precios base. Las claves están encriptadas y se aplican a todo el espacio de trabajo.
## Estrategias de optimización de costos
- **Selección de modelos**: Elige modelos según la complejidad de la tarea. Las tareas simples pueden usar GPT-4.1-nano mientras que el razonamiento complejo podría necesitar o1 o Claude Opus.
- **Ingeniería de prompts**: Los prompts bien estructurados y concisos reducen el uso de tokens sin sacrificar la calidad.
- **Modelos locales**: Usa Ollama o VLLM para tareas no críticas para eliminar por completo los costos de API.
- **Almacenamiento en caché y reutilización**: Guarda resultados frecuentemente utilizados en variables o archivos para evitar llamadas repetidas al modelo de IA.
- **Procesamiento por lotes**: Procesa múltiples elementos en una sola solicitud de IA en lugar de hacer llamadas individuales.
- **Selección de modelo**: elige modelos según la complejidad de la tarea. Las tareas simples pueden usar GPT-4.1-nano mientras que el razonamiento complejo podría necesitar o1 o Claude Opus.
- **Ingeniería de prompts**: los prompts bien estructurados y concisos reducen el uso de tokens sin sacrificar calidad.
- **Modelos locales**: usa Ollama o VLLM para tareas no críticas para eliminar completamente los costos de API.
- **Almacenamiento en caché y reutilización**: guarda resultados usados frecuentemente en variables o archivos para evitar llamadas repetidas al modelo de IA.
- **Procesamiento por lotes**: procesa múltiples elementos en una sola solicitud de IA en lugar de hacer llamadas individuales.
## Monitoreo de uso
Monitorea tu uso y facturación en Configuración → Suscripción:
- **Uso actual**: Uso y costos en tiempo real para el período actual
- **Límites de uso**: Límites del plan con indicadores visuales de progreso
- **Detalles de facturación**: Cargos proyectados y compromisos mínimos
- **Gestión del plan**: Opciones de actualización e historial de facturación
- **Uso actual**: uso y costos en tiempo real para el período actual
- **Límites de uso**: límites del plan con indicadores visuales de progreso
- **Detalles de facturación**: cargos proyectados y compromisos mínimos
- **Gestión de plan**: opciones de actualización e historial de facturación
### Seguimiento programático de uso
### Seguimiento de uso programático
Puedes consultar tu uso actual y límites de forma programática utilizando la API:
Puedes consultar tu uso y límites actuales de forma programática usando la API:
**Endpoint:**
@@ -135,13 +139,13 @@ GET /api/users/me/usage-limits
**Autenticación:**
- Incluye tu clave API en el encabezado `X-API-Key`
**Ejemplo de solicitud:**
**Solicitud de ejemplo:**
```bash
curl -X GET -H "X-API-Key: YOUR_API_KEY" -H "Content-Type: application/json" https://sim.ai/api/users/me/usage-limits
```
**Ejemplo de respuesta:**
**Respuesta de ejemplo:**
```json
{
@@ -172,14 +176,14 @@ curl -X GET -H "X-API-Key: YOUR_API_KEY" -H "Content-Type: application/json" htt
```
**Campos de límite de tasa:**
- `requestsPerMinute`: Límite de tasa sostenida (los tokens se recargan a esta velocidad)
- `maxBurst`: Máximo de tokens que puedes acumular (capacidad de ráfaga)
- `remaining`: Tokens disponibles actualmente (puede ser hasta `maxBurst`)
- `requestsPerMinute`: límite de tasa sostenida (los tokens se recargan a esta tasa)
- `maxBurst`: tokens máximos que puedes acumular (capacidad de ráfaga)
- `remaining`: tokens actuales disponibles (puede ser hasta `maxBurst`)
**Campos de respuesta:**
- `currentPeriodCost` refleja el uso en el período de facturación actual
- `limit` se deriva de límites individuales (Gratuito/Pro) o límites agrupados de la organización (Equipo/Empresa)
- `plan` es el plan activo de mayor prioridad asociado a tu usuario
- `limit` se deriva de límites individuales (Free/Pro) o límites de organización agrupados (Team/Enterprise)
- `plan` es el plan activo de mayor prioridad asociado con tu usuario
## Límites del plan
@@ -187,10 +191,10 @@ Los diferentes planes de suscripción tienen diferentes límites de uso:
| Plan | Límite de uso mensual | Límites de tasa (por minuto) |
|------|-------------------|-------------------------|
| **Gratis** | $20 | 5 síncronas, 10 asíncronas |
| **Pro** | $100 | 10 síncronas, 50 asíncronas |
| **Equipo** | $500 (compartido) | 50 síncronas, 100 asíncronas |
| **Empresarial** | Personalizado | Personalizado |
| **Gratuito** | $20 | 5 sync, 10 async |
| **Pro** | $100 | 10 sync, 50 async |
| **Equipo** | $500 (compartido) | 50 sync, 100 async |
| **Empresa** | Personalizado | Personalizado |
## Modelo de facturación
@@ -200,16 +204,16 @@ Sim utiliza un modelo de facturación de **suscripción base + excedente**:
**Plan Pro ($20/mes):**
- La suscripción mensual incluye $20 de uso
- Uso por debajo de $20 → Sin cargos adicionales
- Uso por encima de $20 → Pagas el excedente al final del mes
- Uso inferior a $20 → Sin cargos adicionales
- Uso superior a $20 → Paga el excedente al final del mes
- Ejemplo: $35 de uso = $20 (suscripción) + $15 (excedente)
**Plan de Equipo ($40/usuario/mes):**
- Uso agrupado entre todos los miembros del equipo
- Excedente calculado del uso total del equipo
**Plan Equipo ($40/usuario/mes):**
- Uso compartido entre todos los miembros del equipo
- El excedente se calcula a partir del uso total del equipo
- El propietario de la organización recibe una sola factura
**Planes Empresariales:**
**Planes Empresa:**
- Precio mensual fijo, sin excedentes
- Límites de uso personalizados según el acuerdo
@@ -218,23 +222,23 @@ Sim utiliza un modelo de facturación de **suscripción base + excedente**:
Cuando el excedente no facturado alcanza los $50, Sim factura automáticamente el monto total no facturado.
**Ejemplo:**
- Día 10: $70 de excedente → Factura inmediata de $70
- Día 15: $35 adicionales de uso ($105 en total) → Ya facturado, sin acción
- Día 20: Otros $50 de uso ($155 en total, $85 no facturados) → Factura inmediata de $85
- Día 10: $70 de excedente → Factura $70 inmediatamente
- Día 15: $35 adicionales de uso ($105 total) → Ya facturado, sin acción
- Día 20: Otros $50 de uso ($155 total, $85 sin facturar) → Factura $85 inmediatamente
Esto distribuye los cargos por exceso a lo largo del mes en lugar de una gran factura al final del período.
Esto distribuye los cargos por excedentes grandes a lo largo del mes en lugar de una sola factura grande al final del período.
## Mejores prácticas para la gestión de costos
## Mejores prácticas de gestión de costos
1. **Monitorear regularmente**: Revisa tu panel de uso con frecuencia para evitar sorpresas
2. **Establecer presupuestos**: Utiliza los límites del plan como guías para tu gasto
3. **Optimizar flujos de trabajo**: Revisa las ejecuciones de alto costo y optimiza los prompts o la selección de modelos
4. **Usar modelos apropiados**: Ajusta la complejidad del modelo a los requisitos de la tarea
5. **Agrupar tareas similares**: Combina múltiples solicitudes cuando sea posible para reducir la sobrecarga
1. **Monitorea regularmente**: Revisa tu panel de uso con frecuencia para evitar sorpresas
2. **Establece presupuestos**: Usa los límites del plan como barreras de protección para tu gasto
3. **Optimiza flujos de trabajo**: Revisa las ejecuciones de alto costo y optimiza los prompts o la selección de modelos
4. **Usa modelos apropiados**: Ajusta la complejidad del modelo a los requisitos de la tarea
5. **Agrupa tareas similares**: Combina múltiples solicitudes cuando sea posible para reducir la sobrecarga
## Próximos pasos
- Revisa tu uso actual en [Configuración → Suscripción](https://sim.ai/settings/subscription)
- Aprende sobre [Registro](/execution/logging) para seguir los detalles de ejecución
- Explora la [API externa](/execution/api) para el monitoreo programático de costos
- Consulta las [técnicas de optimización de flujo de trabajo](/blocks) para reducir costos
- Aprende sobre [Registro](/execution/logging) para rastrear detalles de ejecución
- Explora la [API externa](/execution/api) para monitoreo programático de costos
- Consulta las [técnicas de optimización de flujos de trabajo](/blocks) para reducir costos

View File

@@ -56,7 +56,7 @@ Debes desplegar tu flujo de trabajo para que la programación comience a ejecuta
## Desactivación automática
Las programaciones se desactivan automáticamente después de **10 fallos consecutivos** para evitar errores descontrolados. Cuando se desactiva:
Las programaciones se desactivan automáticamente después de **100 fallos consecutivos** para evitar errores descontrolados. Cuando están desactivadas:
- Aparece una insignia de advertencia en el bloque de programación
- La programación deja de ejecutarse

View File

@@ -105,26 +105,30 @@ La répartition des modèles montre :
Les prix indiqués reflètent les tarifs en date du 10 septembre 2025. Consultez la documentation des fournisseurs pour les tarifs actuels.
</Callout>
## Apportez votre propre clé (BYOK)
Vous pouvez utiliser vos propres clés API pour les modèles hébergés (OpenAI, Anthropic, Google, Mistral) dans **Paramètres → BYOK** pour payer les prix de base. Les clés sont chiffrées et s'appliquent à l'ensemble de l'espace de travail.
## Stratégies d'optimisation des coûts
- **Sélection du modèle** : choisissez les modèles en fonction de la complexité de la tâche. Les tâches simples peuvent utiliser GPT-4.1-nano tandis que le raisonnement complexe pourrait nécessiter o1 ou Claude Opus.
- **Ingénierie de prompt** : des prompts bien structurés et concis réduisent l'utilisation de tokens sans sacrifier la qualité.
- **Sélection du modèle** : choisissez les modèles en fonction de la complexité de la tâche. Les tâches simples peuvent utiliser GPT-4.1-nano tandis que le raisonnement complexe peut nécessiter o1 ou Claude Opus.
- **Ingénierie des prompts** : des prompts bien structurés et concis réduisent l'utilisation de jetons sans sacrifier la qualité.
- **Modèles locaux** : utilisez Ollama ou VLLM pour les tâches non critiques afin d'éliminer complètement les coûts d'API.
- **Mise en cache et réutilisation** : stockez les résultats fréquemment utilisés dans des variables ou des fichiers pour éviter des appels répétés aux modèles d'IA.
- **Traitement par lots** : traitez plusieurs éléments dans une seule requête d'IA plutôt que de faire des appels individuels.
- **Mise en cache et réutilisation** : stockez les résultats fréquemment utilisés dans des variables ou des fichiers pour éviter les appels répétés aux modèles d'IA.
- **Traitement par lots** : traitez plusieurs éléments dans une seule requête d'IA plutôt que d'effectuer des appels individuels.
## Suivi de l'utilisation
## Surveillance de l'utilisation
Surveillez votre utilisation et votre facturation dans Paramètres → Abonnement :
- **Utilisation actuelle** : utilisation et coûts en temps réel pour la période en cours
- **Limites d'utilisation** : limites du forfait avec indicateurs visuels de progression
- **Détails de facturation** : frais prévisionnels et engagements minimums
- **Limites d'utilisation** : limites du forfait avec indicateurs de progression visuels
- **Détails de facturation** : frais projetés et engagements minimums
- **Gestion du forfait** : options de mise à niveau et historique de facturation
### Suivi d'utilisation programmatique
### Suivi programmatique de l'utilisation
Vous pouvez interroger votre utilisation actuelle et vos limites par programmation en utilisant l'API :
Vous pouvez interroger votre utilisation et vos limites actuelles de manière programmatique à l'aide de l'API :
**Point de terminaison :**
@@ -172,14 +176,14 @@ curl -X GET -H "X-API-Key: YOUR_API_KEY" -H "Content-Type: application/json" htt
```
**Champs de limite de débit :**
- `requestsPerMinute` : limite de débit soutenu (les jetons se rechargent à ce rythme)
- `requestsPerMinute` : limite de débit soutenue (les jetons se rechargent à ce rythme)
- `maxBurst` : nombre maximum de jetons que vous pouvez accumuler (capacité de rafale)
- `remaining` : jetons actuellement disponibles (peut aller jusqu'à `maxBurst`)
**Champs de réponse :**
- `currentPeriodCost` reflète l'utilisation dans la période de facturation actuelle
- `limit` est dérivé des limites individuelles (Gratuit/Pro) ou des limites mutualisées de l'organisation (Équipe/Entreprise)
- `plan` est le plan actif de plus haute priorité associé à votre utilisateur
- `limit` est dérivé des limites individuelles (Free/Pro) ou des limites d'organisation mutualisées (Team/Enterprise)
- `plan` est le forfait actif de priorité la plus élevée associé à votre utilisateur
## Limites des forfaits
@@ -196,21 +200,21 @@ Les différents forfaits d'abonnement ont des limites d'utilisation différentes
Sim utilise un modèle de facturation **abonnement de base + dépassement** :
### Comment ça fonctionne
### Fonctionnement
**Forfait Pro (20 $/mois) :**
- L'abonnement mensuel inclut 20 $ d'utilisation
- Utilisation inférieure à 20 $ → Pas de frais supplémentaires
- Utilisation inférieure à 20 $ → Aucun frais supplémentaire
- Utilisation supérieure à 20 $ → Paiement du dépassement en fin de mois
- Exemple : 35 $ d'utilisation = 20 $ (abonnement) + 15 $ (dépassement)
**Forfait Équipe (40 $/siège/mois) :**
- Utilisation mutualisée pour tous les membres de l'équipe
- Dépassement calculé à partir de l'utilisation totale de l'équipe
**Forfait Équipe (40 $/utilisateur/mois) :**
- Utilisation mutualisée entre tous les membres de l'équipe
- Dépassement calculé sur l'utilisation totale de l'équipe
- Le propriétaire de l'organisation reçoit une seule facture
**Forfaits Entreprise :**
- Prix mensuel fixe, pas de dépassements
- Prix mensuel fixe, sans dépassement
- Limites d'utilisation personnalisées selon l'accord
### Facturation par seuil
@@ -220,21 +224,21 @@ Lorsque le dépassement non facturé atteint 50 $, Sim facture automatiquement l
**Exemple :**
- Jour 10 : 70 $ de dépassement → Facturation immédiate de 70 $
- Jour 15 : 35 $ d'utilisation supplémentaire (105 $ au total) → Déjà facturé, aucune action
- Jour 20 : 50 $ d'utilisation supplémentaire (155 $ au total, 85 $ non facturés) → Facturation immédiate de 85 $
- Jour 20 : 50 $ d'utilisation supplémentaire (155 $ au total, 85 $ non facturé) → Facturation immédiate de 85 $
Cela répartit les frais de dépassement importants tout au long du mois au lieu d'une seule facture importante en fin de période.
## Meilleures pratiques de gestion des coûts
## Bonnes pratiques de gestion des coûts
1. **Surveillez régulièrement** : vérifiez fréquemment votre tableau de bord d'utilisation pour éviter les surprises
2. **Définissez des budgets** : utilisez les limites du plan comme garde-fous pour vos dépenses
3. **Optimisez les flux de travail** : examinez les exécutions à coût élevé et optimisez les prompts ou la sélection de modèles
4. **Utilisez des modèles appropriés** : adaptez la complexité du modèle aux exigences de la tâche
5. **Regroupez les tâches similaires** : combinez plusieurs requêtes lorsque c'est possible pour réduire les frais généraux
1. **Surveillez régulièrement** : Consultez fréquemment votre tableau de bord d'utilisation pour éviter les surprises
2. **Définissez des budgets** : Utilisez les limites des forfaits comme garde-fous pour vos dépenses
3. **Optimisez les flux de travail** : Examinez les exécutions coûteuses et optimisez les prompts ou la sélection de modèles
4. **Utilisez les modèles appropriés** : Adaptez la complexité du modèle aux exigences de la tâche
5. **Regroupez les tâches similaires** : Combinez plusieurs requêtes lorsque c'est possible pour réduire les frais généraux
## Prochaines étapes
- Examinez votre utilisation actuelle dans [Paramètres → Abonnement](https://sim.ai/settings/subscription)
- Apprenez-en plus sur la [Journalisation](/execution/logging) pour suivre les détails d'exécution
- Consultez votre utilisation actuelle dans [Paramètres → Abonnement](https://sim.ai/settings/subscription)
- Découvrez la [journalisation](/execution/logging) pour suivre les détails d'exécution
- Explorez l'[API externe](/execution/api) pour la surveillance programmatique des coûts
- Consultez les [techniques d'optimisation de flux de travail](/blocks) pour réduire les coûts
- Consultez les [techniques d'optimisation des workflows](/blocks) pour réduire les coûts

View File

@@ -56,7 +56,7 @@ Vous devez déployer votre workflow pour que la planification commence à s'exé
## Désactivation automatique
Les planifications se désactivent automatiquement après **10 échecs consécutifs** pour éviter les erreurs incontrôlées. Lorsqu'elle est désactivée :
Les planifications se désactivent automatiquement après **100 échecs consécutifs** pour éviter les erreurs en cascade. Lorsqu'elles sont désactivées :
- Un badge d'avertissement apparaît sur le bloc de planification
- La planification cesse de s'exécuter

View File

@@ -105,43 +105,47 @@ AIブロックを使用するワークフローでは、ログで詳細なコス
表示価格は2025年9月10日時点のレートを反映しています。最新の価格については各プロバイダーのドキュメントをご確認ください。
</Callout>
## Bring Your Own Key (BYOK)
ホストされたモデルOpenAI、Anthropic、Google、Mistralに対して、**設定 → BYOK**で独自のAPIキーを使用し、基本価格で支払うことができます。キーは暗号化され、ワークスペース全体に適用されます。
## コスト最適化戦略
- **モデル選択**: タスクの複雑さに基づいてモデルを選択してください。単純なタスクにはGPT-4.1-nanoを使用し、複雑な推論にはo1やClaude Opusが必要場合があります。
- **プロンプトエンジニアリング**: 構造化された簡潔なプロンプトは、品質を犠牲にすることなくトークン使用量を削減します。
- **ローカルモデル**: 重要度の低いタスクにはOllamaやVLLMを使用して、API費用を完全に排除します。
- **キャッシュと再利用**: 頻繁に使用される結果を変数やファイルに保存して、AIモデル呼び出しの繰り返しを避けます。
- **モデル選択**: タスクの複雑さに基づいてモデルを選択します。シンプルなタスクにはGPT-4.1-nanoを使用し、複雑な推論にはo1やClaude Opusが必要になる場合があります。
- **プロンプトエンジニアリング**: 適切に構造化された簡潔なプロンプトは、品質を犠牲にすることなくトークン使用量を削減します。
- **ローカルモデル**: 重要度の低いタスクにはOllamaやVLLMを使用して、APIコストを完全に排除します。
- **キャッシュと再利用**: 頻繁に使用される結果を変数やファイルに保存して、AIモデルの繰り返し呼び出しを回避します。
- **バッチ処理**: 個別の呼び出しを行うのではなく、単一のAIリクエストで複数のアイテムを処理します。
## 使用状況モニタリング
## 使用状況の監視
設定 → サブスクリプションで使用状況と請求を監視できます
設定 → サブスクリプションで使用状況と請求を監視できます:
- **現在の使用状況**: 現在の期間のリアルタイムの使用状況とコスト
- **使用制限**: 視覚的な進捗指標付きのプラン制限
- **請求詳細**: 予測される料金と最低利用額
- **使用制限**: 視覚的な進行状況インジケーター付きのプラン制限
- **請求詳細**: 予測される料金と最低コミットメント
- **プラン管理**: アップグレードオプションと請求履歴
### プログラムによる使用状況の追跡
APIを使用して、現在の使用状況と制限をプログラムで照会できます
APIを使用して、現在の使用状況と制限をプログラムでクエリできます:
**エンドポイント**
**エンドポイント:**
```text
GET /api/users/me/usage-limits
```
**認証**
- APIキーを `X-API-Key` ヘッダーに含めてください
**認証:**
- `X-API-Key`ヘッダーにAPIキーを含めます
**リクエスト例**
**リクエスト例:**
```bash
curl -X GET -H "X-API-Key: YOUR_API_KEY" -H "Content-Type: application/json" https://sim.ai/api/users/me/usage-limits
```
**レスポンス例**
**レスポンス例:**
```json
{
@@ -171,70 +175,70 @@ curl -X GET -H "X-API-Key: YOUR_API_KEY" -H "Content-Type: application/json" htt
}
```
**レート制限フィールド**
- `requestsPerMinute`持続的なレート制限(トークンはこの速度で補充されます)
- `maxBurst`蓄積できる最大トークン数(バースト容量)
- `remaining`現在利用可能なトークン(最大`maxBurst`まで)
**レート制限フィールド:**
- `requestsPerMinute`: 持続的なレート制限(トークンはこのレートで補充されます)
- `maxBurst`: 蓄積できる最大トークン数(バースト容量)
- `remaining`: 現在利用可能なトークン(最大`maxBurst`まで)
**レスポンスフィールド**
**レスポンスフィールド:**
- `currentPeriodCost`は現在の請求期間の使用状況を反映します
- `limit`は個別の制限(無料/プロ)または組織のプール制限(チーム/エンタープライズ)から派生します
- `plan`はユーザーに関連付けられた最優先アクティブなプランです
- `limit`は個別の制限(Free/Proまたはプールされた組織の制限Team/Enterpriseから導出されます
- `plan`はユーザーに関連付けられた最優先度の高いアクティブなプランです
## プラン制限
## プラン制限
サブスクリプションプランによって使用制限が異なります
サブスクリプションプランによって使用量の制限が異なります
| プラン | 月間使用制限 | レート制限(毎分 |
| プラン | 月間使用制限 | レート制限(1分あたり |
|------|-------------------|-------------------------|
| **Free** | $20 | 同期5、非同期10 |
| **Pro** | $100 | 同期10、非同期50 |
| **Team** | $500プール | 同期50、非同期100 |
| **Enterprise** | カスタム | カスタム |
| **無料** | $20 | 同期5、非同期10 |
| **プロ** | $100 | 同期10、非同期50 |
| **チーム** | $500プール | 同期50、非同期100 |
| **エンタープライズ** | カスタム | カスタム |
## 課金モデル
Simは**基本サブスクリプション+超過分**の課金モデルを使用しています
Simは**基本サブスクリプション + 超過料金**の課金モデルを採用しています:
### 仕組み
**プロプラン(月額$20**
- 月額サブスクリプションには$20分の使用量が含まれます
- 使用量が$20未満 → 追加料金なし
- 使用量が$20を超える → 月末に超過分を支払い
- 例:$35の使用量 = $20サブスクリプション+ $15超過
- 使用量が$20超過 → 月末に超過分を支払い
- 例:使用量$35 = $20サブスクリプション+ $15超過料金
**チームプラン(席あたり月額$40**
- チームメンバー全体でプールされた使用量
- チーム全体の使用量から超過を計算
- 組織のオーナーが一括で請求を受け
**チームプラン(1席あたり月額$40**
- チームメンバー全員で使用量をプール
- チーム全体の使用量から超過料金を計算
- 組織のオーナーが1つの請求を受け取ります
**エンタープライズプラン:**
- 固定月額料金、超過料金なし
- 契約に基づくカスタム使用制限
- 契約に基づくカスタム使用制限
### しきい値課金
未請求の超過が$50に達すると、Simは自動的に未請求の全額を請求します。
未請求の超過料金が$50に達すると、Simは未請求金額の全額を自動的に請求します。
**例:**
- 10日目$70の超過分 → 即に$70を請求
- 15日目追加$35の使用(合計$105 → すでに請求済み、アクションなし
- 20日目さらに$50の使用合計$155、未請求$85 → 即に$85を請求
- 10日目超過料金$70 → 即に$70を請求
- 15日目追加使用量$35(合計$105 → すでに請求済み、アクションなし
- 20日目さらに$50の使用(合計$155、未請求$85 → 即に$85を請求
これにより、期間終了時に一度に大きな請求が発生するのではなく、月全体に大きな超過料金が分散されます。
これにより、期間終了時の1回の大きな請求ではなく、大きな超過料金を月全体に分散させることができます。
## コスト管理のベストプラクティス
1. **定期的な監視**: 予期せぬ事態を避けるため、使用状況ダッシュボードを頻繁に確認する
2. **予算の設定**: プランの制限を支出のガードレールとして使用する
3. **ワークフローの最適化**: コストの高い実行を見直し、プロンプトやモデル選択を最適化する
4. **適切なモデルの使用**: タスクの要件にモデルの複雑さを合わせる
5. **類似タスクのバッチ処理**: 可能な場合は複数のリクエストを組み合わせてオーバーヘッドを削減する
1. **定期的な監視**:予期しない事態を避けるため、使用状況ダッシュボードを頻繁に確認してください
2. **予算の設定**プランの制限を支出のガードレールとして使用してください
3. **ワークフローの最適化**コストの高い実行を確認し、プロンプトやモデル選択を最適化してください
4. **適切なモデルの使用**タスクの要件に合わせてモデルの複雑さを選択してください
5. **類似タスクのバッチ処理**可能な限り複数のリクエストを組み合わせてオーバーヘッドを削減してください
## 次のステップ
- [設定 → サブスクリプション](https://sim.ai/settings/subscription)で現在の使用状況を確認する
- 実行詳細を追跡するための[ロギング](/execution/logging)について学ぶ
- 実行詳細を追跡するための[ログ記録](/execution/logging)について学ぶ
- プログラムによるコスト監視のための[外部API](/execution/api)を探索する
- コスト削減ための[ワークフロー最適化テクニック](/blocks)をチェックする
- コスト削減するための[ワークフロー最適化テクニック](/blocks)を確認する

View File

@@ -56,7 +56,7 @@ import { Image } from '@/components/ui/image'
## 自動無効化
スケジュールは**10回連続で失敗**すると、エラーの連鎖を防ぐため自動的に無効化されます。無効化されると
スケジュールは**100回連続で失敗**すると、エラーの連鎖を防ぐため自動的に無効化されます。無効化されると:
- スケジュールブロックに警告バッジが表示されます
- スケジュールの実行が停止します

View File

@@ -105,43 +105,47 @@ totalCost = baseExecutionCharge + modelCost
显示的价格为截至 2025 年 9 月 10 日的费率。请查看提供商文档以获取最新价格。
</Callout>
## 自带密钥BYOK
你可以在 **设置 → BYOK** 中为托管模型OpenAI、Anthropic、Google、Mistral使用你自己的 API 密钥,以按基础价格计费。密钥会被加密,并在整个工作区范围内生效。
## 成本优化策略
- **模型选择**:根据任务复杂选择模型。简单任务可以使用 GPT-4.1-nano复杂推理可能需要 o1 或 Claude Opus。
- **提示工程**:结构良好、简洁的提示可以减少令牌使用,同时保质量。
- **本地模型**:对于非关键任务,使用 Ollama 或 VLLM 完全消除 API 成本。
- **缓存和重用**:将经常使用的结果存储在变量或文件中,避免重复调用 AI 模型。
- **批量处理**在单次 AI 请求处理多个项目,而不是逐一调用
- **模型选择**:根据任务复杂选择合适的模型。简单任务可用 GPT-4.1-nano复杂推理可 o1 或 Claude Opus。
- **提示工程**:结构清晰、简洁的提示能减少 token 使用,同时保质量。
- **本地模型**:对于非关键任务,使用 Ollama 或 VLLM,可完全消除 API 成本。
- **缓存与复用**:将常用结果存储在变量或文件中,避免重复调用 AI 模型。
- **批量处理**次 AI 请求处理多个项目,减少单独调用次数
## 使用监控
在 设置 → 订阅 中监控您的使用情况和账单:
你可以在 设置 → 订阅 中监控你的用量和账单:
- **当前使用情况**:当前周期的实时使用和成本
- **使用限制**:计划限制及其可视化进度指示
- **账单详情**:预计费用和最低承诺
- **计划管理**:升级选项和账单历史记录
- **当前用量**:当前周期的实时用量和费用
- **用量上限**:带有可视化进度指示的套餐限制
- **账单明细**:预计费用和最低承诺金额
- **套餐管理**:升级选项和账单历史
### 程序化使用跟
### 编程方式用量追
可以通过 API 程序化地查询当前的使用情况和限制:
可以通过 API 以编程方式查询当前用量和限制:
**端点**
**接口地址**
```text
GET /api/users/me/usage-limits
```
**认证:**
- 在 `X-API-Key` 标头中包含的 API 密钥
**认证方式**
- 在 `X-API-Key` header 中包含你的 API 密钥
**示例请求:**
**请求示例**
```bash
curl -X GET -H "X-API-Key: YOUR_API_KEY" -H "Content-Type: application/json" https://sim.ai/api/users/me/usage-limits
```
**示例响应:**
**响应示例**
```json
{
@@ -171,70 +175,70 @@ curl -X GET -H "X-API-Key: YOUR_API_KEY" -H "Content-Type: application/json" htt
}
```
**速率限制字段:**
- `requestsPerMinute`:持续速率限制(令牌以此速率补充)
- `maxBurst`您可以累积的最大令牌数(突发容量)
- `remaining`:当前可用令牌数(最多可达 `maxBurst`
**限流字段:**
- `requestsPerMinute`:持续速率限制(token 按此速率补充)
- `maxBurst`你可累计的最大 token 数(突发容量)
- `remaining`:当前可用 token 数(最多可达 `maxBurst`
**响应字段:**
- `currentPeriodCost` 反映当前计费周期的使用情况
- `limit` 来源于个人限制(免费/专业)或组织池限制(团队/企业
- `plan` 是与的用户关联的最高优先级的活动计划
- `currentPeriodCost` 反映当前账单周期的用量
- `limit` 来源于个人限Free/Pro或组织池化限额Team/Enterprise
- `plan` 是与你的用户关联的最高优先级的激活套餐
## 计划限制
## 套餐限制
不同的订阅计划有不同的使用限制:
不同的订阅套餐有不同的使用限制:
| 方案 | 每月使用额 | 速率限制(每分钟) |
| 套餐 | 每月使用额 | 速率限制(每分钟) |
|------|-------------------|-------------------------|
| **Free** | $20 | 5 sync10 async |
| **Pro** | $100 | 10 sync50 async |
| **Team** | $500共享 | 50 sync100 async |
| **Enterprise** | 定制 | 定制 |
| **Enterprise** | 自定义 | 自定义 |
## 计费模式
Sim 使用 **基础订阅 + 超额**计费模式:
Sim 采用**基础订阅 + 超额**计费模式:
### 工作原理
### 计费方式说明
**专业计划$20/月):**
- 月订阅包含 $20 使用额度
- 使用低于 $20 → 无额外费用
- 使用超过 $20 → 月底支付超额部分
**Pro 套餐$20/月):**
- 月订阅包含 $20 使用额度
- 使用未超过 $20 → 无额外费用
- 使用超过 $20 → 月底结算超额部分
- 示例:$35 使用 = $20订阅+ $15超额
**团队计划$40/每席位/月):**
- 团队成员之间共享使用额度
- 超额费用根据团队总使用量计算
- 组织所有者收到一张账单
**Team 套餐$40//月):**
- 团队成员共享使用额度
- 超额费用团队总用量计算
- 账单由组织所有者统一支付
**企业计划**
**Enterprise 套餐**
- 固定月费,无超额费用
- 根据协议自定义使用限
- 使用额度可按协议定
### 阈值计费
当未计费的超额费用达到 $50 时Sim 会自动计费全额未计费金额。
当未结算的超额费用达到 $50 时Sim 会自动结算全部未结算金额。
**示例:**
- 第 10 天:$70 超额 → 立即计费 $70
- 第 15 天:额外使用 $35计 $105→ 已计费,无需操作
- 第 20 天:再使用 $50计 $155计费 $85→ 立即计费 $85
- 第 10 天:超额 $70 → 立即结算 $70
- 第 15 天:新增 $35 使用(累计 $105→ 已结算,无需操作
- 第 20 天:再用 $50计 $155结算 $85→ 立即结算 $85
会将大量的超额费用分散到整个月,而不是在周期结束时收到一张大账单。
样可以将大额超额费用分摊到每月多次结算,避免期末一次性大额账单。
## 成本管理最佳实践
1. **定期监控**:经常检查您的使用仪表,避免意外情况
2. **设预算**使用计划限制作为支出控制的护栏
3. **优化工作流程**查高成本执行操作,优化提示或模型选择
4. **使用合适模型**:根据任务需求匹配模型复杂度
5. **批量处理相似任务**:尽可能合并多个请求减少开销
1. **定期监控**:经常查看用量仪表,避免意外支出
2. **设预算**用套餐额度作为支出警戒线
3. **优化流程**查高成本执行,优化提示或模型选择
4. **选择合适模型**:根据任务需求匹配模型复杂度
5. **批量处理相似任务**:尽量合并请求减少额外开销
## 下一步
- 在[设置 → 订阅](https://sim.ai/settings/subscription)中查看您当前的使用情况
- 了解[日志记录](/execution/logging)以跟踪执行详情
- 探索[外部 API](/execution/api)以进行程序化成本监控
- 查看[工作流优化技](/blocks)以降低成本
- 在 [设置 → 订阅](https://sim.ai/settings/subscription) 中查看您当前的使用情况
- 了解 [日志记录](/execution/logging)以跟踪执行详情
- 探索 [外部 API](/execution/api),实现程序化成本监控
- 查看 [工作流优化技](/blocks)以降低成本

View File

@@ -56,7 +56,7 @@ import { Image } from '@/components/ui/image'
## 自动禁用
计划在连续 **10 次失败** 后会自动禁用,以防止错误持续发生。禁用后:
为防止持续性错误,计划任务在**连续失败 100 次**后会自动禁用。禁用后:
- 计划块上会显示警告徽章
- 计划将停止执行

View File

@@ -228,7 +228,7 @@ checksums:
content/8: ab4fe131de634064f9a7744a11599434
content/9: 2f6c9564a33ad9f752df55840b0c8e16
content/10: fef34568e5bbd5a50e2a89412f85302c
content/11: b7ae0ecf6fbaa92b049c718720e4007e
content/11: a891bfb5cf490148001f05acde467f68
content/12: bcd95e6bef30b6f480fee33800928b13
content/13: 2ff1c8bf00c740f66bce8a4a7f768ca8
content/14: 16eb64906b9e981ea3c11525ff5a1c2e
@@ -4581,39 +4581,41 @@ checksums:
content/19: 83fc31418ff454a5e06b290e3708ef32
content/20: 4392b5939a6d5774fb080cad1ee1dbb8
content/21: 890b65b7326a9eeef3933a8b63f6ccdd
content/22: 892d6a80d8ac5a895a20408462f63cc5
content/23: 930176b3786ebbe9eb1f76488f183140
content/24: 22d9d167630c581e868d6d7a9fdddbcf
content/25: d250621762d63cd87b3359236c95bdac
content/26: 50be8ae73b8ce27de7ddd21964ee29e8
content/27: cd622841b5bc748a7b2a0d9252e72bd5
content/28: 38608a5d416eb33f373c6f9e6bf546b9
content/29: 074c12c794283c3af53a3f038fbda2a6
content/30: 5cdcf7e32294e087612b77914d850d26
content/31: 7529829b2f064fedf956da639aaea8e1
content/32: 7b5e2207a0d93fd434b92f2f290a8dd5
content/33: f950b8f58af1973a3e00393d860bce02
content/34: d5ff07fec9455183e1d93f7ddf1dab1b
content/35: 5d2d85e082d9fdd3859fb5c788d5f9a3
content/36: 23a7de9c5adb6e07c28c23a9d4e03dc2
content/37: 7bb928aba33a4013ad5f08487da5bbf9
content/38: dbbf313837f13ddfa4a8843d71cb9cc4
content/39: cf10560ae6defb8ee5da344fc6509f6e
content/40: 1dea5c6442c127ae290185db0cef067b
content/41: 332dab0588fb35dabb64b674ba6120eb
content/42: 714b3f99b0a8686bbb3434deb1f682b3
content/43: ba18ac99184b17d7e49bd1abdc814437
content/44: bed2b629274d55c38bd637e6a28dbc4a
content/45: 71487ae6f6fb1034d1787456de442e6d
content/46: 137d9874cf5ec8d09bd447f224cc7a7c
content/47: 6b5b4c3b2f98b8fc7dd908fef2605ce8
content/48: 3af6812662546ce647a55939241fd88e
content/49: 6a4d7f0ccb8c28303251d1ef7b3dcca7
content/50: 5dce779f77cc2b0abf12802a833df499
content/51: aa47ff01b631252f024eaaae0c773e42
content/52: 1266d1c7582bb617cdef56857be34f30
content/53: c2cef2688104adaf6641092f43d4969a
content/54: 089fc64b4589b2eaa371de7e04c4aed9
content/22: ada515cf6e2e0f9d3f57f720f79699d3
content/23: 332e0d08f601da9fb56c6b7e7c8e9daf
content/24: 892d6a80d8ac5a895a20408462f63cc5
content/25: 930176b3786ebbe9eb1f76488f183140
content/26: 22d9d167630c581e868d6d7a9fdddbcf
content/27: d250621762d63cd87b3359236c95bdac
content/28: 50be8ae73b8ce27de7ddd21964ee29e8
content/29: cd622841b5bc748a7b2a0d9252e72bd5
content/30: 38608a5d416eb33f373c6f9e6bf546b9
content/31: 074c12c794283c3af53a3f038fbda2a6
content/32: 5cdcf7e32294e087612b77914d850d26
content/33: 7529829b2f064fedf956da639aaea8e1
content/34: 7b5e2207a0d93fd434b92f2f290a8dd5
content/35: f950b8f58af1973a3e00393d860bce02
content/36: d5ff07fec9455183e1d93f7ddf1dab1b
content/37: 5d2d85e082d9fdd3859fb5c788d5f9a3
content/38: 23a7de9c5adb6e07c28c23a9d4e03dc2
content/39: 7bb928aba33a4013ad5f08487da5bbf9
content/40: dbbf313837f13ddfa4a8843d71cb9cc4
content/41: cf10560ae6defb8ee5da344fc6509f6e
content/42: 1dea5c6442c127ae290185db0cef067b
content/43: 332dab0588fb35dabb64b674ba6120eb
content/44: 714b3f99b0a8686bbb3434deb1f682b3
content/45: ba18ac99184b17d7e49bd1abdc814437
content/46: bed2b629274d55c38bd637e6a28dbc4a
content/47: 71487ae6f6fb1034d1787456de442e6d
content/48: 137d9874cf5ec8d09bd447f224cc7a7c
content/49: 6b5b4c3b2f98b8fc7dd908fef2605ce8
content/50: 3af6812662546ce647a55939241fd88e
content/51: 6a4d7f0ccb8c28303251d1ef7b3dcca7
content/52: 5dce779f77cc2b0abf12802a833df499
content/53: aa47ff01b631252f024eaaae0c773e42
content/54: 1266d1c7582bb617cdef56857be34f30
content/55: c2cef2688104adaf6641092f43d4969a
content/56: 089fc64b4589b2eaa371de7e04c4aed9
722959335ba76c9d0097860e2ad5a952:
meta/title: 1f5b53b9904ec41d49c1e726e3d56b40
content/0: c2b41859d63a751682f0d9aec488e581

4
apps/docs/lib/db.ts Normal file
View File

@@ -0,0 +1,4 @@
import { db } from '@sim/db'
import { docsEmbeddings } from '@sim/db/schema'
export { db, docsEmbeddings }

View File

@@ -0,0 +1,40 @@
/**
 * Produce an embedding vector for a documentation search query via the
 * OpenAI embeddings endpoint (model: text-embedding-3-small).
 *
 * @param query - Raw search text to embed.
 * @returns The embedding as an array of floats.
 * @throws Error if OPENAI_API_KEY is unset, the HTTP request fails,
 *         or the response payload lacks a valid embedding.
 */
export async function generateSearchEmbedding(query: string): Promise<number[]> {
  const apiKey = process.env.OPENAI_API_KEY
  if (!apiKey) {
    throw new Error('OPENAI_API_KEY environment variable is required')
  }

  // Request a float-encoded embedding for the query text.
  const requestBody = JSON.stringify({
    input: query,
    model: 'text-embedding-3-small',
    encoding_format: 'float',
  })

  const response = await fetch('https://api.openai.com/v1/embeddings', {
    method: 'POST',
    headers: {
      Authorization: `Bearer ${apiKey}`,
      'Content-Type': 'application/json',
    },
    body: requestBody,
  })

  // Surface HTTP-level failures with the full upstream error body for debugging.
  if (!response.ok) {
    const errorText = await response.text()
    throw new Error(`OpenAI API failed: ${response.status} ${response.statusText} - ${errorText}`)
  }

  const data = await response.json()

  // Validate the payload shape before trusting it: a non-empty `data` array…
  const entries = data?.data
  if (!entries || !Array.isArray(entries) || entries.length === 0) {
    throw new Error('OpenAI API returned invalid response structure: missing or empty data array')
  }

  // …whose first element carries an array-valued `embedding`.
  const embedding = entries[0]?.embedding
  if (!embedding || !Array.isArray(embedding)) {
    throw new Error('OpenAI API returned invalid response structure: missing or invalid embedding')
  }

  return embedding
}

View File

@@ -11,16 +11,19 @@
"type-check": "tsc --noEmit"
},
"dependencies": {
"@sim/db": "workspace:*",
"@tabler/icons-react": "^3.31.0",
"@vercel/og": "^0.6.5",
"class-variance-authority": "^0.7.1",
"clsx": "^2.1.1",
"drizzle-orm": "^0.44.5",
"fumadocs-core": "16.2.3",
"fumadocs-mdx": "14.1.0",
"fumadocs-ui": "16.2.3",
"lucide-react": "^0.511.0",
"next": "16.1.0-canary.21",
"next-themes": "^0.4.6",
"postgres": "^3.4.5",
"react": "19.2.1",
"react-dom": "19.2.1",
"tailwind-merge": "^3.0.2"

View File

@@ -20,7 +20,7 @@ interface NavProps {
}
export default function Nav({ hideAuthButtons = false, variant = 'landing' }: NavProps = {}) {
const [githubStars, setGithubStars] = useState('24k')
const [githubStars, setGithubStars] = useState('24.4k')
const [isHovered, setIsHovered] = useState(false)
const [isLoginHovered, setIsLoginHovered] = useState(false)
const router = useRouter()

View File

@@ -1,6 +1,9 @@
import { createMockLogger as createSimTestingMockLogger } from '@sim/testing'
import { NextRequest } from 'next/server'
import { vi } from 'vitest'
export { createMockLogger } from '@sim/testing'
export interface MockUser {
id: string
email: string
@@ -214,12 +217,11 @@ export const mockDb = {
})),
}
export const mockLogger = {
info: vi.fn(),
warn: vi.fn(),
error: vi.fn(),
debug: vi.fn(),
}
/**
* Mock logger using @sim/testing createMockLogger.
* This provides a consistent mock logger across all API tests.
*/
export const mockLogger = createSimTestingMockLogger()
export const mockUser = {
id: 'user-123',
@@ -729,7 +731,8 @@ export function mockKnowledgeSchemas() {
}
/**
* Mock console logger
* Mock console logger using the shared mockLogger instance.
* This ensures tests can assert on the same mockLogger instance exported from this module.
*/
export function mockConsoleLogger() {
vi.doMock('@/lib/logs/console/logger', () => ({

View File

@@ -4,7 +4,7 @@
* @vitest-environment node
*/
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import { createMockRequest } from '@/app/api/__test-utils__/utils'
import { createMockLogger, createMockRequest } from '@/app/api/__test-utils__/utils'
describe('OAuth Connections API Route', () => {
const mockGetSession = vi.fn()
@@ -14,12 +14,7 @@ describe('OAuth Connections API Route', () => {
where: vi.fn().mockReturnThis(),
limit: vi.fn(),
}
const mockLogger = {
info: vi.fn(),
warn: vi.fn(),
error: vi.fn(),
debug: vi.fn(),
}
const mockLogger = createMockLogger()
const mockParseProvider = vi.fn()
const mockEvaluateScopeCoverage = vi.fn()

View File

@@ -6,6 +6,7 @@
import { NextRequest } from 'next/server'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import { createMockLogger } from '@/app/api/__test-utils__/utils'
describe('OAuth Credentials API Route', () => {
const mockGetSession = vi.fn()
@@ -17,12 +18,7 @@ describe('OAuth Credentials API Route', () => {
where: vi.fn().mockReturnThis(),
limit: vi.fn(),
}
const mockLogger = {
info: vi.fn(),
warn: vi.fn(),
error: vi.fn(),
debug: vi.fn(),
}
const mockLogger = createMockLogger()
const mockUUID = 'mock-uuid-12345678-90ab-cdef-1234-567890abcdef'

View File

@@ -4,7 +4,7 @@
* @vitest-environment node
*/
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import { createMockRequest } from '@/app/api/__test-utils__/utils'
import { createMockLogger, createMockRequest } from '@/app/api/__test-utils__/utils'
describe('OAuth Disconnect API Route', () => {
const mockGetSession = vi.fn()
@@ -12,12 +12,7 @@ describe('OAuth Disconnect API Route', () => {
delete: vi.fn().mockReturnThis(),
where: vi.fn(),
}
const mockLogger = {
info: vi.fn(),
warn: vi.fn(),
error: vi.fn(),
debug: vi.fn(),
}
const mockLogger = createMockLogger()
const mockUUID = 'mock-uuid-12345678-90ab-cdef-1234-567890abcdef'

View File

@@ -4,7 +4,7 @@
* @vitest-environment node
*/
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import { createMockRequest } from '@/app/api/__test-utils__/utils'
import { createMockLogger, createMockRequest } from '@/app/api/__test-utils__/utils'
describe('OAuth Token API Routes', () => {
const mockGetUserId = vi.fn()
@@ -13,12 +13,7 @@ describe('OAuth Token API Routes', () => {
const mockAuthorizeCredentialUse = vi.fn()
const mockCheckHybridAuth = vi.fn()
const mockLogger = {
info: vi.fn(),
warn: vi.fn(),
error: vi.fn(),
debug: vi.fn(),
}
const mockLogger = createMockLogger()
const mockUUID = 'mock-uuid-12345678-90ab-cdef-1234-567890abcdef'
const mockRequestId = mockUUID.slice(0, 8)

View File

@@ -3,9 +3,11 @@
*
* @vitest-environment node
*/
import { createSession, loggerMock } from '@sim/testing'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
const mockSession = { user: { id: 'test-user-id' } }
const mockSession = createSession({ userId: 'test-user-id' })
const mockGetSession = vi.fn()
vi.mock('@/lib/auth', () => ({
@@ -29,14 +31,7 @@ vi.mock('@/lib/oauth/oauth', () => ({
OAUTH_PROVIDERS: {},
}))
vi.mock('@/lib/logs/console/logger', () => ({
createLogger: vi.fn().mockReturnValue({
info: vi.fn(),
warn: vi.fn(),
error: vi.fn(),
debug: vi.fn(),
}),
}))
vi.mock('@/lib/logs/console/logger', () => loggerMock)
import { db } from '@sim/db'
import { refreshOAuthToken } from '@/lib/oauth'
@@ -47,14 +42,14 @@ import {
refreshTokenIfNeeded,
} from '@/app/api/auth/oauth/utils'
const mockDb = db as any
const mockDbTyped = db as any
const mockRefreshOAuthToken = refreshOAuthToken as any
describe('OAuth Utils', () => {
beforeEach(() => {
vi.clearAllMocks()
mockGetSession.mockResolvedValue(mockSession)
mockDb.limit.mockReturnValue([])
mockDbTyped.limit.mockReturnValue([])
})
afterEach(() => {
@@ -69,14 +64,14 @@ describe('OAuth Utils', () => {
})
it('should get user ID from workflow when workflowId is provided', async () => {
mockDb.limit.mockReturnValueOnce([{ userId: 'workflow-owner-id' }])
mockDbTyped.limit.mockReturnValueOnce([{ userId: 'workflow-owner-id' }])
const userId = await getUserId('request-id', 'workflow-id')
expect(mockDb.select).toHaveBeenCalled()
expect(mockDb.from).toHaveBeenCalled()
expect(mockDb.where).toHaveBeenCalled()
expect(mockDb.limit).toHaveBeenCalledWith(1)
expect(mockDbTyped.select).toHaveBeenCalled()
expect(mockDbTyped.from).toHaveBeenCalled()
expect(mockDbTyped.where).toHaveBeenCalled()
expect(mockDbTyped.limit).toHaveBeenCalledWith(1)
expect(userId).toBe('workflow-owner-id')
})
@@ -89,7 +84,7 @@ describe('OAuth Utils', () => {
})
it('should return undefined if workflow is not found', async () => {
mockDb.limit.mockReturnValueOnce([])
mockDbTyped.limit.mockReturnValueOnce([])
const userId = await getUserId('request-id', 'nonexistent-workflow-id')
@@ -100,20 +95,20 @@ describe('OAuth Utils', () => {
describe('getCredential', () => {
it('should return credential when found', async () => {
const mockCredential = { id: 'credential-id', userId: 'test-user-id' }
mockDb.limit.mockReturnValueOnce([mockCredential])
mockDbTyped.limit.mockReturnValueOnce([mockCredential])
const credential = await getCredential('request-id', 'credential-id', 'test-user-id')
expect(mockDb.select).toHaveBeenCalled()
expect(mockDb.from).toHaveBeenCalled()
expect(mockDb.where).toHaveBeenCalled()
expect(mockDb.limit).toHaveBeenCalledWith(1)
expect(mockDbTyped.select).toHaveBeenCalled()
expect(mockDbTyped.from).toHaveBeenCalled()
expect(mockDbTyped.where).toHaveBeenCalled()
expect(mockDbTyped.limit).toHaveBeenCalledWith(1)
expect(credential).toEqual(mockCredential)
})
it('should return undefined when credential is not found', async () => {
mockDb.limit.mockReturnValueOnce([])
mockDbTyped.limit.mockReturnValueOnce([])
const credential = await getCredential('request-id', 'nonexistent-id', 'test-user-id')
@@ -127,7 +122,7 @@ describe('OAuth Utils', () => {
id: 'credential-id',
accessToken: 'valid-token',
refreshToken: 'refresh-token',
accessTokenExpiresAt: new Date(Date.now() + 3600 * 1000), // 1 hour in the future
accessTokenExpiresAt: new Date(Date.now() + 3600 * 1000),
providerId: 'google',
}
@@ -142,7 +137,7 @@ describe('OAuth Utils', () => {
id: 'credential-id',
accessToken: 'expired-token',
refreshToken: 'refresh-token',
accessTokenExpiresAt: new Date(Date.now() - 3600 * 1000), // 1 hour in the past
accessTokenExpiresAt: new Date(Date.now() - 3600 * 1000),
providerId: 'google',
}
@@ -155,8 +150,8 @@ describe('OAuth Utils', () => {
const result = await refreshTokenIfNeeded('request-id', mockCredential, 'credential-id')
expect(mockRefreshOAuthToken).toHaveBeenCalledWith('google', 'refresh-token')
expect(mockDb.update).toHaveBeenCalled()
expect(mockDb.set).toHaveBeenCalled()
expect(mockDbTyped.update).toHaveBeenCalled()
expect(mockDbTyped.set).toHaveBeenCalled()
expect(result).toEqual({ accessToken: 'new-token', refreshed: true })
})
@@ -165,7 +160,7 @@ describe('OAuth Utils', () => {
id: 'credential-id',
accessToken: 'expired-token',
refreshToken: 'refresh-token',
accessTokenExpiresAt: new Date(Date.now() - 3600 * 1000), // 1 hour in the past
accessTokenExpiresAt: new Date(Date.now() - 3600 * 1000),
providerId: 'google',
}
@@ -181,7 +176,7 @@ describe('OAuth Utils', () => {
id: 'credential-id',
accessToken: 'token',
refreshToken: null,
accessTokenExpiresAt: new Date(Date.now() - 3600 * 1000), // 1 hour in the past
accessTokenExpiresAt: new Date(Date.now() - 3600 * 1000),
providerId: 'google',
}
@@ -198,11 +193,11 @@ describe('OAuth Utils', () => {
id: 'credential-id',
accessToken: 'valid-token',
refreshToken: 'refresh-token',
accessTokenExpiresAt: new Date(Date.now() + 3600 * 1000), // 1 hour in the future
accessTokenExpiresAt: new Date(Date.now() + 3600 * 1000),
providerId: 'google',
userId: 'test-user-id',
}
mockDb.limit.mockReturnValueOnce([mockCredential])
mockDbTyped.limit.mockReturnValueOnce([mockCredential])
const token = await refreshAccessTokenIfNeeded('credential-id', 'test-user-id', 'request-id')
@@ -215,11 +210,11 @@ describe('OAuth Utils', () => {
id: 'credential-id',
accessToken: 'expired-token',
refreshToken: 'refresh-token',
accessTokenExpiresAt: new Date(Date.now() - 3600 * 1000), // 1 hour in the past
accessTokenExpiresAt: new Date(Date.now() - 3600 * 1000),
providerId: 'google',
userId: 'test-user-id',
}
mockDb.limit.mockReturnValueOnce([mockCredential])
mockDbTyped.limit.mockReturnValueOnce([mockCredential])
mockRefreshOAuthToken.mockResolvedValueOnce({
accessToken: 'new-token',
@@ -230,13 +225,13 @@ describe('OAuth Utils', () => {
const token = await refreshAccessTokenIfNeeded('credential-id', 'test-user-id', 'request-id')
expect(mockRefreshOAuthToken).toHaveBeenCalledWith('google', 'refresh-token')
expect(mockDb.update).toHaveBeenCalled()
expect(mockDb.set).toHaveBeenCalled()
expect(mockDbTyped.update).toHaveBeenCalled()
expect(mockDbTyped.set).toHaveBeenCalled()
expect(token).toBe('new-token')
})
it('should return null if credential not found', async () => {
mockDb.limit.mockReturnValueOnce([])
mockDbTyped.limit.mockReturnValueOnce([])
const token = await refreshAccessTokenIfNeeded('nonexistent-id', 'test-user-id', 'request-id')
@@ -248,11 +243,11 @@ describe('OAuth Utils', () => {
id: 'credential-id',
accessToken: 'expired-token',
refreshToken: 'refresh-token',
accessTokenExpiresAt: new Date(Date.now() - 3600 * 1000), // 1 hour in the past
accessTokenExpiresAt: new Date(Date.now() - 3600 * 1000),
providerId: 'google',
userId: 'test-user-id',
}
mockDb.limit.mockReturnValueOnce([mockCredential])
mockDbTyped.limit.mockReturnValueOnce([mockCredential])
mockRefreshOAuthToken.mockResolvedValueOnce(null)

View File

@@ -0,0 +1,550 @@
/**
* Tests for chat OTP API route
*
* @vitest-environment node
*/
import { NextRequest } from 'next/server'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
describe('Chat OTP API Route', () => {
const mockEmail = 'test@example.com'
const mockChatId = 'chat-123'
const mockIdentifier = 'test-chat'
const mockOTP = '123456'
const mockRedisSet = vi.fn()
const mockRedisGet = vi.fn()
const mockRedisDel = vi.fn()
const mockGetRedisClient = vi.fn()
const mockDbSelect = vi.fn()
const mockDbInsert = vi.fn()
const mockDbDelete = vi.fn()
const mockSendEmail = vi.fn()
const mockRenderOTPEmail = vi.fn()
const mockAddCorsHeaders = vi.fn()
const mockCreateSuccessResponse = vi.fn()
const mockCreateErrorResponse = vi.fn()
const mockSetChatAuthCookie = vi.fn()
const mockGenerateRequestId = vi.fn()
let storageMethod: 'redis' | 'database' = 'redis'
beforeEach(() => {
vi.resetModules()
vi.clearAllMocks()
vi.spyOn(Math, 'random').mockReturnValue(0.123456)
vi.spyOn(Date, 'now').mockReturnValue(1640995200000)
vi.stubGlobal('crypto', {
...crypto,
randomUUID: vi.fn().mockReturnValue('test-uuid-1234'),
})
const mockRedisClient = {
set: mockRedisSet,
get: mockRedisGet,
del: mockRedisDel,
}
mockGetRedisClient.mockReturnValue(mockRedisClient)
mockRedisSet.mockResolvedValue('OK')
mockRedisGet.mockResolvedValue(null)
mockRedisDel.mockResolvedValue(1)
vi.doMock('@/lib/core/config/redis', () => ({
getRedisClient: mockGetRedisClient,
}))
const createDbChain = (result: any) => ({
from: vi.fn().mockReturnValue({
where: vi.fn().mockReturnValue({
limit: vi.fn().mockResolvedValue(result),
}),
}),
})
mockDbSelect.mockImplementation(() => createDbChain([]))
mockDbInsert.mockImplementation(() => ({
values: vi.fn().mockResolvedValue(undefined),
}))
mockDbDelete.mockImplementation(() => ({
where: vi.fn().mockResolvedValue(undefined),
}))
vi.doMock('@sim/db', () => ({
db: {
select: mockDbSelect,
insert: mockDbInsert,
delete: mockDbDelete,
transaction: vi.fn(async (callback) => {
return callback({
select: mockDbSelect,
insert: mockDbInsert,
delete: mockDbDelete,
})
}),
},
}))
vi.doMock('@sim/db/schema', () => ({
chat: {
id: 'id',
authType: 'authType',
allowedEmails: 'allowedEmails',
title: 'title',
},
verification: {
id: 'id',
identifier: 'identifier',
value: 'value',
expiresAt: 'expiresAt',
createdAt: 'createdAt',
updatedAt: 'updatedAt',
},
}))
vi.doMock('drizzle-orm', () => ({
eq: vi.fn((field, value) => ({ field, value, type: 'eq' })),
and: vi.fn((...conditions) => ({ conditions, type: 'and' })),
gt: vi.fn((field, value) => ({ field, value, type: 'gt' })),
lt: vi.fn((field, value) => ({ field, value, type: 'lt' })),
}))
vi.doMock('@/lib/core/storage', () => ({
getStorageMethod: vi.fn(() => storageMethod),
}))
mockSendEmail.mockResolvedValue({ success: true })
mockRenderOTPEmail.mockResolvedValue('<html>OTP Email</html>')
vi.doMock('@/lib/messaging/email/mailer', () => ({
sendEmail: mockSendEmail,
}))
vi.doMock('@/components/emails/render-email', () => ({
renderOTPEmail: mockRenderOTPEmail,
}))
mockAddCorsHeaders.mockImplementation((response) => response)
mockCreateSuccessResponse.mockImplementation((data) => ({
json: () => Promise.resolve(data),
status: 200,
}))
mockCreateErrorResponse.mockImplementation((message, status) => ({
json: () => Promise.resolve({ error: message }),
status,
}))
vi.doMock('@/app/api/chat/utils', () => ({
addCorsHeaders: mockAddCorsHeaders,
setChatAuthCookie: mockSetChatAuthCookie,
}))
vi.doMock('@/app/api/workflows/utils', () => ({
createSuccessResponse: mockCreateSuccessResponse,
createErrorResponse: mockCreateErrorResponse,
}))
vi.doMock('@/lib/logs/console/logger', () => ({
createLogger: vi.fn().mockReturnValue({
info: vi.fn(),
error: vi.fn(),
warn: vi.fn(),
debug: vi.fn(),
}),
}))
vi.doMock('zod', () => ({
z: {
object: vi.fn().mockReturnValue({
parse: vi.fn().mockImplementation((data) => data),
}),
string: vi.fn().mockReturnValue({
email: vi.fn().mockReturnThis(),
length: vi.fn().mockReturnThis(),
}),
},
}))
mockGenerateRequestId.mockReturnValue('req-123')
vi.doMock('@/lib/core/utils/request', () => ({
generateRequestId: mockGenerateRequestId,
}))
})
afterEach(() => {
vi.restoreAllMocks()
})
// When the storage method resolves to Redis, the OTP must be written there
// (with a TTL) and the database insert path must stay idle.
describe('POST - Store OTP (Redis path)', () => {
beforeEach(() => {
storageMethod = 'redis'
})
it('should store OTP in Redis when storage method is redis', async () => {
const { POST } = await import('./route')
// Chat lookup resolves to an email-gated chat that allows the requester.
const chatRow = {
id: mockChatId,
authType: 'email',
allowedEmails: [mockEmail],
title: 'Test Chat',
}
mockDbSelect.mockImplementationOnce(() => ({
from: vi.fn().mockReturnValue({
where: vi.fn().mockReturnValue({
limit: vi.fn().mockResolvedValue([chatRow]),
}),
}),
}))
const req = new NextRequest('http://localhost:3000/api/chat/test/otp', {
method: 'POST',
body: JSON.stringify({ email: mockEmail }),
})
await POST(req, { params: Promise.resolve({ identifier: mockIdentifier }) })
// OTP lands in Redis under the per-email/per-chat key with a 15-minute TTL.
expect(mockRedisSet).toHaveBeenCalledWith(
`otp:${mockEmail}:${mockChatId}`,
expect.any(String),
'EX',
900 // 15 minutes
)
// The database path must not run when Redis handles storage.
expect(mockDbInsert).not.toHaveBeenCalled()
})
})
// When no Redis client is available, the OTP is persisted in the verification
// table via a delete-then-insert, and Redis is never touched.
describe('POST - Store OTP (Database path)', () => {
beforeEach(() => {
storageMethod = 'database'
// Simulate a deployment with no Redis client configured.
mockGetRedisClient.mockReturnValue(null)
})
it('should store OTP in database when storage method is database', async () => {
const { POST } = await import('./route')
// First db.select(): chat lookup succeeds for an email-gated chat.
mockDbSelect.mockImplementationOnce(() => ({
from: vi.fn().mockReturnValue({
where: vi.fn().mockReturnValue({
limit: vi.fn().mockResolvedValue([
{
id: mockChatId,
authType: 'email',
allowedEmails: [mockEmail],
title: 'Test Chat',
},
]),
}),
}),
}))
// Capture insert values so the stored row shape can be asserted below.
const mockInsertValues = vi.fn().mockResolvedValue(undefined)
mockDbInsert.mockImplementationOnce(() => ({
values: mockInsertValues,
}))
const mockDeleteWhere = vi.fn().mockResolvedValue(undefined)
mockDbDelete.mockImplementation(() => ({
where: mockDeleteWhere,
}))
const request = new NextRequest('http://localhost:3000/api/chat/test/otp', {
method: 'POST',
body: JSON.stringify({ email: mockEmail }),
})
await POST(request, { params: Promise.resolve({ identifier: mockIdentifier }) })
// Stale OTP rows are cleared before the fresh one is inserted.
expect(mockDbDelete).toHaveBeenCalled()
expect(mockDbInsert).toHaveBeenCalled()
expect(mockInsertValues).toHaveBeenCalledWith({
id: expect.any(String),
identifier: `chat-otp:${mockChatId}:${mockEmail}`,
value: expect.any(String),
expiresAt: expect.any(Date),
createdAt: expect.any(Date),
updatedAt: expect.any(Date),
})
// Redis must stay untouched on the database path.
expect(mockRedisSet).not.toHaveBeenCalled()
})
})
// Verification on the Redis path reads the code from Redis, deletes it after a
// successful match, and only hits the database for the chat lookup.
describe('PUT - Verify OTP (Redis path)', () => {
beforeEach(() => {
storageMethod = 'redis'
mockRedisGet.mockResolvedValue(mockOTP)
})
it('should retrieve OTP from Redis and verify successfully', async () => {
const { PUT } = await import('./route')
// Chat lookup succeeds for an email-gated chat.
mockDbSelect.mockImplementationOnce(() => ({
from: vi.fn().mockReturnValue({
where: vi.fn().mockReturnValue({
limit: vi.fn().mockResolvedValue([{ id: mockChatId, authType: 'email' }]),
}),
}),
}))
const req = new NextRequest('http://localhost:3000/api/chat/test/otp', {
method: 'PUT',
body: JSON.stringify({ email: mockEmail, otp: mockOTP }),
})
await PUT(req, { params: Promise.resolve({ identifier: mockIdentifier }) })
const otpKey = `otp:${mockEmail}:${mockChatId}`
// Code is fetched from Redis and consumed (deleted) once verified.
expect(mockRedisGet).toHaveBeenCalledWith(otpKey)
expect(mockRedisDel).toHaveBeenCalledWith(otpKey)
// Exactly one select: the chat lookup; no database OTP read happens.
expect(mockDbSelect).toHaveBeenCalledTimes(1)
})
})
// Verification on the database path issues two selects (chat lookup, then OTP
// row) and never consults Redis.
describe('PUT - Verify OTP (Database path)', () => {
beforeEach(() => {
storageMethod = 'database'
mockGetRedisClient.mockReturnValue(null)
})
it('should retrieve OTP from database and verify successfully', async () => {
const { PUT } = await import('./route')
// Call-order-sensitive mock: 1st select = chat lookup, 2nd = OTP row.
let selectCallCount = 0
mockDbSelect.mockImplementation(() => ({
from: vi.fn().mockReturnValue({
where: vi.fn().mockReturnValue({
limit: vi.fn().mockImplementation(() => {
selectCallCount++
if (selectCallCount === 1) {
return Promise.resolve([
{
id: mockChatId,
authType: 'email',
},
])
}
// Unexpired OTP row (expires 10 minutes from now).
return Promise.resolve([
{
value: mockOTP,
expiresAt: new Date(Date.now() + 10 * 60 * 1000),
},
])
}),
}),
}),
}))
const mockDeleteWhere = vi.fn().mockResolvedValue(undefined)
mockDbDelete.mockImplementation(() => ({
where: mockDeleteWhere,
}))
const request = new NextRequest('http://localhost:3000/api/chat/test/otp', {
method: 'PUT',
body: JSON.stringify({ email: mockEmail, otp: mockOTP }),
})
await PUT(request, { params: Promise.resolve({ identifier: mockIdentifier }) })
// Two selects, a consuming delete, and no Redis read.
expect(mockDbSelect).toHaveBeenCalledTimes(2)
expect(mockDbDelete).toHaveBeenCalled()
expect(mockRedisGet).not.toHaveBeenCalled()
})
it('should reject expired OTP from database', async () => {
const { PUT } = await import('./route')
let selectCallCount = 0
mockDbSelect.mockImplementation(() => ({
from: vi.fn().mockReturnValue({
where: vi.fn().mockReturnValue({
limit: vi.fn().mockImplementation(() => {
selectCallCount++
if (selectCallCount === 1) {
return Promise.resolve([
{
id: mockChatId,
authType: 'email',
},
])
}
// Empty result set models an expired/absent OTP row
// (the route filters by expiry in the query).
return Promise.resolve([])
}),
}),
}),
}))
const request = new NextRequest('http://localhost:3000/api/chat/test/otp', {
method: 'PUT',
body: JSON.stringify({ email: mockEmail, otp: mockOTP }),
})
await PUT(request, { params: Promise.resolve({ identifier: mockIdentifier }) })
expect(mockCreateErrorResponse).toHaveBeenCalledWith(
'No verification code found, request a new one',
400
)
})
})
// After a successful verification on the Redis path, the code is removed from
// Redis and the database delete path stays idle.
describe('DELETE OTP (Redis path)', () => {
beforeEach(() => {
storageMethod = 'redis'
})
it('should delete OTP from Redis after verification', async () => {
const { PUT } = await import('./route')
mockRedisGet.mockResolvedValue(mockOTP)
// Chat lookup succeeds for an email-gated chat.
const chatRows = [{ id: mockChatId, authType: 'email' }]
mockDbSelect.mockImplementationOnce(() => ({
from: vi.fn().mockReturnValue({
where: vi.fn().mockReturnValue({
limit: vi.fn().mockResolvedValue(chatRows),
}),
}),
}))
const req = new NextRequest('http://localhost:3000/api/chat/test/otp', {
method: 'PUT',
body: JSON.stringify({ email: mockEmail, otp: mockOTP }),
})
await PUT(req, { params: Promise.resolve({ identifier: mockIdentifier }) })
// The Redis key is consumed; no database delete is issued.
expect(mockRedisDel).toHaveBeenCalledWith(`otp:${mockEmail}:${mockChatId}`)
expect(mockDbDelete).not.toHaveBeenCalled()
})
})
// After a successful verification on the database path, the OTP row is deleted
// from the verification table and Redis deletion is never attempted.
describe('DELETE OTP (Database path)', () => {
beforeEach(() => {
storageMethod = 'database'
mockGetRedisClient.mockReturnValue(null)
})
it('should delete OTP from database after verification', async () => {
const { PUT } = await import('./route')
// 1st select = chat lookup, 2nd = unexpired OTP row.
let selectCallCount = 0
mockDbSelect.mockImplementation(() => ({
from: vi.fn().mockReturnValue({
where: vi.fn().mockReturnValue({
limit: vi.fn().mockImplementation(() => {
selectCallCount++
if (selectCallCount === 1) {
return Promise.resolve([{ id: mockChatId, authType: 'email' }])
}
return Promise.resolve([
{ value: mockOTP, expiresAt: new Date(Date.now() + 10 * 60 * 1000) },
])
}),
}),
}),
}))
const mockDeleteWhere = vi.fn().mockResolvedValue(undefined)
mockDbDelete.mockImplementation(() => ({
where: mockDeleteWhere,
}))
const request = new NextRequest('http://localhost:3000/api/chat/test/otp', {
method: 'PUT',
body: JSON.stringify({ email: mockEmail, otp: mockOTP }),
})
await PUT(request, { params: Promise.resolve({ identifier: mockIdentifier }) })
// Consuming delete hits the database only.
expect(mockDbDelete).toHaveBeenCalled()
expect(mockRedisDel).not.toHaveBeenCalled()
})
})
// Cross-checks that the Redis and database storage backends present the same
// externally observable behavior (error messages, expiry).
// NOTE(review): despite the titles, both tests below only exercise the Redis
// path (storageMethod = 'redis'); a database-path counterpart would be needed
// to actually prove consistency — TODO confirm intent.
describe('Behavior consistency between Redis and Database', () => {
it('should have same behavior for missing OTP in both storage methods', async () => {
storageMethod = 'redis'
// Redis returns no stored code for this email/chat.
mockRedisGet.mockResolvedValue(null)
const { PUT: PUTRedis } = await import('./route')
mockDbSelect.mockImplementation(() => ({
from: vi.fn().mockReturnValue({
where: vi.fn().mockReturnValue({
limit: vi.fn().mockResolvedValue([{ id: mockChatId, authType: 'email' }]),
}),
}),
}))
const requestRedis = new NextRequest('http://localhost:3000/api/chat/test/otp', {
method: 'PUT',
body: JSON.stringify({ email: mockEmail, otp: mockOTP }),
})
await PUTRedis(requestRedis, { params: Promise.resolve({ identifier: mockIdentifier }) })
// Same user-facing error as the database path's expired/missing case.
expect(mockCreateErrorResponse).toHaveBeenCalledWith(
'No verification code found, request a new one',
400
)
})
it('should have same OTP expiry time in both storage methods', async () => {
// Expected TTL in seconds (15 minutes), mirroring the route's constant.
const OTP_EXPIRY = 15 * 60
storageMethod = 'redis'
const { POST: POSTRedis } = await import('./route')
mockDbSelect.mockImplementation(() => ({
from: vi.fn().mockReturnValue({
where: vi.fn().mockReturnValue({
limit: vi.fn().mockResolvedValue([
{
id: mockChatId,
authType: 'email',
allowedEmails: [mockEmail],
title: 'Test Chat',
},
]),
}),
}),
}))
const requestRedis = new NextRequest('http://localhost:3000/api/chat/test/otp', {
method: 'POST',
body: JSON.stringify({ email: mockEmail }),
})
await POSTRedis(requestRedis, { params: Promise.resolve({ identifier: mockIdentifier }) })
expect(mockRedisSet).toHaveBeenCalledWith(
expect.any(String),
expect.any(String),
'EX',
OTP_EXPIRY
)
})
})
})

View File

@@ -1,6 +1,7 @@
import { randomUUID } from 'crypto'
import { db } from '@sim/db'
import { chat } from '@sim/db/schema'
import { eq } from 'drizzle-orm'
import { chat, verification } from '@sim/db/schema'
import { and, eq, gt } from 'drizzle-orm'
import type { NextRequest } from 'next/server'
import { z } from 'zod'
import { renderOTPEmail } from '@/components/emails/render-email'
@@ -22,24 +23,11 @@ const OTP_EXPIRY = 15 * 60 // 15 minutes
const OTP_EXPIRY_MS = OTP_EXPIRY * 1000
/**
* In-memory OTP storage for single-instance deployments without Redis.
* Only used when REDIS_URL is not configured (determined once at startup).
*
* Warning: This does NOT work in multi-instance/serverless deployments.
* Stores OTP in Redis or database depending on storage method.
* Uses the verification table for database storage.
*/
const inMemoryOTPStore = new Map<string, { otp: string; expiresAt: number }>()
function cleanupExpiredOTPs() {
const now = Date.now()
for (const [key, value] of inMemoryOTPStore.entries()) {
if (value.expiresAt < now) {
inMemoryOTPStore.delete(key)
}
}
}
async function storeOTP(email: string, chatId: string, otp: string): Promise<void> {
const key = `otp:${email}:${chatId}`
const identifier = `chat-otp:${chatId}:${email}`
const storageMethod = getStorageMethod()
if (storageMethod === 'redis') {
@@ -47,18 +35,28 @@ async function storeOTP(email: string, chatId: string, otp: string): Promise<voi
if (!redis) {
throw new Error('Redis configured but client unavailable')
}
const key = `otp:${email}:${chatId}`
await redis.set(key, otp, 'EX', OTP_EXPIRY)
} else {
cleanupExpiredOTPs()
inMemoryOTPStore.set(key, {
otp,
expiresAt: Date.now() + OTP_EXPIRY_MS,
const now = new Date()
const expiresAt = new Date(now.getTime() + OTP_EXPIRY_MS)
await db.transaction(async (tx) => {
await tx.delete(verification).where(eq(verification.identifier, identifier))
await tx.insert(verification).values({
id: randomUUID(),
identifier,
value: otp,
expiresAt,
createdAt: now,
updatedAt: now,
})
})
}
}
async function getOTP(email: string, chatId: string): Promise<string | null> {
const key = `otp:${email}:${chatId}`
const identifier = `chat-otp:${chatId}:${email}`
const storageMethod = getStorageMethod()
if (storageMethod === 'redis') {
@@ -66,22 +64,27 @@ async function getOTP(email: string, chatId: string): Promise<string | null> {
if (!redis) {
throw new Error('Redis configured but client unavailable')
}
const key = `otp:${email}:${chatId}`
return redis.get(key)
}
const entry = inMemoryOTPStore.get(key)
if (!entry) return null
const now = new Date()
const [record] = await db
.select({
value: verification.value,
expiresAt: verification.expiresAt,
})
.from(verification)
.where(and(eq(verification.identifier, identifier), gt(verification.expiresAt, now)))
.limit(1)
if (entry.expiresAt < Date.now()) {
inMemoryOTPStore.delete(key)
return null
}
if (!record) return null
return entry.otp
return record.value
}
async function deleteOTP(email: string, chatId: string): Promise<void> {
const key = `otp:${email}:${chatId}`
const identifier = `chat-otp:${chatId}:${email}`
const storageMethod = getStorageMethod()
if (storageMethod === 'redis') {
@@ -89,9 +92,10 @@ async function deleteOTP(email: string, chatId: string): Promise<void> {
if (!redis) {
throw new Error('Redis configured but client unavailable')
}
const key = `otp:${email}:${chatId}`
await redis.del(key)
} else {
inMemoryOTPStore.delete(key)
await db.delete(verification).where(eq(verification.identifier, identifier))
}
}

View File

@@ -0,0 +1,361 @@
/**
* Tests for copilot api-keys API route
*
* @vitest-environment node
*/
import { NextRequest } from 'next/server'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import { mockAuth, mockCryptoUuid, setupCommonApiMocks } from '@/app/api/__test-utils__/utils'
// Exercises the copilot api-keys route, which proxies every operation to the
// Sim Agent service over HTTP. global.fetch is replaced with a mock; the env
// mock leaves SIM_AGENT_API_URL unset so the route falls back to
// SIM_AGENT_API_URL_DEFAULT ('https://agent.sim.example.com').
describe('Copilot API Keys API Route', () => {
const mockFetch = vi.fn()
beforeEach(() => {
vi.resetModules()
setupCommonApiMocks()
mockCryptoUuid()
// The route reaches the Sim Agent through global fetch.
global.fetch = mockFetch
vi.doMock('@/lib/copilot/constants', () => ({
SIM_AGENT_API_URL_DEFAULT: 'https://agent.sim.example.com',
}))
vi.doMock('@/lib/core/config/env', () => ({
env: {
SIM_AGENT_API_URL: null,
COPILOT_API_KEY: 'test-api-key',
},
}))
})
afterEach(() => {
vi.clearAllMocks()
vi.restoreAllMocks()
})
describe('GET', () => {
it('should return 401 when user is not authenticated', async () => {
const authMocks = mockAuth()
authMocks.setUnauthenticated()
const { GET } = await import('@/app/api/copilot/api-keys/route')
const request = new NextRequest('http://localhost:3000/api/copilot/api-keys')
const response = await GET(request)
expect(response.status).toBe(401)
const responseData = await response.json()
expect(responseData).toEqual({ error: 'Unauthorized' })
})
it('should return list of API keys with masked values', async () => {
const authMocks = mockAuth()
authMocks.setAuthenticated()
const mockApiKeys = [
{
id: 'key-1',
apiKey: 'sk-sim-abcdefghijklmnopqrstuv',
name: 'Production Key',
createdAt: '2024-01-01T00:00:00.000Z',
lastUsed: '2024-01-15T00:00:00.000Z',
},
{
id: 'key-2',
apiKey: 'sk-sim-zyxwvutsrqponmlkjihgfe',
name: null,
createdAt: '2024-01-02T00:00:00.000Z',
lastUsed: null,
},
]
mockFetch.mockResolvedValueOnce({
ok: true,
json: () => Promise.resolve(mockApiKeys),
})
const { GET } = await import('@/app/api/copilot/api-keys/route')
const request = new NextRequest('http://localhost:3000/api/copilot/api-keys')
const response = await GET(request)
expect(response.status).toBe(200)
const responseData = await response.json()
expect(responseData.keys).toHaveLength(2)
expect(responseData.keys[0].id).toBe('key-1')
// displayKey = bullet mask + last 6 characters of the raw key.
expect(responseData.keys[0].displayKey).toBe('•••••qrstuv')
expect(responseData.keys[0].name).toBe('Production Key')
expect(responseData.keys[1].displayKey).toBe('•••••jihgfe')
expect(responseData.keys[1].name).toBeNull()
})
it('should return empty array when user has no API keys', async () => {
const authMocks = mockAuth()
authMocks.setAuthenticated()
mockFetch.mockResolvedValueOnce({
ok: true,
json: () => Promise.resolve([]),
})
const { GET } = await import('@/app/api/copilot/api-keys/route')
const request = new NextRequest('http://localhost:3000/api/copilot/api-keys')
const response = await GET(request)
expect(response.status).toBe(200)
const responseData = await response.json()
expect(responseData.keys).toEqual([])
})
it('should forward userId to Sim Agent', async () => {
const authMocks = mockAuth()
authMocks.setAuthenticated()
mockFetch.mockResolvedValueOnce({
ok: true,
json: () => Promise.resolve([]),
})
const { GET } = await import('@/app/api/copilot/api-keys/route')
const request = new NextRequest('http://localhost:3000/api/copilot/api-keys')
await GET(request)
// 'user-123' is the authenticated user id supplied by mockAuth().
expect(mockFetch).toHaveBeenCalledWith(
'https://agent.sim.example.com/api/validate-key/get-api-keys',
expect.objectContaining({
method: 'POST',
headers: expect.objectContaining({
'Content-Type': 'application/json',
'x-api-key': 'test-api-key',
}),
body: JSON.stringify({ userId: 'user-123' }),
})
)
})
it('should return error when Sim Agent returns non-ok response', async () => {
const authMocks = mockAuth()
authMocks.setAuthenticated()
mockFetch.mockResolvedValueOnce({
ok: false,
status: 503,
json: () => Promise.resolve({ error: 'Service unavailable' }),
})
const { GET } = await import('@/app/api/copilot/api-keys/route')
const request = new NextRequest('http://localhost:3000/api/copilot/api-keys')
const response = await GET(request)
// Upstream status is propagated; the body is a generic route-level error.
expect(response.status).toBe(503)
const responseData = await response.json()
expect(responseData).toEqual({ error: 'Failed to get keys' })
})
it('should return 500 when Sim Agent returns invalid response', async () => {
const authMocks = mockAuth()
authMocks.setAuthenticated()
mockFetch.mockResolvedValueOnce({
ok: true,
json: () => Promise.resolve({ invalid: 'response' }),
})
const { GET } = await import('@/app/api/copilot/api-keys/route')
const request = new NextRequest('http://localhost:3000/api/copilot/api-keys')
const response = await GET(request)
expect(response.status).toBe(500)
const responseData = await response.json()
expect(responseData).toEqual({ error: 'Invalid response from Sim Agent' })
})
it('should handle network errors gracefully', async () => {
const authMocks = mockAuth()
authMocks.setAuthenticated()
mockFetch.mockRejectedValueOnce(new Error('Network error'))
const { GET } = await import('@/app/api/copilot/api-keys/route')
const request = new NextRequest('http://localhost:3000/api/copilot/api-keys')
const response = await GET(request)
expect(response.status).toBe(500)
const responseData = await response.json()
expect(responseData).toEqual({ error: 'Failed to get keys' })
})
it('should handle API keys with empty apiKey string', async () => {
const authMocks = mockAuth()
authMocks.setAuthenticated()
const mockApiKeys = [
{
id: 'key-1',
apiKey: '',
name: 'Empty Key',
createdAt: '2024-01-01T00:00:00.000Z',
lastUsed: null,
},
]
mockFetch.mockResolvedValueOnce({
ok: true,
json: () => Promise.resolve(mockApiKeys),
})
const { GET } = await import('@/app/api/copilot/api-keys/route')
const request = new NextRequest('http://localhost:3000/api/copilot/api-keys')
const response = await GET(request)
expect(response.status).toBe(200)
const responseData = await response.json()
// Empty key still renders the mask, with no trailing characters.
expect(responseData.keys[0].displayKey).toBe('•••••')
})
it('should handle JSON parsing errors from Sim Agent', async () => {
const authMocks = mockAuth()
authMocks.setAuthenticated()
mockFetch.mockResolvedValueOnce({
ok: true,
json: () => Promise.reject(new Error('Invalid JSON')),
})
const { GET } = await import('@/app/api/copilot/api-keys/route')
const request = new NextRequest('http://localhost:3000/api/copilot/api-keys')
const response = await GET(request)
expect(response.status).toBe(500)
const responseData = await response.json()
expect(responseData).toEqual({ error: 'Invalid response from Sim Agent' })
})
})
describe('DELETE', () => {
it('should return 401 when user is not authenticated', async () => {
const authMocks = mockAuth()
authMocks.setUnauthenticated()
const { DELETE } = await import('@/app/api/copilot/api-keys/route')
const request = new NextRequest('http://localhost:3000/api/copilot/api-keys?id=key-123')
const response = await DELETE(request)
expect(response.status).toBe(401)
const responseData = await response.json()
expect(responseData).toEqual({ error: 'Unauthorized' })
})
it('should return 400 when id parameter is missing', async () => {
const authMocks = mockAuth()
authMocks.setAuthenticated()
const { DELETE } = await import('@/app/api/copilot/api-keys/route')
const request = new NextRequest('http://localhost:3000/api/copilot/api-keys')
const response = await DELETE(request)
expect(response.status).toBe(400)
const responseData = await response.json()
expect(responseData).toEqual({ error: 'id is required' })
})
it('should successfully delete an API key', async () => {
const authMocks = mockAuth()
authMocks.setAuthenticated()
mockFetch.mockResolvedValueOnce({
ok: true,
json: () => Promise.resolve({ success: true }),
})
const { DELETE } = await import('@/app/api/copilot/api-keys/route')
const request = new NextRequest('http://localhost:3000/api/copilot/api-keys?id=key-123')
const response = await DELETE(request)
expect(response.status).toBe(200)
const responseData = await response.json()
expect(responseData).toEqual({ success: true })
// Delete is forwarded to the Sim Agent with both user and key ids.
expect(mockFetch).toHaveBeenCalledWith(
'https://agent.sim.example.com/api/validate-key/delete',
expect.objectContaining({
method: 'POST',
headers: expect.objectContaining({
'Content-Type': 'application/json',
'x-api-key': 'test-api-key',
}),
body: JSON.stringify({ userId: 'user-123', apiKeyId: 'key-123' }),
})
)
})
it('should return error when Sim Agent returns non-ok response', async () => {
const authMocks = mockAuth()
authMocks.setAuthenticated()
mockFetch.mockResolvedValueOnce({
ok: false,
status: 404,
json: () => Promise.resolve({ error: 'Key not found' }),
})
const { DELETE } = await import('@/app/api/copilot/api-keys/route')
const request = new NextRequest('http://localhost:3000/api/copilot/api-keys?id=non-existent')
const response = await DELETE(request)
expect(response.status).toBe(404)
const responseData = await response.json()
expect(responseData).toEqual({ error: 'Failed to delete key' })
})
it('should return 500 when Sim Agent returns invalid response', async () => {
const authMocks = mockAuth()
authMocks.setAuthenticated()
mockFetch.mockResolvedValueOnce({
ok: true,
json: () => Promise.resolve({ success: false }),
})
const { DELETE } = await import('@/app/api/copilot/api-keys/route')
const request = new NextRequest('http://localhost:3000/api/copilot/api-keys?id=key-123')
const response = await DELETE(request)
expect(response.status).toBe(500)
const responseData = await response.json()
expect(responseData).toEqual({ error: 'Invalid response from Sim Agent' })
})
it('should handle network errors gracefully', async () => {
const authMocks = mockAuth()
authMocks.setAuthenticated()
mockFetch.mockRejectedValueOnce(new Error('Network error'))
const { DELETE } = await import('@/app/api/copilot/api-keys/route')
const request = new NextRequest('http://localhost:3000/api/copilot/api-keys?id=key-123')
const response = await DELETE(request)
expect(response.status).toBe(500)
const responseData = await response.json()
expect(responseData).toEqual({ error: 'Failed to delete key' })
})
it('should handle JSON parsing errors from Sim Agent on delete', async () => {
const authMocks = mockAuth()
authMocks.setAuthenticated()
mockFetch.mockResolvedValueOnce({
ok: true,
json: () => Promise.reject(new Error('Invalid JSON')),
})
const { DELETE } = await import('@/app/api/copilot/api-keys/route')
const request = new NextRequest('http://localhost:3000/api/copilot/api-keys?id=key-123')
const response = await DELETE(request)
expect(response.status).toBe(500)
const responseData = await response.json()
expect(responseData).toEqual({ error: 'Invalid response from Sim Agent' })
})
})
})

View File

@@ -0,0 +1,189 @@
/**
* Tests for copilot chat delete API route
*
* @vitest-environment node
*/
import { NextRequest } from 'next/server'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import {
createMockRequest,
mockAuth,
mockCryptoUuid,
setupCommonApiMocks,
} from '@/app/api/__test-utils__/utils'
describe('Copilot Chat Delete API Route', () => {
const mockDelete = vi.fn()
const mockWhere = vi.fn()
beforeEach(() => {
vi.resetModules()
setupCommonApiMocks()
mockCryptoUuid()
mockDelete.mockReturnValue({ where: mockWhere })
mockWhere.mockResolvedValue([])
vi.doMock('@sim/db', () => ({
db: {
delete: mockDelete,
},
}))
vi.doMock('@sim/db/schema', () => ({
copilotChats: {
id: 'id',
userId: 'userId',
},
}))
vi.doMock('drizzle-orm', () => ({
eq: vi.fn((field, value) => ({ field, value, type: 'eq' })),
}))
})
afterEach(() => {
vi.clearAllMocks()
vi.restoreAllMocks()
})
describe('DELETE', () => {
it('should return 401 when user is not authenticated', async () => {
const authMocks = mockAuth()
authMocks.setUnauthenticated()
const req = createMockRequest('DELETE', {
chatId: 'chat-123',
})
const { DELETE } = await import('@/app/api/copilot/chat/delete/route')
const response = await DELETE(req)
expect(response.status).toBe(401)
const responseData = await response.json()
expect(responseData).toEqual({ success: false, error: 'Unauthorized' })
})
it('should successfully delete a chat', async () => {
const authMocks = mockAuth()
authMocks.setAuthenticated()
mockWhere.mockResolvedValueOnce([{ id: 'chat-123' }])
const req = createMockRequest('DELETE', {
chatId: 'chat-123',
})
const { DELETE } = await import('@/app/api/copilot/chat/delete/route')
const response = await DELETE(req)
expect(response.status).toBe(200)
const responseData = await response.json()
expect(responseData).toEqual({ success: true })
expect(mockDelete).toHaveBeenCalled()
expect(mockWhere).toHaveBeenCalled()
})
it('should return 500 for invalid request body - missing chatId', async () => {
const authMocks = mockAuth()
authMocks.setAuthenticated()
const req = createMockRequest('DELETE', {})
const { DELETE } = await import('@/app/api/copilot/chat/delete/route')
const response = await DELETE(req)
expect(response.status).toBe(500)
const responseData = await response.json()
expect(responseData.error).toBe('Failed to delete chat')
})
it('should return 500 for invalid request body - chatId is not a string', async () => {
const authMocks = mockAuth()
authMocks.setAuthenticated()
const req = createMockRequest('DELETE', {
chatId: 12345,
})
const { DELETE } = await import('@/app/api/copilot/chat/delete/route')
const response = await DELETE(req)
expect(response.status).toBe(500)
const responseData = await response.json()
expect(responseData.error).toBe('Failed to delete chat')
})
it('should handle database errors gracefully', async () => {
const authMocks = mockAuth()
authMocks.setAuthenticated()
mockWhere.mockRejectedValueOnce(new Error('Database connection failed'))
const req = createMockRequest('DELETE', {
chatId: 'chat-123',
})
const { DELETE } = await import('@/app/api/copilot/chat/delete/route')
const response = await DELETE(req)
expect(response.status).toBe(500)
const responseData = await response.json()
expect(responseData).toEqual({ success: false, error: 'Failed to delete chat' })
})
it('should handle JSON parsing errors in request body', async () => {
const authMocks = mockAuth()
authMocks.setAuthenticated()
const req = new NextRequest('http://localhost:3000/api/copilot/chat/delete', {
method: 'DELETE',
body: '{invalid-json',
headers: {
'Content-Type': 'application/json',
},
})
const { DELETE } = await import('@/app/api/copilot/chat/delete/route')
const response = await DELETE(req)
expect(response.status).toBe(500)
const responseData = await response.json()
expect(responseData.error).toBe('Failed to delete chat')
})
it('should delete chat even if it does not exist (idempotent)', async () => {
const authMocks = mockAuth()
authMocks.setAuthenticated()
mockWhere.mockResolvedValueOnce([])
const req = createMockRequest('DELETE', {
chatId: 'non-existent-chat',
})
const { DELETE } = await import('@/app/api/copilot/chat/delete/route')
const response = await DELETE(req)
expect(response.status).toBe(200)
const responseData = await response.json()
expect(responseData).toEqual({ success: true })
})
it('should delete chat with empty string chatId (validation should fail)', async () => {
const authMocks = mockAuth()
authMocks.setAuthenticated()
const req = createMockRequest('DELETE', {
chatId: '',
})
const { DELETE } = await import('@/app/api/copilot/chat/delete/route')
const response = await DELETE(req)
expect(response.status).toBe(200)
expect(mockDelete).toHaveBeenCalled()
})
})
})

View File

@@ -1066,7 +1066,6 @@ export async function GET(req: NextRequest) {
model: chat.model,
messages: Array.isArray(chat.messages) ? chat.messages : [],
messageCount: Array.isArray(chat.messages) ? chat.messages.length : 0,
previewYaml: null, // Not needed for chat list
planArtifact: chat.planArtifact || null,
config: chat.config || null,
createdAt: chat.createdAt,

View File

@@ -0,0 +1,277 @@
/**
* Tests for copilot chats list API route
*
* @vitest-environment node
*/
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import { mockCryptoUuid, setupCommonApiMocks } from '@/app/api/__test-utils__/utils'
describe('Copilot Chats List API Route', () => {
const mockSelect = vi.fn()
const mockFrom = vi.fn()
const mockWhere = vi.fn()
const mockOrderBy = vi.fn()
beforeEach(() => {
vi.resetModules()
setupCommonApiMocks()
mockCryptoUuid()
mockSelect.mockReturnValue({ from: mockFrom })
mockFrom.mockReturnValue({ where: mockWhere })
mockWhere.mockReturnValue({ orderBy: mockOrderBy })
mockOrderBy.mockResolvedValue([])
vi.doMock('@sim/db', () => ({
db: {
select: mockSelect,
},
}))
vi.doMock('@sim/db/schema', () => ({
copilotChats: {
id: 'id',
title: 'title',
workflowId: 'workflowId',
userId: 'userId',
updatedAt: 'updatedAt',
},
}))
vi.doMock('drizzle-orm', () => ({
and: vi.fn((...conditions) => ({ conditions, type: 'and' })),
eq: vi.fn((field, value) => ({ field, value, type: 'eq' })),
desc: vi.fn((field) => ({ field, type: 'desc' })),
}))
vi.doMock('@/lib/copilot/request-helpers', () => ({
authenticateCopilotRequestSessionOnly: vi.fn(),
createUnauthorizedResponse: vi
.fn()
.mockReturnValue(new Response(JSON.stringify({ error: 'Unauthorized' }), { status: 401 })),
createInternalServerErrorResponse: vi
.fn()
.mockImplementation(
(message) => new Response(JSON.stringify({ error: message }), { status: 500 })
),
}))
})
afterEach(() => {
vi.clearAllMocks()
vi.restoreAllMocks()
})
describe('GET', () => {
it('should return 401 when user is not authenticated', async () => {
const { authenticateCopilotRequestSessionOnly } = await import(
'@/lib/copilot/request-helpers'
)
vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
userId: null,
isAuthenticated: false,
})
const { GET } = await import('@/app/api/copilot/chats/route')
const request = new Request('http://localhost:3000/api/copilot/chats')
const response = await GET(request as any)
expect(response.status).toBe(401)
const responseData = await response.json()
expect(responseData).toEqual({ error: 'Unauthorized' })
})
it('should return empty chats array when user has no chats', async () => {
const { authenticateCopilotRequestSessionOnly } = await import(
'@/lib/copilot/request-helpers'
)
vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
userId: 'user-123',
isAuthenticated: true,
})
mockOrderBy.mockResolvedValueOnce([])
const { GET } = await import('@/app/api/copilot/chats/route')
const request = new Request('http://localhost:3000/api/copilot/chats')
const response = await GET(request as any)
expect(response.status).toBe(200)
const responseData = await response.json()
expect(responseData).toEqual({
success: true,
chats: [],
})
})
it('should return list of chats for authenticated user', async () => {
const { authenticateCopilotRequestSessionOnly } = await import(
'@/lib/copilot/request-helpers'
)
vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
userId: 'user-123',
isAuthenticated: true,
})
const mockChats = [
{
id: 'chat-1',
title: 'First Chat',
workflowId: 'workflow-1',
updatedAt: new Date('2024-01-02'),
},
{
id: 'chat-2',
title: 'Second Chat',
workflowId: 'workflow-2',
updatedAt: new Date('2024-01-01'),
},
]
mockOrderBy.mockResolvedValueOnce(mockChats)
const { GET } = await import('@/app/api/copilot/chats/route')
const request = new Request('http://localhost:3000/api/copilot/chats')
const response = await GET(request as any)
expect(response.status).toBe(200)
const responseData = await response.json()
expect(responseData.success).toBe(true)
expect(responseData.chats).toHaveLength(2)
expect(responseData.chats[0].id).toBe('chat-1')
expect(responseData.chats[0].title).toBe('First Chat')
expect(responseData.chats[1].id).toBe('chat-2')
})
it('should return chats ordered by updatedAt descending', async () => {
const { authenticateCopilotRequestSessionOnly } = await import(
'@/lib/copilot/request-helpers'
)
vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
userId: 'user-123',
isAuthenticated: true,
})
const mockChats = [
{
id: 'newest-chat',
title: 'Newest',
workflowId: 'workflow-1',
updatedAt: new Date('2024-01-10'),
},
{
id: 'older-chat',
title: 'Older',
workflowId: 'workflow-2',
updatedAt: new Date('2024-01-05'),
},
{
id: 'oldest-chat',
title: 'Oldest',
workflowId: 'workflow-3',
updatedAt: new Date('2024-01-01'),
},
]
mockOrderBy.mockResolvedValueOnce(mockChats)
const { GET } = await import('@/app/api/copilot/chats/route')
const request = new Request('http://localhost:3000/api/copilot/chats')
const response = await GET(request as any)
expect(response.status).toBe(200)
const responseData = await response.json()
expect(responseData.chats[0].id).toBe('newest-chat')
expect(responseData.chats[2].id).toBe('oldest-chat')
})
it('should handle chats with null workflowId', async () => {
const { authenticateCopilotRequestSessionOnly } = await import(
'@/lib/copilot/request-helpers'
)
vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
userId: 'user-123',
isAuthenticated: true,
})
const mockChats = [
{
id: 'chat-no-workflow',
title: 'Chat without workflow',
workflowId: null,
updatedAt: new Date('2024-01-01'),
},
]
mockOrderBy.mockResolvedValueOnce(mockChats)
const { GET } = await import('@/app/api/copilot/chats/route')
const request = new Request('http://localhost:3000/api/copilot/chats')
const response = await GET(request as any)
expect(response.status).toBe(200)
const responseData = await response.json()
expect(responseData.chats[0].workflowId).toBeNull()
})
it('should handle database errors gracefully', async () => {
const { authenticateCopilotRequestSessionOnly } = await import(
'@/lib/copilot/request-helpers'
)
vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
userId: 'user-123',
isAuthenticated: true,
})
mockOrderBy.mockRejectedValueOnce(new Error('Database connection failed'))
const { GET } = await import('@/app/api/copilot/chats/route')
const request = new Request('http://localhost:3000/api/copilot/chats')
const response = await GET(request as any)
expect(response.status).toBe(500)
const responseData = await response.json()
expect(responseData.error).toBe('Failed to fetch user chats')
})
it('should only return chats belonging to authenticated user', async () => {
const { authenticateCopilotRequestSessionOnly } = await import(
'@/lib/copilot/request-helpers'
)
vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
userId: 'user-123',
isAuthenticated: true,
})
const mockChats = [
{
id: 'my-chat',
title: 'My Chat',
workflowId: 'workflow-1',
updatedAt: new Date('2024-01-01'),
},
]
mockOrderBy.mockResolvedValueOnce(mockChats)
const { GET } = await import('@/app/api/copilot/chats/route')
const request = new Request('http://localhost:3000/api/copilot/chats')
await GET(request as any)
expect(mockSelect).toHaveBeenCalled()
expect(mockWhere).toHaveBeenCalled()
})
it('should return 401 when userId is null despite isAuthenticated being true', async () => {
const { authenticateCopilotRequestSessionOnly } = await import(
'@/lib/copilot/request-helpers'
)
vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
userId: null,
isAuthenticated: true,
})
const { GET } = await import('@/app/api/copilot/chats/route')
const request = new Request('http://localhost:3000/api/copilot/chats')
const response = await GET(request as any)
expect(response.status).toBe(401)
})
})
})

View File

@@ -0,0 +1,516 @@
/**
 * Tests for copilot feedback API route
 *
 * @vitest-environment node
 */
import { NextRequest } from 'next/server'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import {
  createMockRequest,
  mockCryptoUuid,
  setupCommonApiMocks,
} from '@/app/api/__test-utils__/utils'

describe('Copilot Feedback API Route', () => {
  // Chainable mocks standing in for drizzle's fluent query builder:
  // db.insert(...).values(...).returning() and db.select().from(...).
  const mockInsert = vi.fn()
  const mockValues = vi.fn()
  const mockReturning = vi.fn()
  const mockSelect = vi.fn()
  const mockFrom = vi.fn()

  beforeEach(() => {
    // Clear the module registry so the vi.doMock registrations below are
    // picked up by each test's dynamic import of the route module. The
    // doMock calls MUST run before the route is imported.
    vi.resetModules()
    setupCommonApiMocks()
    mockCryptoUuid()
    // Default chain wiring; individual tests override the terminal calls
    // (mockReturning / mockFrom) with test-specific results via *Once().
    mockInsert.mockReturnValue({ values: mockValues })
    mockValues.mockReturnValue({ returning: mockReturning })
    mockReturning.mockResolvedValue([])
    mockSelect.mockReturnValue({ from: mockFrom })
    mockFrom.mockResolvedValue([])
    // Replace the real database client so no connection is attempted.
    vi.doMock('@sim/db', () => ({
      db: {
        insert: mockInsert,
        select: mockSelect,
      },
    }))
    // Minimal column map for the copilotFeedback table referenced by the route.
    vi.doMock('@sim/db/schema', () => ({
      copilotFeedback: {
        feedbackId: 'feedbackId',
        userId: 'userId',
        chatId: 'chatId',
        userQuery: 'userQuery',
        agentResponse: 'agentResponse',
        isPositive: 'isPositive',
        feedback: 'feedback',
        workflowYaml: 'workflowYaml',
        createdAt: 'createdAt',
      },
    }))
    vi.doMock('drizzle-orm', () => ({
      eq: vi.fn((field, value) => ({ field, value, type: 'eq' })),
    }))
    // Stub the copilot request helpers. The auth result is configured per
    // test through vi.mocked(authenticateCopilotRequestSessionOnly); the
    // response factories mirror the real helpers' status codes.
    vi.doMock('@/lib/copilot/request-helpers', () => ({
      authenticateCopilotRequestSessionOnly: vi.fn(),
      createUnauthorizedResponse: vi
        .fn()
        .mockReturnValue(new Response(JSON.stringify({ error: 'Unauthorized' }), { status: 401 })),
      createBadRequestResponse: vi
        .fn()
        .mockImplementation(
          (message) => new Response(JSON.stringify({ error: message }), { status: 400 })
        ),
      createInternalServerErrorResponse: vi
        .fn()
        .mockImplementation(
          (message) => new Response(JSON.stringify({ error: message }), { status: 500 })
        ),
      createRequestTracker: vi.fn().mockReturnValue({
        requestId: 'test-request-id',
        getDuration: vi.fn().mockReturnValue(100),
      }),
    }))
  })

  afterEach(() => {
    vi.clearAllMocks()
    vi.restoreAllMocks()
  })

  describe('POST', () => {
    it('should return 401 when user is not authenticated', async () => {
      const { authenticateCopilotRequestSessionOnly } = await import(
        '@/lib/copilot/request-helpers'
      )
      vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
        userId: null,
        isAuthenticated: false,
      })
      const req = createMockRequest('POST', {
        chatId: '550e8400-e29b-41d4-a716-446655440000',
        userQuery: 'How do I create a workflow?',
        agentResponse: 'You can create a workflow by...',
        isPositiveFeedback: true,
      })
      const { POST } = await import('@/app/api/copilot/feedback/route')
      const response = await POST(req)
      expect(response.status).toBe(401)
      const responseData = await response.json()
      expect(responseData).toEqual({ error: 'Unauthorized' })
    })

    it('should successfully submit positive feedback', async () => {
      const { authenticateCopilotRequestSessionOnly } = await import(
        '@/lib/copilot/request-helpers'
      )
      vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
        userId: 'user-123',
        isAuthenticated: true,
      })
      // Row the mocked insert chain returns; the route echoes feedbackId back.
      const feedbackRecord = {
        feedbackId: 'feedback-123',
        userId: 'user-123',
        chatId: '550e8400-e29b-41d4-a716-446655440000',
        userQuery: 'How do I create a workflow?',
        agentResponse: 'You can create a workflow by...',
        isPositive: true,
        feedback: null,
        workflowYaml: null,
        createdAt: new Date('2024-01-01'),
      }
      mockReturning.mockResolvedValueOnce([feedbackRecord])
      const req = createMockRequest('POST', {
        chatId: '550e8400-e29b-41d4-a716-446655440000',
        userQuery: 'How do I create a workflow?',
        agentResponse: 'You can create a workflow by...',
        isPositiveFeedback: true,
      })
      const { POST } = await import('@/app/api/copilot/feedback/route')
      const response = await POST(req)
      expect(response.status).toBe(200)
      const responseData = await response.json()
      expect(responseData.success).toBe(true)
      expect(responseData.feedbackId).toBe('feedback-123')
      expect(responseData.message).toBe('Feedback submitted successfully')
    })

    it('should successfully submit negative feedback with text', async () => {
      const { authenticateCopilotRequestSessionOnly } = await import(
        '@/lib/copilot/request-helpers'
      )
      vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
        userId: 'user-123',
        isAuthenticated: true,
      })
      const feedbackRecord = {
        feedbackId: 'feedback-456',
        userId: 'user-123',
        chatId: '550e8400-e29b-41d4-a716-446655440000',
        userQuery: 'How do I deploy?',
        agentResponse: 'Here is how to deploy...',
        isPositive: false,
        feedback: 'The response was not helpful',
        workflowYaml: null,
        createdAt: new Date('2024-01-01'),
      }
      mockReturning.mockResolvedValueOnce([feedbackRecord])
      const req = createMockRequest('POST', {
        chatId: '550e8400-e29b-41d4-a716-446655440000',
        userQuery: 'How do I deploy?',
        agentResponse: 'Here is how to deploy...',
        isPositiveFeedback: false,
        feedback: 'The response was not helpful',
      })
      const { POST } = await import('@/app/api/copilot/feedback/route')
      const response = await POST(req)
      expect(response.status).toBe(200)
      const responseData = await response.json()
      expect(responseData.success).toBe(true)
      expect(responseData.feedbackId).toBe('feedback-456')
    })

    it('should successfully submit feedback with workflow YAML', async () => {
      const { authenticateCopilotRequestSessionOnly } = await import(
        '@/lib/copilot/request-helpers'
      )
      vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
        userId: 'user-123',
        isAuthenticated: true,
      })
      const workflowYaml = `
blocks:
  - id: starter
    type: starter
  - id: agent
    type: agent
edges:
  - source: starter
    target: agent
`
      const feedbackRecord = {
        feedbackId: 'feedback-789',
        userId: 'user-123',
        chatId: '550e8400-e29b-41d4-a716-446655440000',
        userQuery: 'Build a simple agent workflow',
        agentResponse: 'I created a workflow for you.',
        isPositive: true,
        feedback: null,
        workflowYaml: workflowYaml,
        createdAt: new Date('2024-01-01'),
      }
      mockReturning.mockResolvedValueOnce([feedbackRecord])
      const req = createMockRequest('POST', {
        chatId: '550e8400-e29b-41d4-a716-446655440000',
        userQuery: 'Build a simple agent workflow',
        agentResponse: 'I created a workflow for you.',
        isPositiveFeedback: true,
        workflowYaml: workflowYaml,
      })
      const { POST } = await import('@/app/api/copilot/feedback/route')
      const response = await POST(req)
      expect(response.status).toBe(200)
      const responseData = await response.json()
      expect(responseData.success).toBe(true)
      // Verify the YAML made it into the insert payload unchanged.
      expect(mockValues).toHaveBeenCalledWith(
        expect.objectContaining({
          workflowYaml: workflowYaml,
        })
      )
    })

    it('should return 400 for invalid chatId format', async () => {
      const { authenticateCopilotRequestSessionOnly } = await import(
        '@/lib/copilot/request-helpers'
      )
      vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
        userId: 'user-123',
        isAuthenticated: true,
      })
      // chatId must be a UUID; this should fail schema validation.
      const req = createMockRequest('POST', {
        chatId: 'not-a-uuid',
        userQuery: 'How do I create a workflow?',
        agentResponse: 'You can create a workflow by...',
        isPositiveFeedback: true,
      })
      const { POST } = await import('@/app/api/copilot/feedback/route')
      const response = await POST(req)
      expect(response.status).toBe(400)
      const responseData = await response.json()
      expect(responseData.error).toContain('Invalid request data')
    })

    it('should return 400 for empty userQuery', async () => {
      const { authenticateCopilotRequestSessionOnly } = await import(
        '@/lib/copilot/request-helpers'
      )
      vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
        userId: 'user-123',
        isAuthenticated: true,
      })
      const req = createMockRequest('POST', {
        chatId: '550e8400-e29b-41d4-a716-446655440000',
        userQuery: '',
        agentResponse: 'You can create a workflow by...',
        isPositiveFeedback: true,
      })
      const { POST } = await import('@/app/api/copilot/feedback/route')
      const response = await POST(req)
      expect(response.status).toBe(400)
      const responseData = await response.json()
      expect(responseData.error).toContain('Invalid request data')
    })

    it('should return 400 for empty agentResponse', async () => {
      const { authenticateCopilotRequestSessionOnly } = await import(
        '@/lib/copilot/request-helpers'
      )
      vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
        userId: 'user-123',
        isAuthenticated: true,
      })
      const req = createMockRequest('POST', {
        chatId: '550e8400-e29b-41d4-a716-446655440000',
        userQuery: 'How do I create a workflow?',
        agentResponse: '',
        isPositiveFeedback: true,
      })
      const { POST } = await import('@/app/api/copilot/feedback/route')
      const response = await POST(req)
      expect(response.status).toBe(400)
      const responseData = await response.json()
      expect(responseData.error).toContain('Invalid request data')
    })

    it('should return 400 for missing isPositiveFeedback', async () => {
      const { authenticateCopilotRequestSessionOnly } = await import(
        '@/lib/copilot/request-helpers'
      )
      vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
        userId: 'user-123',
        isAuthenticated: true,
      })
      const req = createMockRequest('POST', {
        chatId: '550e8400-e29b-41d4-a716-446655440000',
        userQuery: 'How do I create a workflow?',
        agentResponse: 'You can create a workflow by...',
      })
      const { POST } = await import('@/app/api/copilot/feedback/route')
      const response = await POST(req)
      expect(response.status).toBe(400)
      const responseData = await response.json()
      expect(responseData.error).toContain('Invalid request data')
    })

    it('should handle database errors gracefully', async () => {
      const { authenticateCopilotRequestSessionOnly } = await import(
        '@/lib/copilot/request-helpers'
      )
      vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
        userId: 'user-123',
        isAuthenticated: true,
      })
      // Fail at the terminal step of the insert chain to simulate a DB outage.
      mockReturning.mockRejectedValueOnce(new Error('Database connection failed'))
      const req = createMockRequest('POST', {
        chatId: '550e8400-e29b-41d4-a716-446655440000',
        userQuery: 'How do I create a workflow?',
        agentResponse: 'You can create a workflow by...',
        isPositiveFeedback: true,
      })
      const { POST } = await import('@/app/api/copilot/feedback/route')
      const response = await POST(req)
      expect(response.status).toBe(500)
      const responseData = await response.json()
      expect(responseData.error).toBe('Failed to submit feedback')
    })

    it('should handle JSON parsing errors in request body', async () => {
      const { authenticateCopilotRequestSessionOnly } = await import(
        '@/lib/copilot/request-helpers'
      )
      vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
        userId: 'user-123',
        isAuthenticated: true,
      })
      // Raw NextRequest with a malformed body; request.json() will throw.
      const req = new NextRequest('http://localhost:3000/api/copilot/feedback', {
        method: 'POST',
        body: '{invalid-json',
        headers: {
          'Content-Type': 'application/json',
        },
      })
      const { POST } = await import('@/app/api/copilot/feedback/route')
      const response = await POST(req)
      // NOTE(review): parse failures surface as 500 here, not 400 — confirm
      // this matches the route's intended contract.
      expect(response.status).toBe(500)
    })
  })

  describe('GET', () => {
    it('should return 401 when user is not authenticated', async () => {
      const { authenticateCopilotRequestSessionOnly } = await import(
        '@/lib/copilot/request-helpers'
      )
      vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
        userId: null,
        isAuthenticated: false,
      })
      const { GET } = await import('@/app/api/copilot/feedback/route')
      const request = new Request('http://localhost:3000/api/copilot/feedback')
      const response = await GET(request as any)
      expect(response.status).toBe(401)
      const responseData = await response.json()
      expect(responseData).toEqual({ error: 'Unauthorized' })
    })

    it('should return empty feedback array when no feedback exists', async () => {
      const { authenticateCopilotRequestSessionOnly } = await import(
        '@/lib/copilot/request-helpers'
      )
      vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
        userId: 'user-123',
        isAuthenticated: true,
      })
      mockFrom.mockResolvedValueOnce([])
      const { GET } = await import('@/app/api/copilot/feedback/route')
      const request = new Request('http://localhost:3000/api/copilot/feedback')
      const response = await GET(request as any)
      expect(response.status).toBe(200)
      const responseData = await response.json()
      expect(responseData.success).toBe(true)
      expect(responseData.feedback).toEqual([])
    })

    it('should return all feedback records', async () => {
      const { authenticateCopilotRequestSessionOnly } = await import(
        '@/lib/copilot/request-helpers'
      )
      vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
        userId: 'user-123',
        isAuthenticated: true,
      })
      // Two records from different users — GET appears to return all rows,
      // not just the caller's (no where-clause mock is involved).
      const mockFeedback = [
        {
          feedbackId: 'feedback-1',
          userId: 'user-123',
          chatId: 'chat-1',
          userQuery: 'Query 1',
          agentResponse: 'Response 1',
          isPositive: true,
          feedback: null,
          workflowYaml: null,
          createdAt: new Date('2024-01-01'),
        },
        {
          feedbackId: 'feedback-2',
          userId: 'user-456',
          chatId: 'chat-2',
          userQuery: 'Query 2',
          agentResponse: 'Response 2',
          isPositive: false,
          feedback: 'Not helpful',
          workflowYaml: 'yaml: content',
          createdAt: new Date('2024-01-02'),
        },
      ]
      mockFrom.mockResolvedValueOnce(mockFeedback)
      const { GET } = await import('@/app/api/copilot/feedback/route')
      const request = new Request('http://localhost:3000/api/copilot/feedback')
      const response = await GET(request as any)
      expect(response.status).toBe(200)
      const responseData = await response.json()
      expect(responseData.success).toBe(true)
      expect(responseData.feedback).toHaveLength(2)
      expect(responseData.feedback[0].feedbackId).toBe('feedback-1')
      expect(responseData.feedback[1].feedbackId).toBe('feedback-2')
    })

    it('should handle database errors gracefully', async () => {
      const { authenticateCopilotRequestSessionOnly } = await import(
        '@/lib/copilot/request-helpers'
      )
      vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
        userId: 'user-123',
        isAuthenticated: true,
      })
      mockFrom.mockRejectedValueOnce(new Error('Database connection failed'))
      const { GET } = await import('@/app/api/copilot/feedback/route')
      const request = new Request('http://localhost:3000/api/copilot/feedback')
      const response = await GET(request as any)
      expect(response.status).toBe(500)
      const responseData = await response.json()
      expect(responseData.error).toBe('Failed to retrieve feedback')
    })

    it('should return metadata with response', async () => {
      const { authenticateCopilotRequestSessionOnly } = await import(
        '@/lib/copilot/request-helpers'
      )
      vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
        userId: 'user-123',
        isAuthenticated: true,
      })
      mockFrom.mockResolvedValueOnce([])
      const { GET } = await import('@/app/api/copilot/feedback/route')
      const request = new Request('http://localhost:3000/api/copilot/feedback')
      const response = await GET(request as any)
      expect(response.status).toBe(200)
      const responseData = await response.json()
      // requestId/duration come from the mocked createRequestTracker.
      expect(responseData.metadata).toBeDefined()
      expect(responseData.metadata.requestId).toBeDefined()
      expect(responseData.metadata.duration).toBeDefined()
    })
  })
})

View File

@@ -0,0 +1,367 @@
/**
 * Tests for copilot stats API route
 *
 * @vitest-environment node
 */
import { NextRequest } from 'next/server'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import {
  createMockRequest,
  mockCryptoUuid,
  setupCommonApiMocks,
} from '@/app/api/__test-utils__/utils'

describe('Copilot Stats API Route', () => {
  // Stand-in for global fetch: the route forwards stats to the Sim Agent
  // service over HTTP, so every upstream interaction goes through this mock.
  const mockFetch = vi.fn()

  beforeEach(() => {
    // Clear the module registry so the vi.doMock registrations below are
    // picked up by each test's dynamic import of the route module.
    vi.resetModules()
    setupCommonApiMocks()
    mockCryptoUuid()
    global.fetch = mockFetch
    // Stub the copilot request helpers; the auth result is configured per
    // test, and the response factories mirror the real helpers' status codes.
    vi.doMock('@/lib/copilot/request-helpers', () => ({
      authenticateCopilotRequestSessionOnly: vi.fn(),
      createUnauthorizedResponse: vi
        .fn()
        .mockReturnValue(new Response(JSON.stringify({ error: 'Unauthorized' }), { status: 401 })),
      createBadRequestResponse: vi
        .fn()
        .mockImplementation(
          (message) => new Response(JSON.stringify({ error: message }), { status: 400 })
        ),
      createInternalServerErrorResponse: vi
        .fn()
        .mockImplementation(
          (message) => new Response(JSON.stringify({ error: message }), { status: 500 })
        ),
      createRequestTracker: vi.fn().mockReturnValue({
        requestId: 'test-request-id',
        getDuration: vi.fn().mockReturnValue(100),
      }),
    }))
    vi.doMock('@/lib/copilot/constants', () => ({
      SIM_AGENT_API_URL_DEFAULT: 'https://agent.sim.example.com',
    }))
    // SIM_AGENT_API_URL is null so the route falls back to the default above.
    vi.doMock('@/lib/core/config/env', () => ({
      env: {
        SIM_AGENT_API_URL: null,
        COPILOT_API_KEY: 'test-api-key',
      },
    }))
  })

  afterEach(() => {
    vi.clearAllMocks()
    vi.restoreAllMocks()
  })

  describe('POST', () => {
    it('should return 401 when user is not authenticated', async () => {
      const { authenticateCopilotRequestSessionOnly } = await import(
        '@/lib/copilot/request-helpers'
      )
      vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
        userId: null,
        isAuthenticated: false,
      })
      const req = createMockRequest('POST', {
        messageId: 'message-123',
        diffCreated: true,
        diffAccepted: false,
      })
      const { POST } = await import('@/app/api/copilot/stats/route')
      const response = await POST(req)
      expect(response.status).toBe(401)
      const responseData = await response.json()
      expect(responseData).toEqual({ error: 'Unauthorized' })
    })

    it('should successfully forward stats to Sim Agent', async () => {
      const { authenticateCopilotRequestSessionOnly } = await import(
        '@/lib/copilot/request-helpers'
      )
      vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
        userId: 'user-123',
        isAuthenticated: true,
      })
      mockFetch.mockResolvedValueOnce({
        ok: true,
        json: () => Promise.resolve({ success: true }),
      })
      const req = createMockRequest('POST', {
        messageId: 'message-123',
        diffCreated: true,
        diffAccepted: true,
      })
      const { POST } = await import('@/app/api/copilot/stats/route')
      const response = await POST(req)
      expect(response.status).toBe(200)
      const responseData = await response.json()
      expect(responseData).toEqual({ success: true })
      // Forwarded to the default agent URL with the API key header and the
      // validated payload serialized verbatim.
      expect(mockFetch).toHaveBeenCalledWith(
        'https://agent.sim.example.com/api/stats',
        expect.objectContaining({
          method: 'POST',
          headers: expect.objectContaining({
            'Content-Type': 'application/json',
            'x-api-key': 'test-api-key',
          }),
          body: JSON.stringify({
            messageId: 'message-123',
            diffCreated: true,
            diffAccepted: true,
          }),
        })
      )
    })

    it('should return 400 for invalid request body - missing messageId', async () => {
      const { authenticateCopilotRequestSessionOnly } = await import(
        '@/lib/copilot/request-helpers'
      )
      vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
        userId: 'user-123',
        isAuthenticated: true,
      })
      const req = createMockRequest('POST', {
        diffCreated: true,
        diffAccepted: false,
      })
      const { POST } = await import('@/app/api/copilot/stats/route')
      const response = await POST(req)
      expect(response.status).toBe(400)
      const responseData = await response.json()
      expect(responseData.error).toBe('Invalid request body for copilot stats')
    })

    it('should return 400 for invalid request body - missing diffCreated', async () => {
      const { authenticateCopilotRequestSessionOnly } = await import(
        '@/lib/copilot/request-helpers'
      )
      vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
        userId: 'user-123',
        isAuthenticated: true,
      })
      const req = createMockRequest('POST', {
        messageId: 'message-123',
        diffAccepted: false,
      })
      const { POST } = await import('@/app/api/copilot/stats/route')
      const response = await POST(req)
      expect(response.status).toBe(400)
      const responseData = await response.json()
      expect(responseData.error).toBe('Invalid request body for copilot stats')
    })

    it('should return 400 for invalid request body - missing diffAccepted', async () => {
      const { authenticateCopilotRequestSessionOnly } = await import(
        '@/lib/copilot/request-helpers'
      )
      vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
        userId: 'user-123',
        isAuthenticated: true,
      })
      const req = createMockRequest('POST', {
        messageId: 'message-123',
        diffCreated: true,
      })
      const { POST } = await import('@/app/api/copilot/stats/route')
      const response = await POST(req)
      expect(response.status).toBe(400)
      const responseData = await response.json()
      expect(responseData.error).toBe('Invalid request body for copilot stats')
    })

    it('should return 400 when upstream Sim Agent returns error', async () => {
      const { authenticateCopilotRequestSessionOnly } = await import(
        '@/lib/copilot/request-helpers'
      )
      vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
        userId: 'user-123',
        isAuthenticated: true,
      })
      // Upstream non-ok response carrying an `error` field.
      mockFetch.mockResolvedValueOnce({
        ok: false,
        json: () => Promise.resolve({ error: 'Invalid message ID' }),
      })
      const req = createMockRequest('POST', {
        messageId: 'invalid-message',
        diffCreated: true,
        diffAccepted: false,
      })
      const { POST } = await import('@/app/api/copilot/stats/route')
      const response = await POST(req)
      expect(response.status).toBe(400)
      const responseData = await response.json()
      expect(responseData).toEqual({ success: false, error: 'Invalid message ID' })
    })

    it('should handle upstream error with message field', async () => {
      const { authenticateCopilotRequestSessionOnly } = await import(
        '@/lib/copilot/request-helpers'
      )
      vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
        userId: 'user-123',
        isAuthenticated: true,
      })
      // Same as above but the upstream uses `message` instead of `error`;
      // the route normalizes either into its own `error` field.
      mockFetch.mockResolvedValueOnce({
        ok: false,
        json: () => Promise.resolve({ message: 'Rate limit exceeded' }),
      })
      const req = createMockRequest('POST', {
        messageId: 'message-123',
        diffCreated: true,
        diffAccepted: false,
      })
      const { POST } = await import('@/app/api/copilot/stats/route')
      const response = await POST(req)
      expect(response.status).toBe(400)
      const responseData = await response.json()
      expect(responseData).toEqual({ success: false, error: 'Rate limit exceeded' })
    })

    it('should handle upstream error with no JSON response', async () => {
      const { authenticateCopilotRequestSessionOnly } = await import(
        '@/lib/copilot/request-helpers'
      )
      vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
        userId: 'user-123',
        isAuthenticated: true,
      })
      // Non-ok upstream whose body is not JSON; route falls back to a
      // generic 'Upstream error'.
      mockFetch.mockResolvedValueOnce({
        ok: false,
        json: () => Promise.reject(new Error('Not JSON')),
      })
      const req = createMockRequest('POST', {
        messageId: 'message-123',
        diffCreated: true,
        diffAccepted: false,
      })
      const { POST } = await import('@/app/api/copilot/stats/route')
      const response = await POST(req)
      expect(response.status).toBe(400)
      const responseData = await response.json()
      expect(responseData).toEqual({ success: false, error: 'Upstream error' })
    })

    it('should handle network errors gracefully', async () => {
      const { authenticateCopilotRequestSessionOnly } = await import(
        '@/lib/copilot/request-helpers'
      )
      vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
        userId: 'user-123',
        isAuthenticated: true,
      })
      // fetch itself rejects (e.g. DNS/connection failure) -> 500.
      mockFetch.mockRejectedValueOnce(new Error('Network error'))
      const req = createMockRequest('POST', {
        messageId: 'message-123',
        diffCreated: true,
        diffAccepted: false,
      })
      const { POST } = await import('@/app/api/copilot/stats/route')
      const response = await POST(req)
      expect(response.status).toBe(500)
      const responseData = await response.json()
      expect(responseData.error).toBe('Failed to forward copilot stats')
    })

    it('should handle JSON parsing errors in request body', async () => {
      const { authenticateCopilotRequestSessionOnly } = await import(
        '@/lib/copilot/request-helpers'
      )
      vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
        userId: 'user-123',
        isAuthenticated: true,
      })
      // Raw NextRequest with a malformed body; this route reports parse
      // failures as a 400 validation error (unlike the feedback route).
      const req = new NextRequest('http://localhost:3000/api/copilot/stats', {
        method: 'POST',
        body: '{invalid-json',
        headers: {
          'Content-Type': 'application/json',
        },
      })
      const { POST } = await import('@/app/api/copilot/stats/route')
      const response = await POST(req)
      expect(response.status).toBe(400)
      const responseData = await response.json()
      expect(responseData.error).toBe('Invalid request body for copilot stats')
    })

    it('should forward stats with diffCreated=false and diffAccepted=false', async () => {
      const { authenticateCopilotRequestSessionOnly } = await import(
        '@/lib/copilot/request-helpers'
      )
      vi.mocked(authenticateCopilotRequestSessionOnly).mockResolvedValueOnce({
        userId: 'user-123',
        isAuthenticated: true,
      })
      mockFetch.mockResolvedValueOnce({
        ok: true,
        json: () => Promise.resolve({ success: true }),
      })
      // Guards against treating `false` booleans as missing fields.
      const req = createMockRequest('POST', {
        messageId: 'message-456',
        diffCreated: false,
        diffAccepted: false,
      })
      const { POST } = await import('@/app/api/copilot/stats/route')
      const response = await POST(req)
      expect(response.status).toBe(200)
      expect(mockFetch).toHaveBeenCalledWith(
        expect.any(String),
        expect.objectContaining({
          body: JSON.stringify({
            messageId: 'message-456',
            diffCreated: false,
            diffAccepted: false,
          }),
        })
      )
    })
  })
})

View File

@@ -31,7 +31,7 @@ export async function GET(
const payload = run.payload as any
if (payload?.workflowId) {
const { verifyWorkflowAccess } = await import('@/socket-server/middleware/permissions')
const { verifyWorkflowAccess } = await import('@/socket/middleware/permissions')
const accessCheck = await verifyWorkflowAccess(authenticatedUserId, payload.workflowId)
if (!accessCheck.hasAccess) {
logger.warn(`[${requestId}] User ${authenticatedUserId} denied access to task ${taskId}`, {

View File

@@ -100,7 +100,12 @@ export async function PUT(
try {
const validatedData = UpdateChunkSchema.parse(body)
const updatedChunk = await updateChunk(chunkId, validatedData, requestId)
const updatedChunk = await updateChunk(
chunkId,
validatedData,
requestId,
accessCheck.knowledgeBase?.workspaceId
)
logger.info(
`[${requestId}] Chunk updated: ${chunkId} in document ${documentId} in knowledge base ${knowledgeBaseId}`

View File

@@ -184,7 +184,8 @@ export async function POST(
documentId,
docTags,
validatedData,
requestId
requestId,
accessCheck.knowledgeBase?.workspaceId
)
let cost = null

View File

@@ -183,11 +183,11 @@ export async function POST(request: NextRequest) {
)
}
// Generate query embedding only if query is provided
const workspaceId = accessChecks.find((ac) => ac?.hasAccess)?.knowledgeBase?.workspaceId
const hasQuery = validatedData.query && validatedData.query.trim().length > 0
// Start embedding generation early and await when needed
const queryEmbeddingPromise = hasQuery
? generateSearchEmbedding(validatedData.query!)
? generateSearchEmbedding(validatedData.query!, undefined, workspaceId)
: Promise.resolve(null)
// Check if any requested knowledge bases were not accessible

View File

@@ -99,7 +99,7 @@ export interface EmbeddingData {
export interface KnowledgeBaseAccessResult {
hasAccess: true
knowledgeBase: Pick<KnowledgeBaseData, 'id' | 'userId'>
knowledgeBase: Pick<KnowledgeBaseData, 'id' | 'userId' | 'workspaceId'>
}
export interface KnowledgeBaseAccessDenied {
@@ -113,7 +113,7 @@ export type KnowledgeBaseAccessCheck = KnowledgeBaseAccessResult | KnowledgeBase
export interface DocumentAccessResult {
hasAccess: true
document: DocumentData
knowledgeBase: Pick<KnowledgeBaseData, 'id' | 'userId'>
knowledgeBase: Pick<KnowledgeBaseData, 'id' | 'userId' | 'workspaceId'>
}
export interface DocumentAccessDenied {
@@ -128,7 +128,7 @@ export interface ChunkAccessResult {
hasAccess: true
chunk: EmbeddingData
document: DocumentData
knowledgeBase: Pick<KnowledgeBaseData, 'id' | 'userId'>
knowledgeBase: Pick<KnowledgeBaseData, 'id' | 'userId' | 'workspaceId'>
}
export interface ChunkAccessDenied {

View File

@@ -7,7 +7,6 @@ import { createLogger } from '@/lib/logs/console/logger'
import { refreshTokenIfNeeded } from '@/app/api/auth/oauth/utils'
import type { StreamingExecution } from '@/executor/types'
import { executeProviderRequest } from '@/providers'
import { getApiKey } from '@/providers/utils'
const logger = createLogger('ProvidersAPI')
@@ -80,23 +79,20 @@ export async function POST(request: NextRequest) {
verbosity,
})
let finalApiKey: string
let finalApiKey: string | undefined = apiKey
try {
if (provider === 'vertex' && vertexCredential) {
finalApiKey = await resolveVertexCredential(requestId, vertexCredential)
} else {
finalApiKey = getApiKey(provider, model, apiKey)
}
} catch (error) {
logger.error(`[${requestId}] Failed to get API key:`, {
logger.error(`[${requestId}] Failed to resolve Vertex credential:`, {
provider,
model,
error: error instanceof Error ? error.message : String(error),
hasProvidedApiKey: !!apiKey,
hasVertexCredential: !!vertexCredential,
})
return NextResponse.json(
{ error: error instanceof Error ? error.message : 'API key error' },
{ error: error instanceof Error ? error.message : 'Credential error' },
{ status: 400 }
)
}
@@ -108,7 +104,6 @@ export async function POST(request: NextRequest) {
hasApiKey: !!finalApiKey,
})
// Execute provider request directly with the managed key
const response = await executeProviderRequest(provider, {
model,
systemPrompt,

View File

@@ -144,7 +144,7 @@ describe('Schedule GET API', () => {
it('indicates disabled schedule with failures', async () => {
mockDbChain([
[{ userId: 'user-1', workspaceId: null }],
[{ id: 'sched-1', status: 'disabled', failedCount: 10 }],
[{ id: 'sched-1', status: 'disabled', failedCount: 100 }],
])
const res = await GET(createRequest('http://test/api/schedules?workflowId=wf-1'))

View File

@@ -169,7 +169,7 @@ export async function PUT(request: NextRequest, { params }: { params: Promise<{
if (creatorId !== undefined) updateData.creatorId = creatorId
if (updateState && template.workflowId) {
const { verifyWorkflowAccess } = await import('@/socket-server/middleware/permissions')
const { verifyWorkflowAccess } = await import('@/socket/middleware/permissions')
const { hasAccess: hasWorkflowAccess } = await verifyWorkflowAccess(
session.user.id,
template.workflowId

View File

@@ -39,8 +39,10 @@ export async function POST(request: NextRequest) {
const body = await request.json()
const validated = SearchRequestSchema.parse(body)
if (!env.EXA_API_KEY) {
logger.error(`[${requestId}] EXA_API_KEY not configured`)
const exaApiKey = env.EXA_API_KEY
if (!exaApiKey) {
logger.error(`[${requestId}] No Exa API key available`)
return NextResponse.json(
{ success: false, error: 'Search service not configured' },
{ status: 503 }
@@ -57,7 +59,7 @@ export async function POST(request: NextRequest) {
type: 'auto',
useAutoprompt: true,
highlights: true,
apiKey: env.EXA_API_KEY,
apiKey: exaApiKey,
})
if (!result.success) {

View File

@@ -3,6 +3,7 @@ import { userStats, workflow } from '@sim/db/schema'
import { eq, sql } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import OpenAI, { AzureOpenAI } from 'openai'
import { getBYOKKey } from '@/lib/api-key/byok'
import { getSession } from '@/lib/auth'
import { logModelUsage } from '@/lib/billing/core/usage-log'
import { checkAndBillOverageThreshold } from '@/lib/billing/threshold-billing'
@@ -75,7 +76,8 @@ async function updateUserStatsForWand(
completion_tokens?: number
total_tokens?: number
},
requestId: string
requestId: string,
isBYOK = false
): Promise<void> {
if (!isBillingEnabled) {
logger.debug(`[${requestId}] Billing is disabled, skipping wand usage cost update`)
@@ -93,21 +95,24 @@ async function updateUserStatsForWand(
const completionTokens = usage.completion_tokens || 0
const modelName = useWandAzure ? wandModelName : 'gpt-4o'
const pricing = getModelPricing(modelName)
let costToStore = 0
const costMultiplier = getCostMultiplier()
let modelCost = 0
if (!isBYOK) {
const pricing = getModelPricing(modelName)
const costMultiplier = getCostMultiplier()
let modelCost = 0
if (pricing) {
const inputCost = (promptTokens / 1000000) * pricing.input
const outputCost = (completionTokens / 1000000) * pricing.output
modelCost = inputCost + outputCost
} else {
modelCost = (promptTokens / 1000000) * 0.005 + (completionTokens / 1000000) * 0.015
if (pricing) {
const inputCost = (promptTokens / 1000000) * pricing.input
const outputCost = (completionTokens / 1000000) * pricing.output
modelCost = inputCost + outputCost
} else {
modelCost = (promptTokens / 1000000) * 0.005 + (completionTokens / 1000000) * 0.015
}
costToStore = modelCost * costMultiplier
}
const costToStore = modelCost * costMultiplier
await db
.update(userStats)
.set({
@@ -122,6 +127,7 @@ async function updateUserStatsForWand(
userId,
tokensUsed: totalTokens,
costAdded: costToStore,
isBYOK,
})
await logModelUsage({
@@ -149,14 +155,6 @@ export async function POST(req: NextRequest) {
return NextResponse.json({ success: false, error: 'Unauthorized' }, { status: 401 })
}
if (!client) {
logger.error(`[${requestId}] AI client not initialized. Missing API key.`)
return NextResponse.json(
{ success: false, error: 'Wand generation service is not configured.' },
{ status: 503 }
)
}
try {
const body = (await req.json()) as RequestBody
@@ -170,6 +168,7 @@ export async function POST(req: NextRequest) {
)
}
let workspaceId: string | null = null
if (workflowId) {
const [workflowRecord] = await db
.select({ workspaceId: workflow.workspaceId, userId: workflow.userId })
@@ -182,6 +181,8 @@ export async function POST(req: NextRequest) {
return NextResponse.json({ success: false, error: 'Workflow not found' }, { status: 404 })
}
workspaceId = workflowRecord.workspaceId
if (workflowRecord.workspaceId) {
const permission = await verifyWorkspaceMembership(
session.user.id,
@@ -199,6 +200,28 @@ export async function POST(req: NextRequest) {
}
}
let isBYOK = false
let activeClient = client
let byokApiKey: string | null = null
if (workspaceId && !useWandAzure) {
const byokResult = await getBYOKKey(workspaceId, 'openai')
if (byokResult) {
isBYOK = true
byokApiKey = byokResult.apiKey
activeClient = new OpenAI({ apiKey: byokResult.apiKey })
logger.info(`[${requestId}] Using BYOK OpenAI key for wand generation`)
}
}
if (!activeClient) {
logger.error(`[${requestId}] AI client not initialized. Missing API key.`)
return NextResponse.json(
{ success: false, error: 'Wand generation service is not configured.' },
{ status: 503 }
)
}
const finalSystemPrompt =
systemPrompt ||
'You are a helpful AI assistant. Generate content exactly as requested by the user.'
@@ -241,7 +264,7 @@ export async function POST(req: NextRequest) {
if (useWandAzure) {
headers['api-key'] = azureApiKey!
} else {
headers.Authorization = `Bearer ${openaiApiKey}`
headers.Authorization = `Bearer ${byokApiKey || openaiApiKey}`
}
logger.debug(`[${requestId}] Making streaming request to: ${apiUrl}`)
@@ -310,7 +333,7 @@ export async function POST(req: NextRequest) {
logger.info(`[${requestId}] Received [DONE] signal`)
if (finalUsage) {
await updateUserStatsForWand(session.user.id, finalUsage, requestId)
await updateUserStatsForWand(session.user.id, finalUsage, requestId, isBYOK)
}
controller.enqueue(
@@ -395,7 +418,7 @@ export async function POST(req: NextRequest) {
}
}
const completion = await client.chat.completions.create({
const completion = await activeClient.chat.completions.create({
model: useWandAzure ? wandModelName : 'gpt-4o',
messages: messages,
temperature: 0.3,
@@ -417,7 +440,7 @@ export async function POST(req: NextRequest) {
logger.info(`[${requestId}] Wand generation successful`)
if (completion.usage) {
await updateUserStatsForWand(session.user.id, completion.usage, requestId)
await updateUserStatsForWand(session.user.id, completion.usage, requestId, isBYOK)
}
return NextResponse.json({ success: true, content: generatedContent })

View File

@@ -3,6 +3,8 @@
*
* @vitest-environment node
*/
import { loggerMock } from '@sim/testing'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import {
createMockRequest,
@@ -176,6 +178,8 @@ vi.mock('drizzle-orm/postgres-js', () => ({
vi.mock('postgres', () => vi.fn().mockReturnValue({}))
vi.mock('@/lib/logs/console/logger', () => loggerMock)
process.env.DATABASE_URL = 'postgresql://test:test@localhost:5432/test'
import { POST } from '@/app/api/webhooks/trigger/[path]/route'
@@ -257,9 +261,6 @@ describe('Webhook Trigger API Route', () => {
expect(data.message).toBe('Webhook processed')
})
/**
* Test generic webhook with Bearer token authentication
*/
it('should authenticate with Bearer token when no custom header is configured', async () => {
globalMockData.webhooks.push({
id: 'generic-webhook-id',
@@ -489,7 +490,7 @@ describe('Webhook Trigger API Route', () => {
const headers = {
'Content-Type': 'application/json',
Authorization: 'Bearer exclusive-token', // Correct token but wrong header type
Authorization: 'Bearer exclusive-token',
}
const req = createMockRequest('POST', { event: 'exclusivity.test' }, headers)
const params = Promise.resolve({ path: 'test-path' })
@@ -517,7 +518,7 @@ describe('Webhook Trigger API Route', () => {
const headers = {
'Content-Type': 'application/json',
'X-Wrong-Header': 'correct-token', // Correct token but wrong header name
'X-Wrong-Header': 'correct-token',
}
const req = createMockRequest('POST', { event: 'wrong.header.name.test' }, headers)
const params = Promise.resolve({ path: 'test-path' })

View File

@@ -60,13 +60,20 @@ export async function GET(request: NextRequest, { params }: { params: Promise<{
const { loadWorkflowFromNormalizedTables } = await import('@/lib/workflows/persistence/utils')
const normalizedData = await loadWorkflowFromNormalizedTables(id)
if (normalizedData) {
const [workflowRecord] = await db
.select({ variables: workflow.variables })
.from(workflow)
.where(eq(workflow.id, id))
.limit(1)
const currentState = {
blocks: normalizedData.blocks,
edges: normalizedData.edges,
loops: normalizedData.loops,
parallels: normalizedData.parallels,
variables: workflowRecord?.variables || {},
}
const { hasWorkflowChanged } = await import('@/lib/workflows/utils')
const { hasWorkflowChanged } = await import('@/lib/workflows/comparison')
needsRedeployment = hasWorkflowChanged(currentState as any, active.state as any)
}
}

View File

@@ -7,6 +7,7 @@ import { isTriggerDevEnabled } from '@/lib/core/config/feature-flags'
import { generateRequestId } from '@/lib/core/utils/request'
import { SSE_HEADERS } from '@/lib/core/utils/sse'
import { getBaseUrl } from '@/lib/core/utils/urls'
import { markExecutionCancelled } from '@/lib/execution/cancellation'
import { processInputFileFields } from '@/lib/execution/files'
import { preprocessExecution } from '@/lib/execution/preprocessing'
import { createLogger } from '@/lib/logs/console/logger'
@@ -317,6 +318,7 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
loops: Record<string, any>
parallels: Record<string, any>
deploymentVersionId?: string
variables?: Record<string, any>
} | null = null
let processedInput = input
@@ -326,6 +328,11 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
: await loadDeployedWorkflowState(workflowId)
if (workflowData) {
const deployedVariables =
!shouldUseDraftState && 'variables' in workflowData
? (workflowData as any).variables
: undefined
cachedWorkflowData = {
blocks: workflowData.blocks,
edges: workflowData.edges,
@@ -335,6 +342,7 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
!shouldUseDraftState && 'deploymentVersionId' in workflowData
? (workflowData.deploymentVersionId as string)
: undefined,
variables: deployedVariables,
}
const serializedWorkflow = new Serializer().serializeWorkflow(
@@ -404,11 +412,13 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
workflowStateOverride: effectiveWorkflowStateOverride,
}
const executionVariables = cachedWorkflowData?.variables ?? workflow.variables ?? {}
const snapshot = new ExecutionSnapshot(
metadata,
workflow,
processedInput,
workflow.variables || {},
executionVariables,
selectedOutputs
)
@@ -470,6 +480,8 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
selectedOutputs,
cachedWorkflowData?.blocks || {}
)
const streamVariables = cachedWorkflowData?.variables ?? (workflow as any).variables
const stream = await createStreamingResponse({
requestId,
workflow: {
@@ -477,7 +489,7 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
userId: actorUserId,
workspaceId,
isDeployed: workflow.isDeployed,
variables: (workflow as any).variables,
variables: streamVariables,
},
input: processedInput,
executingUserId: actorUserId,
@@ -496,7 +508,7 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
}
const encoder = new TextEncoder()
let executorInstance: any = null
const abortController = new AbortController()
let isStreamClosed = false
const stream = new ReadableStream<Uint8Array>({
@@ -674,11 +686,13 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
workflowStateOverride: effectiveWorkflowStateOverride,
}
const sseExecutionVariables = cachedWorkflowData?.variables ?? workflow.variables ?? {}
const snapshot = new ExecutionSnapshot(
metadata,
workflow,
processedInput,
workflow.variables || {},
sseExecutionVariables,
selectedOutputs
)
@@ -688,11 +702,9 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
onBlockStart,
onBlockComplete,
onStream,
onExecutorCreated: (executor) => {
executorInstance = executor
},
},
loggingSession,
abortSignal: abortController.signal,
})
if (result.status === 'paused') {
@@ -769,11 +781,9 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
},
cancel() {
isStreamClosed = true
logger.info(`[${requestId}] Client aborted SSE stream, cancelling executor`)
if (executorInstance && typeof executorInstance.cancel === 'function') {
executorInstance.cancel()
}
logger.info(`[${requestId}] Client aborted SSE stream, signalling cancellation`)
abortController.abort()
markExecutionCancelled(executionId).catch(() => {})
},
})

View File

@@ -0,0 +1,47 @@
import { type NextRequest, NextResponse } from 'next/server'
import { checkHybridAuth } from '@/lib/auth/hybrid'
import { markExecutionCancelled } from '@/lib/execution/cancellation'
import { createLogger } from '@/lib/logs/console/logger'
const logger = createLogger('CancelExecutionAPI')
export const runtime = 'nodejs'
export const dynamic = 'force-dynamic'
/**
 * POST /api/workflows/[id]/executions/[executionId]/cancel
 *
 * Requests cancellation of a running workflow execution. The flag is recorded
 * via markExecutionCancelled (Redis-backed); when Redis is unavailable the
 * response indicates that cancellation will rely on the client connection
 * closing instead.
 */
export async function POST(
  req: NextRequest,
  { params }: { params: Promise<{ id: string; executionId: string }> }
) {
  const { id: workflowId, executionId } = await params

  try {
    // Accept session, API-key, or internal auth; a workflow id is not required
    // here because the execution id alone identifies the cancellation target.
    const auth = await checkHybridAuth(req, { requireWorkflowId: false })
    if (!auth.success || !auth.userId) {
      return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 })
    }

    logger.info('Cancel execution requested', { workflowId, executionId, userId: auth.userId })

    // True when the cancellation flag was persisted (Redis reachable).
    const redisAvailable = await markExecutionCancelled(executionId)
    logger.info(
      redisAvailable
        ? 'Execution marked as cancelled in Redis'
        : 'Redis not available, cancellation will rely on connection close',
      { executionId }
    )

    return NextResponse.json({
      success: true,
      executionId,
      redisAvailable,
    })
  } catch (error: any) {
    logger.error('Failed to cancel execution', { workflowId, executionId, error: error.message })
    return NextResponse.json(
      { error: error.message || 'Failed to cancel execution' },
      { status: 500 }
    )
  }
}

View File

@@ -1,10 +1,10 @@
import { db, workflowDeploymentVersion } from '@sim/db'
import { db, workflow, workflowDeploymentVersion } from '@sim/db'
import { and, desc, eq } from 'drizzle-orm'
import type { NextRequest } from 'next/server'
import { generateRequestId } from '@/lib/core/utils/request'
import { createLogger } from '@/lib/logs/console/logger'
import { hasWorkflowChanged } from '@/lib/workflows/comparison'
import { loadWorkflowFromNormalizedTables } from '@/lib/workflows/persistence/utils'
import { hasWorkflowChanged } from '@/lib/workflows/utils'
import { validateWorkflowAccess } from '@/app/api/workflows/middleware'
import { createErrorResponse, createSuccessResponse } from '@/app/api/workflows/utils'
@@ -22,17 +22,12 @@ export async function GET(request: NextRequest, { params }: { params: Promise<{
return createErrorResponse(validation.error.message, validation.error.status)
}
// Check if the workflow has meaningful changes that would require redeployment
let needsRedeployment = false
if (validation.workflow.isDeployed) {
// Get current state from normalized tables (same logic as deployment API)
// Load current state from normalized tables using centralized helper
const normalizedData = await loadWorkflowFromNormalizedTables(id)
if (!normalizedData) {
// Workflow exists but has no blocks in normalized tables (empty workflow or not migrated)
// This is valid state - return success with no redeployment needed
return createSuccessResponse({
isDeployed: validation.workflow.isDeployed,
deployedAt: validation.workflow.deployedAt,
@@ -41,11 +36,18 @@ export async function GET(request: NextRequest, { params }: { params: Promise<{
})
}
const [workflowRecord] = await db
.select({ variables: workflow.variables })
.from(workflow)
.where(eq(workflow.id, id))
.limit(1)
const currentState = {
blocks: normalizedData.blocks,
edges: normalizedData.edges,
loops: normalizedData.loops,
parallels: normalizedData.parallels,
variables: workflowRecord?.variables || {},
lastSaved: Date.now(),
}
@@ -69,6 +71,7 @@ export async function GET(request: NextRequest, { params }: { params: Promise<{
return createSuccessResponse({
isDeployed: validation.workflow.isDeployed,
deployedAt: validation.workflow.deployedAt,
isPublished: validation.workflow.isPublished,
needsRedeployment,
})
} catch (error) {

View File

@@ -1,117 +0,0 @@
import { type NextRequest, NextResponse } from 'next/server'
import { simAgentClient } from '@/lib/copilot/client'
import { generateRequestId } from '@/lib/core/utils/request'
import { createLogger } from '@/lib/logs/console/logger'
import { getAllBlocks } from '@/blocks/registry'
import type { BlockConfig } from '@/blocks/types'
import { resolveOutputType } from '@/blocks/utils'
import { generateLoopBlocks, generateParallelBlocks } from '@/stores/workflows/workflow/utils'
const logger = createLogger('WorkflowYamlAPI')
/**
 * POST — converts a client-supplied workflow state into YAML.
 *
 * Delegates the actual serialization to the sim-agent service, shipping the
 * block registry plus stringified helper functions so the agent can resolve
 * block shapes remotely.
 *
 * Request body: `{ workflowState, subBlockValues }`.
 * Responses:
 *   200 `{ success: true, yaml }`
 *   400 when `workflowState` is missing
 *   upstream status (or 500) when sim-agent fails
 */
export async function POST(request: NextRequest) {
  const requestId = generateRequestId()

  try {
    logger.info(`[${requestId}] Converting workflow JSON to YAML`)

    const body = await request.json()
    // NOTE: an `includeMetadata` flag was accepted here but never read; the
    // unused binding has been removed.
    const { workflowState, subBlockValues } = body

    if (!workflowState) {
      return NextResponse.json(
        { success: false, error: 'workflowState is required' },
        { status: 400 }
      )
    }

    // Ensure loop blocks carry a complete data payload so the serializer
    // never sees a partially-configured loop.
    if (workflowState.blocks) {
      for (const [blockId, block] of Object.entries<any>(workflowState.blocks)) {
        if (block.type !== 'loop') continue

        // Ensure data field exists before filling in defaults.
        block.data ||= {}
        const data = block.data

        // Apply defaults only when unset; an explicit count of 0 is kept.
        if (!data.loopType) data.loopType = 'for'
        if (!data.count && data.count !== 0) data.count = 5
        if (!data.collection) data.collection = ''
        if (!data.maxConcurrency) data.maxConcurrency = 1

        logger.debug(`[${requestId}] Applied defaults to loop block ${blockId}:`, {
          loopType: data.loopType,
          count: data.count,
        })
      }
    }

    // Gather block registry and utilities for sim-agent.
    const blocks = getAllBlocks()
    const blockRegistry = blocks.reduce(
      (acc, block) => {
        const blockType = block.type
        acc[blockType] = {
          ...block,
          id: blockType,
          subBlocks: block.subBlocks || [],
          outputs: block.outputs || {},
        } as any
        return acc
      },
      {} as Record<string, BlockConfig>
    )

    // Call sim-agent directly; utility functions are serialized to source so
    // the agent can evaluate them on its side.
    const result = await simAgentClient.makeRequest('/api/workflow/to-yaml', {
      body: {
        workflowState,
        subBlockValues,
        blockRegistry,
        utilities: {
          generateLoopBlocks: generateLoopBlocks.toString(),
          generateParallelBlocks: generateParallelBlocks.toString(),
          resolveOutputType: resolveOutputType.toString(),
        },
      },
    })

    if (!result.success || !result.data?.yaml) {
      return NextResponse.json(
        {
          success: false,
          error: result.error || 'Failed to generate YAML',
        },
        { status: result.status || 500 }
      )
    }

    logger.info(`[${requestId}] Successfully generated YAML`, {
      yamlLength: result.data.yaml.length,
    })

    return NextResponse.json({
      success: true,
      yaml: result.data.yaml,
    })
  } catch (error) {
    logger.error(`[${requestId}] YAML generation failed`, error)
    return NextResponse.json(
      {
        success: false,
        error: `Failed to generate YAML: ${error instanceof Error ? error.message : 'Unknown error'}`,
      },
      { status: 500 }
    )
  }
}

View File

@@ -1,210 +0,0 @@
import { db } from '@sim/db'
import { workflow } from '@sim/db/schema'
import { eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { getSession } from '@/lib/auth'
import { simAgentClient } from '@/lib/copilot/client'
import { generateRequestId } from '@/lib/core/utils/request'
import { createLogger } from '@/lib/logs/console/logger'
import { loadWorkflowFromNormalizedTables } from '@/lib/workflows/persistence/utils'
import { getUserEntityPermissions } from '@/lib/workspaces/permissions/utils'
import { getAllBlocks } from '@/blocks/registry'
import type { BlockConfig } from '@/blocks/types'
import { resolveOutputType } from '@/blocks/utils'
import { generateLoopBlocks, generateParallelBlocks } from '@/stores/workflows/workflow/utils'
const logger = createLogger('WorkflowYamlExportAPI')
/**
 * GET — exports a stored workflow as YAML.
 *
 * Loads the workflow state from the normalized tables, checks that the caller
 * owns the workflow or has any permission on its workspace, then delegates
 * YAML serialization to the sim-agent service.
 *
 * Query params: `workflowId` (required).
 * Responses: 200 `{ success: true, yaml }`; 400 missing id / no normalized
 * data; 401 no session; 403 no access; 404 workflow not found; 500 otherwise.
 */
export async function GET(request: NextRequest) {
  const requestId = generateRequestId()
  const url = new URL(request.url)
  const workflowId = url.searchParams.get('workflowId')

  try {
    logger.info(`[${requestId}] Exporting workflow YAML from database: ${workflowId}`)

    if (!workflowId) {
      return NextResponse.json({ success: false, error: 'workflowId is required' }, { status: 400 })
    }

    // Get the session for authentication
    const session = await getSession()
    if (!session?.user?.id) {
      logger.warn(`[${requestId}] Unauthorized access attempt for workflow ${workflowId}`)
      return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
    }

    const userId = session.user.id

    // Fetch the workflow from database
    const workflowData = await db
      .select()
      .from(workflow)
      .where(eq(workflow.id, workflowId))
      .then((rows) => rows[0])

    if (!workflowData) {
      logger.warn(`[${requestId}] Workflow ${workflowId} not found`)
      return NextResponse.json({ error: 'Workflow not found' }, { status: 404 })
    }

    // Check if user has access to this workflow
    let hasAccess = false

    // Case 1: User owns the workflow
    if (workflowData.userId === userId) {
      hasAccess = true
    }

    // Case 2: Workflow belongs to a workspace the user has permissions for.
    // Any permission level (not just admin) is sufficient for export.
    if (!hasAccess && workflowData.workspaceId) {
      const userPermission = await getUserEntityPermissions(
        userId,
        'workspace',
        workflowData.workspaceId
      )
      if (userPermission !== null) {
        hasAccess = true
      }
    }

    if (!hasAccess) {
      logger.warn(`[${requestId}] User ${userId} denied access to workflow ${workflowId}`)
      return NextResponse.json({ error: 'Access denied' }, { status: 403 })
    }

    // Try to load from normalized tables first
    logger.debug(`[${requestId}] Attempting to load workflow ${workflowId} from normalized tables`)
    const normalizedData = await loadWorkflowFromNormalizedTables(workflowId)

    let workflowState: any
    // blockId -> subBlockId -> value, extracted below for the serializer.
    const subBlockValues: Record<string, Record<string, any>> = {}

    if (normalizedData) {
      logger.debug(`[${requestId}] Found normalized data for workflow ${workflowId}:`, {
        blocksCount: Object.keys(normalizedData.blocks).length,
        edgesCount: normalizedData.edges.length,
      })

      // Use normalized table data - construct state from normalized tables
      workflowState = {
        deploymentStatuses: {},
        blocks: normalizedData.blocks,
        edges: normalizedData.edges,
        loops: normalizedData.loops,
        parallels: normalizedData.parallels,
        lastSaved: Date.now(),
        isDeployed: workflowData.isDeployed || false,
        deployedAt: workflowData.deployedAt,
      }

      // Extract subblock values from the normalized blocks
      Object.entries(normalizedData.blocks).forEach(([blockId, block]: [string, any]) => {
        subBlockValues[blockId] = {}
        if (block.subBlocks) {
          Object.entries(block.subBlocks).forEach(([subBlockId, subBlock]: [string, any]) => {
            if (subBlock && typeof subBlock === 'object' && 'value' in subBlock) {
              subBlockValues[blockId][subBlockId] = subBlock.value
            }
          })
        }
      })

      logger.info(`[${requestId}] Loaded workflow ${workflowId} from normalized tables`)
    } else {
      return NextResponse.json(
        { success: false, error: 'Workflow has no normalized data' },
        { status: 400 }
      )
    }

    // Ensure loop blocks have their data populated with defaults
    if (workflowState.blocks) {
      Object.entries(workflowState.blocks).forEach(([blockId, block]: [string, any]) => {
        if (block.type === 'loop') {
          // Ensure data field exists
          if (!block.data) {
            block.data = {}
          }

          // Apply defaults if not set (an explicit count of 0 is preserved)
          if (!block.data.loopType) {
            block.data.loopType = 'for'
          }
          if (!block.data.count && block.data.count !== 0) {
            block.data.count = 5
          }
          if (!block.data.collection) {
            block.data.collection = ''
          }
          if (!block.data.maxConcurrency) {
            block.data.maxConcurrency = 1
          }

          logger.debug(`[${requestId}] Applied defaults to loop block ${blockId}:`, {
            loopType: block.data.loopType,
            count: block.data.count,
          })
        }
      })
    }

    // Gather block registry and utilities for sim-agent
    const blocks = getAllBlocks()
    const blockRegistry = blocks.reduce(
      (acc, block) => {
        const blockType = block.type
        acc[blockType] = {
          ...block,
          id: blockType,
          subBlocks: block.subBlocks || [],
          outputs: block.outputs || {},
        } as any
        return acc
      },
      {} as Record<string, BlockConfig>
    )

    // Call sim-agent directly; helper functions are stringified so the agent
    // can evaluate them remotely.
    const result = await simAgentClient.makeRequest('/api/workflow/to-yaml', {
      body: {
        workflowState,
        subBlockValues,
        blockRegistry,
        utilities: {
          generateLoopBlocks: generateLoopBlocks.toString(),
          generateParallelBlocks: generateParallelBlocks.toString(),
          resolveOutputType: resolveOutputType.toString(),
        },
      },
    })

    if (!result.success || !result.data?.yaml) {
      return NextResponse.json(
        {
          success: false,
          error: result.error || 'Failed to generate YAML',
        },
        { status: result.status || 500 }
      )
    }

    logger.info(`[${requestId}] Successfully generated YAML from database`, {
      yamlLength: result.data.yaml.length,
    })

    return NextResponse.json({
      success: true,
      yaml: result.data.yaml,
    })
  } catch (error) {
    logger.error(`[${requestId}] YAML export failed`, error)
    return NextResponse.json(
      {
        success: false,
        error: `Failed to export YAML: ${error instanceof Error ? error.message : 'Unknown error'}`,
      },
      { status: 500 }
    )
  }
}

View File

@@ -0,0 +1,256 @@
import { db } from '@sim/db'
import { workspace, workspaceBYOKKeys } from '@sim/db/schema'
import { and, eq } from 'drizzle-orm'
import { nanoid } from 'nanoid'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getSession } from '@/lib/auth'
import { decryptSecret, encryptSecret } from '@/lib/core/security/encryption'
import { generateRequestId } from '@/lib/core/utils/request'
import { createLogger } from '@/lib/logs/console/logger'
import { getUserEntityPermissions } from '@/lib/workspaces/permissions/utils'
const logger = createLogger('WorkspaceBYOKKeysAPI')
const VALID_PROVIDERS = ['openai', 'anthropic', 'google', 'mistral'] as const
const UpsertKeySchema = z.object({
providerId: z.enum(VALID_PROVIDERS),
apiKey: z.string().min(1, 'API key is required'),
})
const DeleteKeySchema = z.object({
providerId: z.enum(VALID_PROVIDERS),
})
/**
 * Builds a display-safe preview of an API key.
 * Keys of 8 characters or fewer are fully hidden; longer keys keep a short
 * prefix and 4-character suffix so admins can recognize which key is stored.
 */
function maskApiKey(key: string): string {
  if (key.length <= 8) {
    return '•'.repeat(8)
  }
  // Short keys reveal 4 leading characters, longer keys 6.
  const prefixLength = key.length <= 12 ? 4 : 6
  return `${key.slice(0, prefixLength)}...${key.slice(-4)}`
}
/**
 * GET /api/workspaces/[id]/byok-keys
 *
 * Lists the workspace's BYOK provider keys. Keys are stored encrypted and are
 * decrypted only long enough to produce a masked preview — raw keys are never
 * returned. Any workspace member (any permission level) may list keys.
 */
export async function GET(request: NextRequest, { params }: { params: Promise<{ id: string }> }) {
  const requestId = generateRequestId()
  const workspaceId = (await params).id

  try {
    const session = await getSession()
    if (!session?.user?.id) {
      logger.warn(`[${requestId}] Unauthorized BYOK keys access attempt`)
      return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
    }
    const userId = session.user.id

    // The workspace must exist before membership is checked.
    const workspaceRows = await db
      .select()
      .from(workspace)
      .where(eq(workspace.id, workspaceId))
      .limit(1)
    if (!workspaceRows.length) {
      return NextResponse.json({ error: 'Workspace not found' }, { status: 404 })
    }

    const permission = await getUserEntityPermissions(userId, 'workspace', workspaceId)
    if (!permission) {
      return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
    }

    const byokKeys = await db
      .select({
        id: workspaceBYOKKeys.id,
        providerId: workspaceBYOKKeys.providerId,
        encryptedApiKey: workspaceBYOKKeys.encryptedApiKey,
        createdBy: workspaceBYOKKeys.createdBy,
        createdAt: workspaceBYOKKeys.createdAt,
        updatedAt: workspaceBYOKKeys.updatedAt,
      })
      .from(workspaceBYOKKeys)
      .where(eq(workspaceBYOKKeys.workspaceId, workspaceId))
      .orderBy(workspaceBYOKKeys.providerId)

    // Decrypt each key only to build its masked preview; a decryption failure
    // degrades to a fully-hidden placeholder rather than failing the request.
    const formattedKeys = await Promise.all(
      byokKeys.map(async (key) => {
        let maskedKey = '••••••••'
        try {
          const { decrypted } = await decryptSecret(key.encryptedApiKey)
          maskedKey = maskApiKey(decrypted)
        } catch (error) {
          logger.error(`[${requestId}] Failed to decrypt BYOK key for provider ${key.providerId}`, {
            error,
          })
        }
        return {
          id: key.id,
          providerId: key.providerId,
          maskedKey,
          createdBy: key.createdBy,
          createdAt: key.createdAt,
          updatedAt: key.updatedAt,
        }
      })
    )

    return NextResponse.json({ keys: formattedKeys })
  } catch (error: unknown) {
    logger.error(`[${requestId}] BYOK keys GET error`, error)
    return NextResponse.json(
      { error: error instanceof Error ? error.message : 'Failed to load BYOK keys' },
      { status: 500 }
    )
  }
}
/**
 * POST /api/workspaces/[id]/byok-keys
 *
 * Creates or replaces (upserts) a BYOK API key for one provider in the
 * workspace. The key is encrypted at rest; only workspace admins may call
 * this. The response echoes a masked preview of the key, never the raw value.
 *
 * Body (validated by UpsertKeySchema): { providerId, apiKey }.
 * Responses: 200 on success; 400 invalid body; 401 no session; 403 non-admin.
 */
export async function POST(request: NextRequest, { params }: { params: Promise<{ id: string }> }) {
  const requestId = generateRequestId()
  const workspaceId = (await params).id

  try {
    const session = await getSession()
    if (!session?.user?.id) {
      logger.warn(`[${requestId}] Unauthorized BYOK key creation attempt`)
      return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
    }
    const userId = session.user.id

    // Only admins may manage workspace-wide provider credentials.
    const permission = await getUserEntityPermissions(userId, 'workspace', workspaceId)
    if (permission !== 'admin') {
      return NextResponse.json(
        { error: 'Only workspace admins can manage BYOK keys' },
        { status: 403 }
      )
    }

    // Zod parse throws on invalid input; handled by the ZodError branch below.
    const body = await request.json()
    const { providerId, apiKey } = UpsertKeySchema.parse(body)

    // Keys are stored encrypted; the plaintext is only used for the mask.
    const { encrypted } = await encryptSecret(apiKey)

    // One key per (workspace, provider): check for an existing row to decide
    // between update and insert.
    const existingKey = await db
      .select()
      .from(workspaceBYOKKeys)
      .where(
        and(
          eq(workspaceBYOKKeys.workspaceId, workspaceId),
          eq(workspaceBYOKKeys.providerId, providerId)
        )
      )
      .limit(1)

    if (existingKey.length > 0) {
      // Replace the stored ciphertext in place, keeping the row id stable.
      await db
        .update(workspaceBYOKKeys)
        .set({
          encryptedApiKey: encrypted,
          updatedAt: new Date(),
        })
        .where(eq(workspaceBYOKKeys.id, existingKey[0].id))

      logger.info(`[${requestId}] Updated BYOK key for ${providerId} in workspace ${workspaceId}`)

      return NextResponse.json({
        success: true,
        key: {
          id: existingKey[0].id,
          providerId,
          maskedKey: maskApiKey(apiKey),
          updatedAt: new Date(),
        },
      })
    }

    const [newKey] = await db
      .insert(workspaceBYOKKeys)
      .values({
        id: nanoid(),
        workspaceId,
        providerId,
        encryptedApiKey: encrypted,
        createdBy: userId,
        createdAt: new Date(),
        updatedAt: new Date(),
      })
      .returning({
        id: workspaceBYOKKeys.id,
        providerId: workspaceBYOKKeys.providerId,
        createdAt: workspaceBYOKKeys.createdAt,
      })

    logger.info(`[${requestId}] Created BYOK key for ${providerId} in workspace ${workspaceId}`)

    return NextResponse.json({
      success: true,
      key: {
        ...newKey,
        maskedKey: maskApiKey(apiKey),
      },
    })
  } catch (error: unknown) {
    logger.error(`[${requestId}] BYOK key POST error`, error)
    if (error instanceof z.ZodError) {
      return NextResponse.json({ error: error.errors[0].message }, { status: 400 })
    }
    return NextResponse.json(
      { error: error instanceof Error ? error.message : 'Failed to save BYOK key' },
      { status: 500 }
    )
  }
}
/**
 * DELETE /api/workspaces/[id]/byok-keys
 *
 * Removes the BYOK key for one provider from the workspace. Only workspace
 * admins may call this. The operation is idempotent — deleting a key that
 * does not exist still reports success.
 *
 * Body (validated by DeleteKeySchema): { providerId }.
 * Responses: 200 on success; 400 invalid body; 401 no session; 403 non-admin.
 */
export async function DELETE(
  request: NextRequest,
  { params }: { params: Promise<{ id: string }> }
) {
  const requestId = generateRequestId()
  const workspaceId = (await params).id

  try {
    const session = await getSession()
    if (!session?.user?.id) {
      logger.warn(`[${requestId}] Unauthorized BYOK key deletion attempt`)
      return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
    }
    const userId = session.user.id

    // Only admins may manage workspace-wide provider credentials.
    const permission = await getUserEntityPermissions(userId, 'workspace', workspaceId)
    if (permission !== 'admin') {
      return NextResponse.json(
        { error: 'Only workspace admins can manage BYOK keys' },
        { status: 403 }
      )
    }

    // Zod parse throws on invalid input; handled by the ZodError branch below.
    const body = await request.json()
    const { providerId } = DeleteKeySchema.parse(body)

    // The query result is not inspected (previously bound to an unused local).
    await db
      .delete(workspaceBYOKKeys)
      .where(
        and(
          eq(workspaceBYOKKeys.workspaceId, workspaceId),
          eq(workspaceBYOKKeys.providerId, providerId)
        )
      )

    logger.info(`[${requestId}] Deleted BYOK key for ${providerId} from workspace ${workspaceId}`)
    return NextResponse.json({ success: true })
  } catch (error: unknown) {
    logger.error(`[${requestId}] BYOK key DELETE error`, error)
    if (error instanceof z.ZodError) {
      return NextResponse.json({ error: error.errors[0].message }, { status: 400 })
    }
    return NextResponse.json(
      { error: error instanceof Error ? error.message : 'Failed to delete BYOK key' },
      { status: 500 }
    )
  }
}

View File

@@ -1,3 +1,4 @@
import { createSession, createWorkspaceRecord, loggerMock } from '@sim/testing'
import { NextRequest } from 'next/server'
import { beforeEach, describe, expect, it, vi } from 'vitest'
@@ -59,14 +60,7 @@ vi.mock('@/lib/workspaces/permissions/utils', () => ({
mockHasWorkspaceAdminAccess(userId, workspaceId),
}))
vi.mock('@/lib/logs/console/logger', () => ({
createLogger: vi.fn().mockReturnValue({
debug: vi.fn(),
info: vi.fn(),
warn: vi.fn(),
error: vi.fn(),
}),
}))
vi.mock('@/lib/logs/console/logger', () => loggerMock)
vi.mock('@/lib/core/utils/urls', () => ({
getBaseUrl: vi.fn().mockReturnValue('https://test.sim.ai'),
@@ -127,9 +121,14 @@ const mockUser = {
name: 'Test User',
}
const mockWorkspace = {
const mockWorkspaceData = createWorkspaceRecord({
id: 'workspace-456',
name: 'Test Workspace',
})
const mockWorkspace = {
id: mockWorkspaceData.id,
name: mockWorkspaceData.name,
}
const mockInvitation = {
@@ -140,7 +139,7 @@ const mockInvitation = {
status: 'pending',
token: 'token-abc123',
permissions: 'read',
expiresAt: new Date(Date.now() + 86400000), // 1 day from now
expiresAt: new Date(Date.now() + 86400000),
createdAt: new Date(),
updatedAt: new Date(),
}
@@ -154,7 +153,8 @@ describe('Workspace Invitation [invitationId] API Route', () => {
describe('GET /api/workspaces/invitations/[invitationId]', () => {
it('should return invitation details when called without token', async () => {
mockGetSession.mockResolvedValue({ user: mockUser })
const session = createSession({ userId: mockUser.id, email: mockUser.email })
mockGetSession.mockResolvedValue(session)
dbSelectResults = [[mockInvitation], [mockWorkspace]]
const request = new NextRequest('http://localhost/api/workspaces/invitations/invitation-789')
@@ -202,15 +202,18 @@ describe('Workspace Invitation [invitationId] API Route', () => {
})
it('should accept invitation when called with valid token', async () => {
mockGetSession.mockResolvedValue({
user: { ...mockUser, email: 'invited@example.com' },
const session = createSession({
userId: mockUser.id,
email: 'invited@example.com',
name: mockUser.name,
})
mockGetSession.mockResolvedValue(session)
dbSelectResults = [
[mockInvitation], // invitation lookup
[mockWorkspace], // workspace lookup
[{ ...mockUser, email: 'invited@example.com' }], // user lookup
[], // existing permission check (empty = no existing)
[mockInvitation],
[mockWorkspace],
[{ ...mockUser, email: 'invited@example.com' }],
[],
]
const request = new NextRequest(
@@ -225,13 +228,16 @@ describe('Workspace Invitation [invitationId] API Route', () => {
})
it('should redirect to error page when invitation expired', async () => {
mockGetSession.mockResolvedValue({
user: { ...mockUser, email: 'invited@example.com' },
const session = createSession({
userId: mockUser.id,
email: 'invited@example.com',
name: mockUser.name,
})
mockGetSession.mockResolvedValue(session)
const expiredInvitation = {
...mockInvitation,
expiresAt: new Date(Date.now() - 86400000), // 1 day ago
expiresAt: new Date(Date.now() - 86400000),
}
dbSelectResults = [[expiredInvitation], [mockWorkspace]]
@@ -250,9 +256,12 @@ describe('Workspace Invitation [invitationId] API Route', () => {
})
it('should redirect to error page when email mismatch', async () => {
mockGetSession.mockResolvedValue({
user: { ...mockUser, email: 'wrong@example.com' },
const session = createSession({
userId: mockUser.id,
email: 'wrong@example.com',
name: mockUser.name,
})
mockGetSession.mockResolvedValue(session)
dbSelectResults = [
[mockInvitation],
@@ -274,8 +283,9 @@ describe('Workspace Invitation [invitationId] API Route', () => {
})
it('should return 404 when invitation not found', async () => {
mockGetSession.mockResolvedValue({ user: mockUser })
dbSelectResults = [[]] // Empty result
const session = createSession({ userId: mockUser.id, email: mockUser.email })
mockGetSession.mockResolvedValue(session)
dbSelectResults = [[]]
const request = new NextRequest('http://localhost/api/workspaces/invitations/non-existent')
const params = Promise.resolve({ invitationId: 'non-existent' })
@@ -306,7 +316,8 @@ describe('Workspace Invitation [invitationId] API Route', () => {
})
it('should return 404 when invitation does not exist', async () => {
mockGetSession.mockResolvedValue({ user: mockUser })
const session = createSession({ userId: mockUser.id, email: mockUser.email })
mockGetSession.mockResolvedValue(session)
dbSelectResults = [[]]
const request = new NextRequest('http://localhost/api/workspaces/invitations/non-existent', {
@@ -322,7 +333,8 @@ describe('Workspace Invitation [invitationId] API Route', () => {
})
it('should return 403 when user lacks admin access', async () => {
mockGetSession.mockResolvedValue({ user: mockUser })
const session = createSession({ userId: mockUser.id, email: mockUser.email })
mockGetSession.mockResolvedValue(session)
mockHasWorkspaceAdminAccess.mockResolvedValue(false)
dbSelectResults = [[mockInvitation]]
@@ -341,7 +353,8 @@ describe('Workspace Invitation [invitationId] API Route', () => {
})
it('should return 400 when trying to delete non-pending invitation', async () => {
mockGetSession.mockResolvedValue({ user: mockUser })
const session = createSession({ userId: mockUser.id, email: mockUser.email })
mockGetSession.mockResolvedValue(session)
mockHasWorkspaceAdminAccess.mockResolvedValue(true)
const acceptedInvitation = { ...mockInvitation, status: 'accepted' }
@@ -361,7 +374,8 @@ describe('Workspace Invitation [invitationId] API Route', () => {
})
it('should successfully delete pending invitation when user has admin access', async () => {
mockGetSession.mockResolvedValue({ user: mockUser })
const session = createSession({ userId: mockUser.id, email: mockUser.email })
mockGetSession.mockResolvedValue(session)
mockHasWorkspaceAdminAccess.mockResolvedValue(true)
dbSelectResults = [[mockInvitation]]

View File

@@ -117,7 +117,7 @@ export default function ChatClient({ identifier }: { identifier: string }) {
const [error, setError] = useState<string | null>(null)
const messagesEndRef = useRef<HTMLDivElement>(null)
const messagesContainerRef = useRef<HTMLDivElement>(null)
const [starCount, setStarCount] = useState('24k')
const [starCount, setStarCount] = useState('24.4k')
const [conversationId, setConversationId] = useState('')
const [showScrollButton, setShowScrollButton] = useState(false)

View File

@@ -45,6 +45,7 @@ import {
ActionBar,
AddDocumentsModal,
BaseTagsModal,
DocumentTagsCell,
} from '@/app/workspace/[workspaceId]/knowledge/[id]/components'
import { getDocumentIcon } from '@/app/workspace/[workspaceId]/knowledge/components'
import { useUserPermissionsContext } from '@/app/workspace/[workspaceId]/providers/workspace-permissions-provider'
@@ -53,6 +54,7 @@ import {
useKnowledgeBaseDocuments,
useKnowledgeBasesList,
} from '@/hooks/use-knowledge'
import { useKnowledgeBaseTagDefinitions } from '@/hooks/use-knowledge-base-tag-definitions'
import type { DocumentData } from '@/stores/knowledge/store'
const logger = createLogger('KnowledgeBase')
@@ -83,18 +85,17 @@ function DocumentTableRowSkeleton() {
<Skeleton className='h-[15px] w-[24px]' />
</TableCell>
<TableCell className='px-[12px] py-[8px]'>
<div className='flex flex-col justify-center'>
<div className='flex items-center font-medium text-[12px]'>
<Skeleton className='h-[15px] w-[50px]' />
<span className='mx-[6px] hidden text-[var(--text-muted)] xl:inline'>|</span>
<Skeleton className='hidden h-[15px] w-[70px] xl:inline-block' />
</div>
<Skeleton className='mt-[2px] h-[15px] w-[40px] lg:hidden' />
</div>
<Skeleton className='h-[15px] w-[60px]' />
</TableCell>
<TableCell className='px-[12px] py-[8px]'>
<Skeleton className='h-[24px] w-[64px] rounded-md' />
</TableCell>
<TableCell className='px-[12px] py-[8px]'>
<div className='flex items-center gap-[4px]'>
<Skeleton className='h-[18px] w-[40px] rounded-full' />
<Skeleton className='h-[18px] w-[40px] rounded-full' />
</div>
</TableCell>
<TableCell className='py-[8px] pr-[4px] pl-[12px]'>
<div className='flex items-center gap-[4px]'>
<Skeleton className='h-[28px] w-[28px] rounded-[4px]' />
@@ -127,13 +128,16 @@ function DocumentTableSkeleton({ rowCount = 5 }: { rowCount?: number }) {
<TableHead className='hidden w-[8%] px-[12px] py-[8px] text-[12px] text-[var(--text-secondary)] lg:table-cell'>
Chunks
</TableHead>
<TableHead className='w-[16%] px-[12px] py-[8px] text-[12px] text-[var(--text-secondary)]'>
<TableHead className='w-[11%] px-[12px] py-[8px] text-[12px] text-[var(--text-secondary)]'>
Uploaded
</TableHead>
<TableHead className='w-[12%] px-[12px] py-[8px] text-[12px] text-[var(--text-secondary)]'>
<TableHead className='w-[10%] px-[12px] py-[8px] text-[12px] text-[var(--text-secondary)]'>
Status
</TableHead>
<TableHead className='w-[14%] py-[8px] pr-[4px] pl-[12px] text-[12px] text-[var(--text-secondary)]'>
<TableHead className='w-[12%] px-[12px] py-[8px] text-[12px] text-[var(--text-secondary)]'>
Tags
</TableHead>
<TableHead className='w-[11%] py-[8px] pr-[4px] pl-[12px] text-[12px] text-[var(--text-secondary)]'>
Actions
</TableHead>
</TableRow>
@@ -379,6 +383,8 @@ export function KnowledgeBase({
sortOrder,
})
const { tagDefinitions } = useKnowledgeBaseTagDefinitions(id)
const router = useRouter()
const knowledgeBaseName = knowledgeBase?.name || passedKnowledgeBaseName || 'Knowledge Base'
@@ -1061,9 +1067,12 @@ export function KnowledgeBase({
{renderSortableHeader('fileSize', 'Size', 'w-[8%]')}
{renderSortableHeader('tokenCount', 'Tokens', 'w-[8%]')}
{renderSortableHeader('chunkCount', 'Chunks', 'hidden w-[8%] lg:table-cell')}
{renderSortableHeader('uploadedAt', 'Uploaded', 'w-[16%]')}
{renderSortableHeader('processingStatus', 'Status', 'w-[12%]')}
<TableHead className='w-[14%] py-[8px] pr-[4px] pl-[12px] text-[12px] text-[var(--text-secondary)]'>
{renderSortableHeader('uploadedAt', 'Uploaded', 'w-[11%]')}
{renderSortableHeader('processingStatus', 'Status', 'w-[10%]')}
<TableHead className='w-[12%] px-[12px] py-[8px] text-[12px] text-[var(--text-secondary)]'>
Tags
</TableHead>
<TableHead className='w-[11%] py-[8px] pr-[4px] pl-[12px] text-[12px] text-[var(--text-secondary)]'>
Actions
</TableHead>
</TableRow>
@@ -1135,20 +1144,16 @@ export function KnowledgeBase({
: '—'}
</TableCell>
<TableCell className='px-[12px] py-[8px]'>
<div className='flex flex-col justify-center'>
<div className='flex items-center font-medium text-[12px]'>
<span>{format(new Date(doc.uploadedAt), 'h:mm a')}</span>
<span className='mx-[6px] hidden text-[var(--text-muted)] xl:inline'>
|
<Tooltip.Root>
<Tooltip.Trigger asChild>
<span className='text-[12px] text-[var(--text-muted)]'>
{format(new Date(doc.uploadedAt), 'MMM d')}
</span>
<span className='hidden text-[var(--text-muted)] xl:inline'>
{format(new Date(doc.uploadedAt), 'MMM d, yyyy')}
</span>
</div>
<div className='mt-[2px] text-[12px] text-[var(--text-muted)] lg:hidden'>
{format(new Date(doc.uploadedAt), 'MMM d')}
</div>
</div>
</Tooltip.Trigger>
<Tooltip.Content side='top'>
{format(new Date(doc.uploadedAt), 'MMM d, yyyy h:mm a')}
</Tooltip.Content>
</Tooltip.Root>
</TableCell>
<TableCell className='px-[12px] py-[8px]'>
{doc.processingStatus === 'failed' && doc.processingError ? (
@@ -1166,6 +1171,9 @@ export function KnowledgeBase({
<div className={statusDisplay.className}>{statusDisplay.text}</div>
)}
</TableCell>
<TableCell className='px-[12px] py-[8px]'>
<DocumentTagsCell document={doc} tagDefinitions={tagDefinitions} />
</TableCell>
<TableCell className='py-[8px] pr-[4px] pl-[12px]'>
<div className='flex items-center gap-[4px]'>
{doc.processingStatus === 'failed' && (

View File

@@ -0,0 +1,163 @@
'use client'
import { useMemo } from 'react'
import { format } from 'date-fns'
import { Badge, Popover, PopoverAnchor, PopoverContent, Tooltip } from '@/components/emcn'
import type { TagDefinition } from '@/hooks/use-knowledge-base-tag-definitions'
import type { DocumentData } from '@/stores/knowledge/store'
/**
 * All tag slot keys a document row can hold values for: seven free-text
 * slots, five numeric slots, two date slots, and three boolean slots.
 * The prefix of each slot name encodes its default field type
 * (see getFieldType below).
 */
const TAG_SLOTS = [
  'tag1',
  'tag2',
  'tag3',
  'tag4',
  'tag5',
  'tag6',
  'tag7',
  'number1',
  'number2',
  'number3',
  'number4',
  'number5',
  'date1',
  'date2',
  'boolean1',
  'boolean2',
  'boolean3',
] as const
/** Union of the literal slot names above (e.g. 'tag1' | ... | 'boolean3'). */
type TagSlot = (typeof TAG_SLOTS)[number]
/** A resolved, display-ready tag value extracted from a document. */
interface TagValue {
  // Which slot the value came from.
  slot: TagSlot
  // Human-readable label: the KB tag definition's displayName, or the slot name.
  displayName: string
  // Value already formatted for display (see formatTagValue).
  value: string
  // Field type used to format the value ('text' | 'number' | 'date' | 'boolean').
  fieldType: string
}
/** Props for DocumentTagsCell. */
interface DocumentTagsCellProps {
  // Document row whose tag slots are rendered.
  document: DocumentData
  // Knowledge-base tag definitions providing display names and field types.
  tagDefinitions: TagDefinition[]
}
/**
 * Converts a raw tag value into a human-readable display string.
 *
 * @param value - raw slot value; null/undefined yields an empty string
 * @param fieldType - 'date' | 'boolean' | 'number' | anything else (text)
 * @returns formatted string; falls back to String(value) when a date cannot
 *          be parsed or the field type is unrecognized
 */
function formatTagValue(value: unknown, fieldType: string): string {
  if (value == null) return ''
  if (fieldType === 'date') {
    // date-fns throws on an invalid Date; fall back to the raw string.
    try {
      return format(new Date(value as string), 'MMM d, yyyy')
    } catch {
      return String(value)
    }
  }
  if (fieldType === 'boolean') {
    return value ? 'Yes' : 'No'
  }
  if (fieldType === 'number') {
    // Only true numbers get locale grouping; other types are stringified.
    if (typeof value === 'number') return value.toLocaleString()
    return String(value)
  }
  return String(value)
}
/**
 * Derives the field type implied by a tag slot's name prefix.
 *
 * @param slot - one of the TAG_SLOTS keys
 * @returns 'number' | 'date' | 'boolean' for the corresponding prefixes,
 *          otherwise 'text' (covers 'tag*' slots and any unknown name)
 */
function getFieldType(slot: TagSlot): string {
  const prefixToType: Array<[string, string]> = [
    ['number', 'number'],
    ['date', 'date'],
    ['boolean', 'boolean'],
  ]
  for (const [prefix, type] of prefixToType) {
    if (slot.startsWith(prefix)) return type
  }
  // 'tag*' slots (and anything unrecognized) are plain text.
  return 'text'
}
/**
 * Cell component that displays document tags as compact badges with overflow popover.
 *
 * Shows up to two tag values inline as badges (each with a tooltip giving
 * "displayName: value"); any remaining tags collapse behind a "+N" badge
 * that opens a popover listing every tag with its display name.
 */
export function DocumentTagsCell({ document, tagDefinitions }: DocumentTagsCellProps) {
  // Collect non-empty tag values from the document, pairing each slot with
  // its display name and field type from the KB tag definitions. Falls back
  // to the slot name / prefix-derived type when no definition matches.
  const tags = useMemo(() => {
    const result: TagValue[] = []
    for (const slot of TAG_SLOTS) {
      const value = document[slot]
      if (value === null || value === undefined) continue
      const definition = tagDefinitions.find((def) => def.tagSlot === slot)
      const fieldType = definition?.fieldType || getFieldType(slot)
      const formattedValue = formatTagValue(value, fieldType)
      // Skip values that format to an empty string (e.g. empty text).
      if (!formattedValue) continue
      result.push({
        slot,
        displayName: definition?.displayName || slot,
        value: formattedValue,
        fieldType,
      })
    }
    return result
  }, [document, tagDefinitions])
  // No tags: render an empty muted span — the cell shows nothing visible.
  // NOTE(review): presumably intentional (vs. an em dash placeholder) — confirm.
  if (tags.length === 0) {
    return <span className='text-[11px] text-[var(--text-muted)]'></span>
  }
  // First two tags render inline; the rest go behind the "+N" overflow badge.
  const visibleTags = tags.slice(0, 2)
  const overflowTags = tags.slice(2)
  const hasOverflow = overflowTags.length > 0
  // stopPropagation so clicking inside the cell doesn't trigger the row's
  // own click handler (e.g. row navigation).
  return (
    <div className='flex items-center gap-[4px]' onClick={(e) => e.stopPropagation()}>
      {visibleTags.map((tag) => (
        <Tooltip.Root key={tag.slot}>
          <Tooltip.Trigger asChild>
            <Badge className='max-w-[80px] truncate px-[6px] py-[1px] text-[10px]'>
              {tag.value}
            </Badge>
          </Tooltip.Trigger>
          <Tooltip.Content side='top'>
            {tag.displayName}: {tag.value}
          </Tooltip.Content>
        </Tooltip.Root>
      ))}
      {hasOverflow && (
        <Popover>
          {/* The "+N" badge is both the tooltip trigger and the popover anchor. */}
          <Tooltip.Root>
            <Tooltip.Trigger asChild>
              <PopoverAnchor asChild>
                <Badge
                  variant='outline'
                  className='cursor-pointer px-[6px] py-[1px] text-[10px] hover:bg-[var(--surface-6)]'
                >
                  +{overflowTags.length}
                </Badge>
              </PopoverAnchor>
            </Tooltip.Trigger>
            <Tooltip.Content side='top'>
              {overflowTags.map((tag) => tag.displayName).join(', ')}
            </Tooltip.Content>
          </Tooltip.Root>
          {/* The popover lists ALL tags (not just the overflow ones). */}
          <PopoverContent side='bottom' align='start' maxWidth={220} minWidth={160}>
            <div className='flex flex-col gap-[2px]'>
              {tags.map((tag) => (
                <div
                  key={tag.slot}
                  className='flex items-center justify-between gap-[8px] rounded-[4px] px-[6px] py-[4px] text-[11px]'
                >
                  <span className='text-[var(--text-muted)]'>{tag.displayName}</span>
                  <span className='max-w-[100px] truncate text-[var(--text-primary)]'>
                    {tag.value}
                  </span>
                </div>
              ))}
            </div>
          </PopoverContent>
        </Popover>
      )}
    </div>
  )
}

View File

@@ -1,3 +1,4 @@
export { ActionBar } from './action-bar/action-bar'
export { AddDocumentsModal } from './add-documents-modal/add-documents-modal'
export { BaseTagsModal } from './base-tags-modal/base-tags-modal'
export { DocumentTagsCell } from './document-tags-cell/document-tags-cell'

View File

@@ -14,11 +14,6 @@ import { createLogger } from '@/lib/logs/console/logger'
const logger = createLogger('GlobalCommands')
/**
* Detects if the current platform is macOS.
*
* @returns True if running on macOS, false otherwise
*/
function isMacPlatform(): boolean {
if (typeof window === 'undefined') return false
return (
@@ -27,18 +22,6 @@ function isMacPlatform(): boolean {
)
}
/**
* Represents a parsed keyboard shortcut.
*
* We support the following modifiers:
* - Mod: maps to Meta on macOS, Ctrl on other platforms
* - Ctrl, Meta, Shift, Alt
*
* Examples:
* - "Mod+A"
* - "Mod+Shift+T"
* - "Meta+K"
*/
export interface ParsedShortcut {
key: string
mod?: boolean
@@ -48,24 +31,10 @@ export interface ParsedShortcut {
alt?: boolean
}
/**
* Declarative command registration.
*/
export interface GlobalCommand {
/** Unique id for the command. If omitted, one is generated. */
id?: string
/** Shortcut string in the form "Mod+Shift+T", "Mod+A", "Meta+K", etc. */
shortcut: string
/**
* Whether to allow the command to run inside editable elements like inputs,
* textareas or contenteditable. Defaults to true to ensure browser defaults
* are overridden when desired.
*/
allowInEditable?: boolean
/**
* Handler invoked when the shortcut is matched. Use this to trigger actions
* like navigation or dispatching application events.
*/
handler: (event: KeyboardEvent) => void
}
@@ -80,16 +49,13 @@ interface GlobalCommandsContextValue {
const GlobalCommandsContext = createContext<GlobalCommandsContextValue | null>(null)
/**
* Parses a human-readable shortcut into a structured representation.
*/
function parseShortcut(shortcut: string): ParsedShortcut {
const parts = shortcut.split('+').map((p) => p.trim())
const modifiers = new Set(parts.slice(0, -1).map((p) => p.toLowerCase()))
const last = parts[parts.length - 1]
return {
key: last.length === 1 ? last.toLowerCase() : last, // keep non-letter keys verbatim
key: last.length === 1 ? last.toLowerCase() : last,
mod: modifiers.has('mod'),
ctrl: modifiers.has('ctrl'),
meta: modifiers.has('meta') || modifiers.has('cmd') || modifiers.has('command'),
@@ -98,16 +64,10 @@ function parseShortcut(shortcut: string): ParsedShortcut {
}
}
/**
* Checks if a KeyboardEvent matches a parsed shortcut, honoring platform-specific
* interpretation of "Mod" (Meta on macOS, Ctrl elsewhere).
*/
function matchesShortcut(e: KeyboardEvent, parsed: ParsedShortcut): boolean {
const isMac = isMacPlatform()
const expectedCtrl = parsed.ctrl || (parsed.mod ? !isMac : false)
const expectedMeta = parsed.meta || (parsed.mod ? isMac : false)
// Normalize key for comparison: for letters compare lowercase
const eventKey = e.key.length === 1 ? e.key.toLowerCase() : e.key
return (
@@ -119,10 +79,6 @@ function matchesShortcut(e: KeyboardEvent, parsed: ParsedShortcut): boolean {
)
}
/**
* Provider that captures global keyboard shortcuts and routes them to
* registered commands. Commands can be registered from any descendant component.
*/
export function GlobalCommandsProvider({ children }: { children: ReactNode }) {
const registryRef = useRef<Map<string, RegistryCommand>>(new Map())
const isMac = useMemo(() => isMacPlatform(), [])
@@ -140,13 +96,11 @@ export function GlobalCommandsProvider({ children }: { children: ReactNode }) {
allowInEditable: cmd.allowInEditable ?? true,
})
createdIds.push(id)
logger.info('Registered global command', { id, shortcut: cmd.shortcut })
}
return () => {
for (const id of createdIds) {
registryRef.current.delete(id)
logger.info('Unregistered global command', { id })
}
}
}, [])
@@ -155,8 +109,6 @@ export function GlobalCommandsProvider({ children }: { children: ReactNode }) {
const onKeyDown = (e: KeyboardEvent) => {
if (e.isComposing) return
// Evaluate matches in registration order (latest registration wins naturally
// due to replacement on same id). Break on first match.
for (const [, cmd] of registryRef.current) {
if (!cmd.allowInEditable) {
const ae = document.activeElement
@@ -168,16 +120,8 @@ export function GlobalCommandsProvider({ children }: { children: ReactNode }) {
}
if (matchesShortcut(e, cmd.parsed)) {
// Always override default browser behavior for matched commands.
e.preventDefault()
e.stopPropagation()
logger.info('Executing global command', {
id: cmd.id,
shortcut: cmd.shortcut,
key: e.key,
isMac,
path: typeof window !== 'undefined' ? window.location.pathname : undefined,
})
try {
cmd.handler(e)
} catch (err) {
@@ -197,22 +141,28 @@ export function GlobalCommandsProvider({ children }: { children: ReactNode }) {
return <GlobalCommandsContext.Provider value={value}>{children}</GlobalCommandsContext.Provider>
}
/**
* Registers a set of global commands for the lifetime of the component.
*
* Returns nothing; cleanup is automatic on unmount.
*/
export function useRegisterGlobalCommands(commands: GlobalCommand[] | (() => GlobalCommand[])) {
const ctx = useContext(GlobalCommandsContext)
if (!ctx) {
throw new Error('useRegisterGlobalCommands must be used within GlobalCommandsProvider')
}
const commandsRef = useRef<GlobalCommand[]>([])
const list = typeof commands === 'function' ? commands() : commands
commandsRef.current = list
useEffect(() => {
const list = typeof commands === 'function' ? commands() : commands
const unregister = ctx.register(list)
const wrappedCommands = commandsRef.current.map((cmd) => ({
...cmd,
handler: (event: KeyboardEvent) => {
const currentCmd = commandsRef.current.find((c) => c.id === cmd.id)
if (currentCmd) {
currentCmd.handler(event)
}
},
}))
const unregister = ctx.register(wrappedCommands)
return unregister
// We intentionally want to register once for the given commands
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [])
}

View File

@@ -1055,7 +1055,7 @@ export function Chat() {
{isStreaming ? (
<Button
onClick={handleStopStreaming}
className='h-[22px] w-[22px] rounded-full p-0 transition-colors !bg-[var(--c-C0C0C0)] hover:!bg-[var(--c-D0D0D0)]'
className='!bg-[var(--c-C0C0C0)] hover:!bg-[var(--c-D0D0D0)] h-[22px] w-[22px] rounded-full p-0 transition-colors'
>
<Square className='h-2.5 w-2.5 fill-black text-black' />
</Button>

View File

@@ -14,7 +14,6 @@ const logger = createLogger('DiffControls')
export const DiffControls = memo(function DiffControls() {
const isTerminalResizing = useTerminalStore((state) => state.isResizing)
// Optimized: Single diff store subscription
const {
isShowingDiff,
isDiffReady,
@@ -38,12 +37,10 @@ export const DiffControls = memo(function DiffControls() {
)
)
// Optimized: Single copilot store subscription for needed values
const { updatePreviewToolCallState, clearPreviewYaml, currentChat, messages } = useCopilotStore(
const { updatePreviewToolCallState, currentChat, messages } = useCopilotStore(
useCallback(
(state) => ({
updatePreviewToolCallState: state.updatePreviewToolCallState,
clearPreviewYaml: state.clearPreviewYaml,
currentChat: state.currentChat,
messages: state.messages,
}),
@@ -222,11 +219,6 @@ export const DiffControls = memo(function DiffControls() {
logger.warn('Failed to create checkpoint before accept:', error)
})
// Clear preview YAML immediately
await clearPreviewYaml().catch((error) => {
logger.warn('Failed to clear preview YAML:', error)
})
// Resolve target toolCallId for build/edit and update to terminal success state in the copilot store
try {
const { toolCallsById, messages } = useCopilotStore.getState()
@@ -266,16 +258,11 @@ export const DiffControls = memo(function DiffControls() {
logger.error('Workflow update failed:', errorMessage)
alert(`Failed to save workflow changes: ${errorMessage}`)
}
}, [createCheckpoint, clearPreviewYaml, updatePreviewToolCallState, acceptChanges])
}, [createCheckpoint, updatePreviewToolCallState, acceptChanges])
const handleReject = useCallback(() => {
logger.info('Rejecting proposed changes (optimistic)')
// Clear preview YAML immediately
clearPreviewYaml().catch((error) => {
logger.warn('Failed to clear preview YAML:', error)
})
// Resolve target toolCallId for build/edit and update to terminal rejected state in the copilot store
try {
const { toolCallsById, messages } = useCopilotStore.getState()
@@ -306,7 +293,7 @@ export const DiffControls = memo(function DiffControls() {
rejectChanges().catch((error) => {
logger.error('Failed to reject changes (background):', error)
})
}, [clearPreviewYaml, updatePreviewToolCallState, rejectChanges])
}, [updatePreviewToolCallState, rejectChanges])
// Don't show anything if no diff is available or diff is not ready
if (!hasActiveDiff || !isDiffReady) {

View File

@@ -1,6 +1,7 @@
'use client'
import { Component, type ReactNode, useEffect } from 'react'
import { ReactFlowProvider } from 'reactflow'
import { createLogger } from '@/lib/logs/console/logger'
import { Panel } from '@/app/workspace/[workspaceId]/w/[workflowId]/components'
import { Sidebar } from '@/app/workspace/[workspaceId]/w/components/sidebar/sidebar'
@@ -47,8 +48,9 @@ export function ErrorUI({
</div>
</div>
{/* Panel */}
<Panel />
<ReactFlowProvider>
<Panel />
</ReactFlowProvider>
</div>
</div>
)

View File

@@ -65,56 +65,6 @@ export function useMessageFeedback(
return null
}, [messages, message.id])
/**
* Extracts workflow YAML from workflow tool calls
*/
const getWorkflowYaml = useCallback(() => {
const allToolCalls = [
...(message.toolCalls || []),
...(message.contentBlocks || [])
.filter((block) => block.type === 'tool_call')
.map((block) => (block as any).toolCall),
]
const workflowTools = allToolCalls.filter((toolCall) =>
WORKFLOW_TOOL_NAMES.includes(toolCall?.name)
)
for (const toolCall of workflowTools) {
const yamlContent =
toolCall.result?.yamlContent ||
toolCall.result?.data?.yamlContent ||
toolCall.input?.yamlContent ||
toolCall.input?.data?.yamlContent
if (yamlContent && typeof yamlContent === 'string' && yamlContent.trim()) {
return yamlContent
}
}
if (currentChat?.previewYaml?.trim()) {
return currentChat.previewYaml
}
for (const toolCall of workflowTools) {
if (toolCall.id) {
const preview = getPreviewByToolCall(toolCall.id)
if (preview?.yamlContent?.trim()) {
return preview.yamlContent
}
}
}
if (workflowTools.length > 0 && workflowId) {
const latestPreview = getLatestPendingPreview(workflowId, currentChat?.id)
if (latestPreview?.yamlContent?.trim()) {
return latestPreview.yamlContent
}
}
return null
}, [message, currentChat, workflowId, getPreviewByToolCall, getLatestPendingPreview])
/**
* Submits feedback to the API
*/
@@ -137,20 +87,14 @@ export function useMessageFeedback(
return
}
const workflowYaml = getWorkflowYaml()
try {
const requestBody: any = {
const requestBody = {
chatId: currentChat.id,
userQuery,
agentResponse,
isPositiveFeedback: isPositive,
}
if (workflowYaml) {
requestBody.workflowYaml = workflowYaml
}
const response = await fetch('/api/copilot/feedback', {
method: 'POST',
headers: {
@@ -168,7 +112,7 @@ export function useMessageFeedback(
logger.error('Error submitting feedback:', error)
}
},
[currentChat, getLastUserQuery, getFullAssistantContent, message, getWorkflowYaml]
[currentChat, getLastUserQuery, getFullAssistantContent, message]
)
/**

View File

@@ -35,7 +35,6 @@ interface DeployModalProps {
workflowId: string | null
isDeployed: boolean
needsRedeployment: boolean
setNeedsRedeployment: (value: boolean) => void
deployedState: WorkflowState
isLoadingDeployedState: boolean
refetchDeployedState: () => Promise<void>
@@ -58,7 +57,6 @@ export function DeployModal({
workflowId,
isDeployed: isDeployedProp,
needsRedeployment,
setNeedsRedeployment,
deployedState,
isLoadingDeployedState,
refetchDeployedState,
@@ -229,7 +227,6 @@ export function DeployModal({
setDeploymentStatus(workflowId, isDeployedStatus, deployedAtTime, apiKeyLabel)
setNeedsRedeployment(false)
if (workflowId) {
useWorkflowRegistry.getState().setWorkflowNeedsRedeployment(workflowId, false)
}
@@ -453,7 +450,6 @@ export function DeployModal({
getApiKeyLabel(apiKey)
)
setNeedsRedeployment(false)
if (workflowId) {
useWorkflowRegistry.getState().setWorkflowNeedsRedeployment(workflowId, false)
}

View File

@@ -45,8 +45,7 @@ export function Deploy({ activeWorkflowId, userPermissions, className }: DeployP
isRegistryLoading,
})
// Detect changes between current and deployed state
const { changeDetected, setChangeDetected } = useChangeDetection({
const { changeDetected } = useChangeDetection({
workflowId: activeWorkflowId,
deployedState,
isLoadingDeployedState,
@@ -136,7 +135,6 @@ export function Deploy({ activeWorkflowId, userPermissions, className }: DeployP
workflowId={activeWorkflowId}
isDeployed={isDeployed}
needsRedeployment={changeDetected}
setNeedsRedeployment={setChangeDetected}
deployedState={deployedState!}
isLoadingDeployedState={isLoadingDeployedState}
refetchDeployedState={refetchWithErrorHandling}

View File

@@ -1,13 +1,11 @@
import { useEffect, useMemo, useState } from 'react'
import { createLogger } from '@/lib/logs/console/logger'
import { useMemo } from 'react'
import { hasWorkflowChanged } from '@/lib/workflows/comparison'
import { useDebounce } from '@/hooks/use-debounce'
import { useOperationQueueStore } from '@/stores/operation-queue/store'
import { useVariablesStore } from '@/stores/panel/variables/store'
import { useSubBlockStore } from '@/stores/workflows/subblock/store'
import { useWorkflowStore } from '@/stores/workflows/workflow/store'
import type { WorkflowState } from '@/stores/workflows/workflow/types'
const logger = createLogger('useChangeDetection')
interface UseChangeDetectionProps {
workflowId: string | null
deployedState: WorkflowState | null
@@ -15,97 +13,78 @@ interface UseChangeDetectionProps {
}
/**
* Hook to detect changes between current workflow state and deployed state
* Uses API-based change detection for accuracy
* Detects meaningful changes between current workflow state and deployed state.
* Performs comparison entirely on the client - no API calls needed.
*/
export function useChangeDetection({
workflowId,
deployedState,
isLoadingDeployedState,
}: UseChangeDetectionProps) {
const [changeDetected, setChangeDetected] = useState(false)
const [blockStructureVersion, setBlockStructureVersion] = useState(0)
const [edgeStructureVersion, setEdgeStructureVersion] = useState(0)
const [subBlockStructureVersion, setSubBlockStructureVersion] = useState(0)
// Get current store state for change detection
const currentBlocks = useWorkflowStore((state) => state.blocks)
const currentEdges = useWorkflowStore((state) => state.edges)
const lastSaved = useWorkflowStore((state) => state.lastSaved)
const blocks = useWorkflowStore((state) => state.blocks)
const edges = useWorkflowStore((state) => state.edges)
const loops = useWorkflowStore((state) => state.loops)
const parallels = useWorkflowStore((state) => state.parallels)
const subBlockValues = useSubBlockStore((state) =>
workflowId ? state.workflowValues[workflowId] : null
)
// Track structure changes
useEffect(() => {
setBlockStructureVersion((version) => version + 1)
}, [currentBlocks])
useEffect(() => {
setEdgeStructureVersion((version) => version + 1)
}, [currentEdges])
useEffect(() => {
setSubBlockStructureVersion((version) => version + 1)
}, [subBlockValues])
// Reset version counters when workflow changes
useEffect(() => {
setBlockStructureVersion(0)
setEdgeStructureVersion(0)
setSubBlockStructureVersion(0)
}, [workflowId])
// Create trigger for status check
const statusCheckTrigger = useMemo(() => {
return JSON.stringify({
lastSaved: lastSaved ?? 0,
blockVersion: blockStructureVersion,
edgeVersion: edgeStructureVersion,
subBlockVersion: subBlockStructureVersion,
})
}, [lastSaved, blockStructureVersion, edgeStructureVersion, subBlockStructureVersion])
const debouncedStatusCheckTrigger = useDebounce(statusCheckTrigger, 500)
useEffect(() => {
// Avoid off-by-one false positives: wait until operation queue is idle
const { operations, isProcessing } = useOperationQueueStore.getState()
const hasPendingOps =
isProcessing || operations.some((op) => op.status === 'pending' || op.status === 'processing')
if (!workflowId || !deployedState) {
setChangeDetected(false)
return
const allVariables = useVariablesStore((state) => state.variables)
const workflowVariables = useMemo(() => {
if (!workflowId) return {}
const vars: Record<string, any> = {}
for (const [id, variable] of Object.entries(allVariables)) {
if (variable.workflowId === workflowId) {
vars[id] = variable
}
}
return vars
}, [workflowId, allVariables])
if (isLoadingDeployedState || hasPendingOps) {
return
}
const currentState = useMemo((): WorkflowState | null => {
if (!workflowId) return null
// Use the workflow status API to get accurate change detection
// This uses the same logic as the deployment API (reading from normalized tables)
const checkForChanges = async () => {
try {
const response = await fetch(`/api/workflows/${workflowId}/status`)
if (response.ok) {
const data = await response.json()
setChangeDetected(data.needsRedeployment || false)
} else {
logger.error('Failed to fetch workflow status:', response.status, response.statusText)
setChangeDetected(false)
const blocksWithSubBlocks: WorkflowState['blocks'] = {}
for (const [blockId, block] of Object.entries(blocks)) {
const blockSubValues = subBlockValues?.[blockId] || {}
const subBlocks: Record<string, any> = {}
for (const [subId, value] of Object.entries(blockSubValues)) {
subBlocks[subId] = { value }
}
if (block.subBlocks) {
for (const [subId, subBlock] of Object.entries(block.subBlocks)) {
if (!subBlocks[subId]) {
subBlocks[subId] = subBlock
} else {
subBlocks[subId] = { ...subBlock, value: subBlocks[subId].value }
}
}
} catch (error) {
logger.error('Error fetching workflow status:', error)
setChangeDetected(false)
}
blocksWithSubBlocks[blockId] = {
...block,
subBlocks,
}
}
checkForChanges()
}, [workflowId, deployedState, debouncedStatusCheckTrigger, isLoadingDeployedState])
return {
blocks: blocksWithSubBlocks,
edges,
loops,
parallels,
variables: workflowVariables,
} as WorkflowState & { variables: Record<string, any> }
}, [workflowId, blocks, edges, loops, parallels, subBlockValues, workflowVariables])
return {
changeDetected,
setChangeDetected,
}
const rawChangeDetected = useMemo(() => {
if (!currentState || !deployedState || isLoadingDeployedState) {
return false
}
return hasWorkflowChanged(currentState, deployedState)
}, [currentState, deployedState, isLoadingDeployedState])
const changeDetected = useDebounce(rawChangeDetected, 300)
return { changeDetected }
}

View File

@@ -1,11 +1,9 @@
import { useCallback, useEffect, useState } from 'react'
import { AlertTriangle } from 'lucide-react'
import { useParams } from 'next/navigation'
import { createLogger } from '@/lib/logs/console/logger'
import { Badge } from '@/components/emcn'
import { parseCronToHumanReadable } from '@/lib/workflows/schedules/utils'
import { useRedeployWorkflowSchedule, useScheduleQuery } from '@/hooks/queries/schedules'
import { useSubBlockStore } from '@/stores/workflows/subblock/store'
const logger = createLogger('ScheduleStatus')
import { MAX_CONSECUTIVE_FAILURES } from '@/triggers/constants'
interface ScheduleInfoProps {
blockId: string
@@ -20,172 +18,93 @@ interface ScheduleInfoProps {
export function ScheduleInfo({ blockId, isPreview = false }: ScheduleInfoProps) {
const params = useParams()
const workflowId = params.workflowId as string
const [scheduleStatus, setScheduleStatus] = useState<'active' | 'disabled' | null>(null)
const [nextRunAt, setNextRunAt] = useState<Date | null>(null)
const [lastRanAt, setLastRanAt] = useState<Date | null>(null)
const [failedCount, setFailedCount] = useState<number>(0)
const [isLoadingStatus, setIsLoadingStatus] = useState(true)
const [savedCronExpression, setSavedCronExpression] = useState<string | null>(null)
const [isRedeploying, setIsRedeploying] = useState(false)
const [hasSchedule, setHasSchedule] = useState(false)
const scheduleTimezone = useSubBlockStore((state) => state.getValue(blockId, 'timezone'))
const fetchScheduleStatus = useCallback(async () => {
if (isPreview) return
const { data: schedule, isLoading } = useScheduleQuery(workflowId, blockId, {
enabled: !isPreview,
})
setIsLoadingStatus(true)
try {
const response = await fetch(`/api/schedules?workflowId=${workflowId}&blockId=${blockId}`)
if (response.ok) {
const data = await response.json()
if (data.schedule) {
setHasSchedule(true)
setScheduleStatus(data.schedule.status)
setNextRunAt(data.schedule.nextRunAt ? new Date(data.schedule.nextRunAt) : null)
setLastRanAt(data.schedule.lastRanAt ? new Date(data.schedule.lastRanAt) : null)
setFailedCount(data.schedule.failedCount || 0)
setSavedCronExpression(data.schedule.cronExpression || null)
} else {
// No schedule exists (workflow not deployed or no schedule block)
setHasSchedule(false)
setScheduleStatus(null)
setNextRunAt(null)
setLastRanAt(null)
setFailedCount(0)
setSavedCronExpression(null)
}
}
} catch (error) {
logger.error('Error fetching schedule status', { error })
} finally {
setIsLoadingStatus(false)
}
}, [workflowId, blockId, isPreview])
const redeployMutation = useRedeployWorkflowSchedule()
useEffect(() => {
if (!isPreview) {
fetchScheduleStatus()
}
}, [isPreview, fetchScheduleStatus])
/**
* Handles redeploying the workflow when schedule is disabled due to failures.
* Redeploying will recreate the schedule with reset failure count.
*/
const handleRedeploy = async () => {
if (isPreview || isRedeploying) return
setIsRedeploying(true)
try {
const response = await fetch(`/api/workflows/${workflowId}/deploy`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ deployChatEnabled: false }),
})
if (response.ok) {
// Refresh schedule status after redeploy
await fetchScheduleStatus()
logger.info('Workflow redeployed successfully to reset schedule', { workflowId, blockId })
} else {
const errorData = await response.json()
logger.error('Failed to redeploy workflow', { error: errorData.error })
}
} catch (error) {
logger.error('Error redeploying workflow', { error })
} finally {
setIsRedeploying(false)
}
const handleRedeploy = () => {
if (isPreview || redeployMutation.isPending) return
redeployMutation.mutate({ workflowId, blockId })
}
// Don't render anything if there's no deployed schedule
if (!hasSchedule && !isLoadingStatus) {
if (!schedule || isLoading) {
return null
}
const timezone = scheduleTimezone || schedule?.timezone || 'UTC'
const failedCount = schedule?.failedCount || 0
const isDisabled = schedule?.status === 'disabled'
const nextRunAt = schedule?.nextRunAt ? new Date(schedule.nextRunAt) : null
return (
<div className='mt-2'>
{isLoadingStatus ? (
<div className='flex items-center gap-2 text-muted-foreground text-sm'>
<div className='h-4 w-4 animate-spin rounded-full border-[1.5px] border-current border-t-transparent' />
Loading schedule status...
</div>
) : (
<div className='space-y-1.5'>
{/* Status badges */}
{(failedCount > 0 || isDisabled) && (
<div className='space-y-1'>
{/* Failure badge with redeploy action */}
{failedCount >= 10 && scheduleStatus === 'disabled' && (
<button
type='button'
onClick={handleRedeploy}
disabled={isRedeploying}
className='flex w-full cursor-pointer items-center gap-2 rounded-md bg-destructive/10 px-3 py-2 text-left text-destructive text-sm transition-colors hover:bg-destructive/20 disabled:cursor-not-allowed disabled:opacity-50'
>
{isRedeploying ? (
<div className='h-4 w-4 animate-spin rounded-full border-[1.5px] border-current border-t-transparent' />
) : (
<AlertTriangle className='h-4 w-4 flex-shrink-0' />
)}
<span>
{isRedeploying
? 'Redeploying...'
: `Schedule disabled after ${failedCount} failures - Click to redeploy`}
</span>
</button>
)}
{/* Show warning for failed runs under threshold */}
{failedCount > 0 && failedCount < 10 && (
<div className='flex items-center gap-2'>
<span className='text-destructive text-sm'>
{failedCount} failed run{failedCount !== 1 ? 's' : ''}
</span>
</div>
)}
{/* Cron expression human-readable description */}
{savedCronExpression && (
<p className='text-muted-foreground text-sm'>
Runs{' '}
{parseCronToHumanReadable(
savedCronExpression,
scheduleTimezone || 'UTC'
).toLowerCase()}
<div className='flex flex-wrap items-center gap-2'>
{failedCount >= MAX_CONSECUTIVE_FAILURES && isDisabled ? (
<Badge
variant='outline'
className='cursor-pointer'
style={{
borderColor: 'var(--warning)',
color: 'var(--warning)',
}}
onClick={handleRedeploy}
>
{redeployMutation.isPending ? 'redeploying...' : 'disabled'}
</Badge>
) : failedCount > 0 ? (
<Badge
variant='outline'
style={{
borderColor: 'var(--warning)',
color: 'var(--warning)',
}}
>
{failedCount} failed
</Badge>
) : null}
</div>
{failedCount >= MAX_CONSECUTIVE_FAILURES && isDisabled && (
<p className='text-[12px] text-[var(--text-tertiary)]'>
Disabled after {MAX_CONSECUTIVE_FAILURES} consecutive failures
</p>
)}
{redeployMutation.isError && (
<p className='text-[12px] text-[var(--text-error)]'>
Failed to redeploy. Please try again.
</p>
)}
</div>
)}
{/* Next run time */}
{/* Schedule info - only show when active */}
{!isDisabled && (
<div className='text-[12px] text-[var(--text-tertiary)]'>
{schedule?.cronExpression && (
<span>{parseCronToHumanReadable(schedule.cronExpression, timezone)}</span>
)}
{nextRunAt && (
<p className='text-sm'>
<span className='font-medium'>Next run:</span>{' '}
{nextRunAt.toLocaleString('en-US', {
timeZone: scheduleTimezone || 'UTC',
year: 'numeric',
month: 'numeric',
day: 'numeric',
hour: 'numeric',
minute: '2-digit',
hour12: true,
})}{' '}
{scheduleTimezone || 'UTC'}
</p>
)}
{/* Last ran time */}
{lastRanAt && (
<p className='text-muted-foreground text-sm'>
<span className='font-medium'>Last ran:</span>{' '}
{lastRanAt.toLocaleString('en-US', {
timeZone: scheduleTimezone || 'UTC',
year: 'numeric',
month: 'numeric',
day: 'numeric',
hour: 'numeric',
minute: '2-digit',
hour12: true,
})}{' '}
{scheduleTimezone || 'UTC'}
</p>
<>
{schedule?.cronExpression && <span className='mx-1'>·</span>}
<span>
Next:{' '}
{nextRunAt.toLocaleString('en-US', {
timeZone: timezone,
month: 'short',
day: 'numeric',
hour: 'numeric',
minute: '2-digit',
hour12: true,
})}
</span>
</>
)}
</div>
)}

View File

@@ -885,7 +885,8 @@ export function ToolInput({
block.type === 'knowledge' ||
block.type === 'function') &&
block.type !== 'evaluator' &&
block.type !== 'mcp'
block.type !== 'mcp' &&
block.type !== 'file'
)
const value = isPreview ? previewValue : storeValue

View File

@@ -8,7 +8,6 @@ import {
ModalHeader,
} from '@/components/emcn/components'
import { Trash } from '@/components/emcn/icons/trash'
import { Alert, AlertDescription } from '@/components/ui/alert'
import { cn } from '@/lib/core/utils/cn'
import { createLogger } from '@/lib/logs/console/logger'
import { useCollaborativeWorkflow } from '@/hooks/use-collaborative-workflow'
@@ -367,12 +366,7 @@ export function TriggerSave({
saveStatus === 'error' && 'bg-red-600 hover:bg-red-700'
)}
>
{saveStatus === 'saving' && (
<>
<div className='mr-2 h-4 w-4 animate-spin rounded-full border-[1.5px] border-current border-t-transparent' />
Saving...
</>
)}
{saveStatus === 'saving' && 'Saving...'}
{saveStatus === 'saved' && 'Saved'}
{saveStatus === 'error' && 'Error'}
{saveStatus === 'idle' && (webhookId ? 'Update Configuration' : 'Save Configuration')}
@@ -394,59 +388,48 @@ export function TriggerSave({
)}
</div>
{errorMessage && (
<Alert variant='destructive' className='mt-2'>
<AlertDescription>{errorMessage}</AlertDescription>
</Alert>
)}
{errorMessage && <p className='mt-2 text-[12px] text-[var(--text-error)]'>{errorMessage}</p>}
{webhookId && hasWebhookUrlDisplay && (
<div className='mt-2 space-y-1'>
<div className='mt-4 space-y-2'>
<div className='flex items-center justify-between'>
<span className='font-medium text-sm'>Test Webhook URL</span>
<span className='font-medium text-[13px] text-[var(--text-primary)]'>
Test Webhook URL
</span>
<Button
variant='outline'
variant='ghost'
onClick={generateTestUrl}
disabled={isGeneratingTestUrl || isProcessing}
className='h-[32px] rounded-[8px] px-[12px]'
>
{isGeneratingTestUrl ? (
<>
<div className='mr-2 h-3 w-3 animate-spin rounded-full border-[1.5px] border-current border-t-transparent' />
Generating
</>
) : testUrl ? (
'Regenerate'
) : (
'Generate'
)}
{isGeneratingTestUrl ? 'Generating…' : testUrl ? 'Regenerate' : 'Generate'}
</Button>
</div>
{testUrl ? (
<ShortInput
blockId={blockId}
subBlockId={`${subBlockId}-test-url`}
config={{
id: `${subBlockId}-test-url`,
type: 'short-input',
readOnly: true,
showCopyButton: true,
}}
value={testUrl}
readOnly={true}
showCopyButton={true}
disabled={isPreview || disabled}
isPreview={isPreview}
/>
<>
<ShortInput
blockId={blockId}
subBlockId={`${subBlockId}-test-url`}
config={{
id: `${subBlockId}-test-url`,
type: 'short-input',
readOnly: true,
showCopyButton: true,
}}
value={testUrl}
readOnly={true}
showCopyButton={true}
disabled={isPreview || disabled}
isPreview={isPreview}
/>
{testUrlExpiresAt && (
<p className='text-[12px] text-[var(--text-tertiary)]'>
Expires {new Date(testUrlExpiresAt).toLocaleString()}
</p>
)}
</>
) : (
<p className='text-muted-foreground text-xs'>
Generate a temporary URL that executes this webhook against the live (undeployed)
workflow state.
</p>
)}
{testUrlExpiresAt && (
<p className='text-muted-foreground text-xs'>
Expires at {new Date(testUrlExpiresAt).toLocaleString()}
<p className='text-[12px] text-[var(--text-tertiary)]'>
Generate a temporary URL to test against the live (undeployed) workflow state.
</p>
)}
</div>

View File

@@ -37,6 +37,7 @@ import {
useUsageLimits,
} from '@/app/workspace/[workspaceId]/w/[workflowId]/components/panel/hooks'
import { Variables } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/variables/variables'
import { useAutoLayout } from '@/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-auto-layout'
import { useWorkflowExecution } from '@/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-workflow-execution'
import { useDeleteWorkflow, useImportWorkflow } from '@/app/workspace/[workspaceId]/w/hooks'
import { useChatStore } from '@/stores/chat/store'
@@ -99,6 +100,7 @@ export function Panel() {
hydration.phase === 'state-loading'
const { getJson } = useWorkflowJsonStore()
const { blocks } = useWorkflowStore()
const { handleAutoLayout: autoLayoutWithFitView } = useAutoLayout(activeWorkflowId || null)
// Delete workflow hook
const { isDeleting, handleDeleteWorkflow } = useDeleteWorkflow({
@@ -133,6 +135,13 @@ export function Panel() {
}
}
/**
* Cancels the currently executing workflow
*/
const cancelWorkflow = useCallback(async () => {
await handleCancelExecution()
}, [handleCancelExecution])
/**
* Runs the workflow with usage limit check
*/
@@ -144,13 +153,6 @@ export function Panel() {
await handleRunWorkflow()
}, [usageExceeded, handleRunWorkflow])
/**
* Cancels the currently executing workflow
*/
const cancelWorkflow = useCallback(async () => {
await handleCancelExecution()
}, [handleCancelExecution])
// Chat state
const { isChatOpen, setIsChatOpen } = useChatStore()
const { isOpen: isVariablesOpen, setIsOpen: setVariablesOpen } = useVariablesStore()
@@ -201,22 +203,11 @@ export function Panel() {
setIsAutoLayouting(true)
try {
// Use the standalone auto layout utility for immediate frontend updates
const { applyAutoLayoutAndUpdateStore } = await import('../../utils')
const result = await applyAutoLayoutAndUpdateStore(activeWorkflowId!)
if (result.success) {
logger.info('Auto layout completed successfully')
} else {
logger.error('Auto layout failed:', result.error)
}
} catch (error) {
logger.error('Auto layout error:', error)
await autoLayoutWithFitView()
} finally {
setIsAutoLayouting(false)
}
}, [isExecuting, userPermissions.canEdit, isAutoLayouting, activeWorkflowId])
}, [isExecuting, userPermissions.canEdit, isAutoLayouting, autoLayoutWithFitView])
/**
* Handles exporting workflow as JSON
@@ -300,7 +291,6 @@ export function Panel() {
{
id: 'run-workflow',
handler: () => {
// Do exactly what the Run button does
if (isExecuting) {
void cancelWorkflow()
} else {

View File

@@ -1,10 +1,10 @@
import { useCallback, useEffect, useState } from 'react'
import { createLogger } from '@/lib/logs/console/logger'
import { parseCronToHumanReadable } from '@/lib/workflows/schedules/utils'
import { useCallback } from 'react'
import {
useReactivateSchedule,
useScheduleInfo as useScheduleInfoQuery,
} from '@/hooks/queries/schedules'
import type { ScheduleInfo } from '../types'
const logger = createLogger('useScheduleInfo')
/**
* Return type for the useScheduleInfo hook
*/
@@ -18,7 +18,7 @@ export interface UseScheduleInfoReturn {
}
/**
* Custom hook for fetching schedule information
* Custom hook for fetching schedule information using TanStack Query
*
* @param blockId - The ID of the block
* @param blockType - The type of the block
@@ -30,96 +30,37 @@ export function useScheduleInfo(
blockType: string,
workflowId: string
): UseScheduleInfoReturn {
const [isLoading, setIsLoading] = useState(false)
const [scheduleInfo, setScheduleInfo] = useState<ScheduleInfo | null>(null)
const fetchScheduleInfo = useCallback(
async (wfId: string) => {
if (!wfId) return
try {
setIsLoading(true)
const params = new URLSearchParams({
workflowId: wfId,
blockId,
})
const response = await fetch(`/api/schedules?${params}`, {
cache: 'no-store',
headers: { 'Cache-Control': 'no-cache' },
})
if (!response.ok) {
setScheduleInfo(null)
return
}
const data = await response.json()
if (!data.schedule) {
setScheduleInfo(null)
return
}
const schedule = data.schedule
const scheduleTimezone = schedule.timezone || 'UTC'
setScheduleInfo({
scheduleTiming: schedule.cronExpression
? parseCronToHumanReadable(schedule.cronExpression, scheduleTimezone)
: 'Unknown schedule',
nextRunAt: schedule.nextRunAt,
lastRanAt: schedule.lastRanAt,
timezone: scheduleTimezone,
status: schedule.status,
isDisabled: schedule.status === 'disabled',
failedCount: schedule.failedCount || 0,
id: schedule.id,
})
} catch (error) {
logger.error('Error fetching schedule info:', error)
setScheduleInfo(null)
} finally {
setIsLoading(false)
}
},
[blockId]
const { scheduleInfo: queryScheduleInfo, isLoading } = useScheduleInfoQuery(
workflowId,
blockId,
blockType
)
const reactivateMutation = useReactivateSchedule()
const reactivateSchedule = useCallback(
async (scheduleId: string) => {
try {
const response = await fetch(`/api/schedules/${scheduleId}`, {
method: 'PUT',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ action: 'reactivate' }),
})
if (response.ok && workflowId) {
await fetchScheduleInfo(workflowId)
} else {
logger.error('Failed to reactivate schedule')
}
} catch (error) {
logger.error('Error reactivating schedule:', error)
}
await reactivateMutation.mutateAsync({
scheduleId,
workflowId,
blockId,
})
},
[workflowId, fetchScheduleInfo]
[reactivateMutation, workflowId, blockId]
)
useEffect(() => {
if (blockType === 'schedule' && workflowId) {
fetchScheduleInfo(workflowId)
} else {
setScheduleInfo(null)
setIsLoading(false)
}
return () => {
setIsLoading(false)
}
}, [blockType, workflowId, fetchScheduleInfo])
const scheduleInfo: ScheduleInfo | null = queryScheduleInfo
? {
scheduleTiming: queryScheduleInfo.scheduleTiming,
nextRunAt: queryScheduleInfo.nextRunAt,
lastRanAt: queryScheduleInfo.lastRanAt,
timezone: queryScheduleInfo.timezone,
status: queryScheduleInfo.status,
isDisabled: queryScheduleInfo.isDisabled,
failedCount: queryScheduleInfo.failedCount,
id: queryScheduleInfo.id,
}
: null
return {
scheduleInfo,

View File

@@ -1,14 +1,23 @@
import { useCallback } from 'react'
import type { AutoLayoutOptions } from '../utils/auto-layout-utils'
import { applyAutoLayoutAndUpdateStore as applyAutoLayoutStandalone } from '../utils/auto-layout-utils'
import { useReactFlow } from 'reactflow'
import { createLogger } from '@/lib/logs/console/logger'
import type { AutoLayoutOptions } from '@/app/workspace/[workspaceId]/w/[workflowId]/utils/auto-layout-utils'
import { applyAutoLayoutAndUpdateStore as applyAutoLayoutStandalone } from '@/app/workspace/[workspaceId]/w/[workflowId]/utils/auto-layout-utils'
export type { AutoLayoutOptions }
const logger = createLogger('useAutoLayout')
/**
* Hook providing auto-layout functionality for workflows
* Binds workflowId context and provides memoized callback for React components
* Hook providing auto-layout functionality for workflows.
* Binds workflowId context and provides memoized callback for React components.
* Includes automatic fitView animation after successful layout.
*
* Note: This hook requires a ReactFlowProvider ancestor.
*/
export function useAutoLayout(workflowId: string | null) {
const { fitView } = useReactFlow()
const applyAutoLayoutAndUpdateStore = useCallback(
async (options: AutoLayoutOptions = {}) => {
if (!workflowId) {
@@ -19,7 +28,34 @@ export function useAutoLayout(workflowId: string | null) {
[workflowId]
)
/**
 * Applies auto-layout to the bound workflow and, on success, animates the
 * viewport to fit all blocks in view.
 *
 * Never throws: failures are logged and folded into the returned
 * `{ success, error? }` result object so callers can branch on it.
 */
const handleAutoLayout = useCallback(async () => {
try {
const result = await applyAutoLayoutAndUpdateStore()
if (result.success) {
logger.info('Auto layout completed successfully')
// Run fitView on the next animation frame — presumably so the newly
// laid-out node positions are committed before the viewport animates.
// TODO(review): confirm one frame is sufficient for store-driven updates.
requestAnimationFrame(() => {
fitView({ padding: 0.8, duration: 600 })
})
} else {
logger.error('Auto layout failed:', result.error)
}
return result
} catch (error) {
logger.error('Auto layout error:', error)
// Normalize thrown errors into the same result shape as the success path.
return {
success: false,
error: error instanceof Error ? error.message : 'Unknown error',
}
}
}, [applyAutoLayoutAndUpdateStore, fitView])
return {
applyAutoLayoutAndUpdateStore,
handleAutoLayout,
}
}

View File

@@ -198,7 +198,7 @@ const WorkflowContent = React.memo(() => {
return resizeLoopNodes(updateNodeDimensions)
}, [resizeLoopNodes, updateNodeDimensions])
const { applyAutoLayoutAndUpdateStore } = useAutoLayout(activeWorkflowId || null)
const { handleAutoLayout: autoLayoutWithFitView } = useAutoLayout(activeWorkflowId || null)
const isWorkflowEmpty = useMemo(() => Object.keys(blocks).length === 0, [blocks])
@@ -441,19 +441,8 @@ const WorkflowContent = React.memo(() => {
/** Applies auto-layout to the workflow canvas. */
const handleAutoLayout = useCallback(async () => {
if (Object.keys(blocks).length === 0) return
try {
const result = await applyAutoLayoutAndUpdateStore()
if (result.success) {
logger.info('Auto layout completed successfully')
} else {
logger.error('Auto layout failed:', result.error)
}
} catch (error) {
logger.error('Auto layout error:', error)
}
}, [blocks, applyAutoLayoutAndUpdateStore])
await autoLayoutWithFitView()
}, [blocks, autoLayoutWithFitView])
const debouncedAutoLayout = useCallback(() => {
const debounceTimer = setTimeout(() => {

View File

@@ -0,0 +1,336 @@
'use client'
import { useState } from 'react'
import { Eye, EyeOff } from 'lucide-react'
import { useParams } from 'next/navigation'
import {
Button,
Input as EmcnInput,
Modal,
ModalBody,
ModalContent,
ModalFooter,
ModalHeader,
} from '@/components/emcn'
import { AnthropicIcon, GeminiIcon, MistralIcon, OpenAIIcon } from '@/components/icons'
import { Skeleton } from '@/components/ui'
import { createLogger } from '@/lib/logs/console/logger'
import {
type BYOKKey,
type BYOKProviderId,
useBYOKKeys,
useDeleteBYOKKey,
useUpsertBYOKKey,
} from '@/hooks/queries/byok-keys'
const logger = createLogger('BYOKSettings')
/**
 * Hosted model providers that support bring-your-own-key (BYOK).
 * `description` is shown in the list row when no key is stored; once a key
 * exists, the row shows the masked key instead. `placeholder` seeds the
 * API-key input in the add/update modal.
 */
const PROVIDERS: {
id: BYOKProviderId
name: string
icon: React.ComponentType<{ className?: string }>
description: string
placeholder: string
}[] = [
{
id: 'openai',
name: 'OpenAI',
icon: OpenAIIcon,
description: 'LLM calls and Knowledge Base embeddings',
placeholder: 'sk-...',
},
{
id: 'anthropic',
name: 'Anthropic',
icon: AnthropicIcon,
description: 'LLM calls',
placeholder: 'sk-ant-...',
},
{
id: 'google',
name: 'Google',
icon: GeminiIcon,
description: 'LLM calls',
placeholder: 'Enter your API key',
},
{
id: 'mistral',
name: 'Mistral',
icon: MistralIcon,
description: 'LLM calls and Knowledge Base OCR',
placeholder: 'Enter your API key',
},
]
/**
 * Loading placeholder for one provider row: mirrors the icon / name+detail /
 * action-button layout of the real row so the list doesn't shift on load.
 */
function BYOKKeySkeleton() {
return (
<div className='flex items-center justify-between gap-[12px]'>
<div className='flex items-center gap-[12px]'>
<Skeleton className='h-9 w-9 flex-shrink-0 rounded-[6px]' />
<div className='flex min-w-0 flex-col justify-center gap-[1px]'>
<Skeleton className='h-[14px] w-[100px]' />
<Skeleton className='h-[13px] w-[200px]' />
</div>
</div>
<Skeleton className='h-[32px] w-[72px] rounded-[6px]' />
</div>
)
}
/**
 * Workspace-scoped BYOK (bring-your-own-key) settings panel.
 *
 * Lists the supported providers, shows the masked stored key for providers
 * that already have one, and drives two modals: an add/update modal with an
 * API-key input, and a delete confirmation modal. Persistence goes through
 * the `useBYOKKeys` / `useUpsertBYOKKey` / `useDeleteBYOKKey` query hooks,
 * keyed by the `workspaceId` route param.
 */
export function BYOK() {
const params = useParams()
// Falls back to '' when the route param is absent; the query hooks are
// presumably disabled or no-ops for an empty id — TODO(review): confirm.
const workspaceId = (params?.workspaceId as string) || ''
const { data: keys = [], isLoading } = useBYOKKeys(workspaceId)
const upsertKey = useUpsertBYOKKey()
const deleteKey = useDeleteBYOKKey()
// Provider currently being added/updated; null means the edit modal is closed.
const [editingProvider, setEditingProvider] = useState<BYOKProviderId | null>(null)
const [apiKeyInput, setApiKeyInput] = useState('')
const [showApiKey, setShowApiKey] = useState(false)
// Save-error message surfaced inside the edit modal.
const [error, setError] = useState<string | null>(null)
// Provider pending deletion; null means the confirm modal is closed.
const [deleteConfirmProvider, setDeleteConfirmProvider] = useState<BYOKProviderId | null>(null)
// Look up the stored key (if any) for a provider in the fetched list.
const getKeyForProvider = (providerId: BYOKProviderId): BYOKKey | undefined => {
return keys.find((k) => k.providerId === providerId)
}
// Persist the entered key; on success close the modal, on failure keep it
// open and show the error inline.
const handleSave = async () => {
if (!editingProvider || !apiKeyInput.trim()) return
setError(null)
try {
await upsertKey.mutateAsync({
workspaceId,
providerId: editingProvider,
apiKey: apiKeyInput.trim(),
})
setEditingProvider(null)
setApiKeyInput('')
setShowApiKey(false)
} catch (err) {
const message = err instanceof Error ? err.message : 'Failed to save API key'
setError(message)
logger.error('Failed to save BYOK key', { error: err })
}
}
// Delete the confirmed provider's key. NOTE(review): failures are only
// logged — the confirm modal stays open (setDeleteConfirmProvider(null) is
// skipped) but no error is shown to the user, unlike handleSave.
const handleDelete = async () => {
if (!deleteConfirmProvider) return
try {
await deleteKey.mutateAsync({
workspaceId,
providerId: deleteConfirmProvider,
})
setDeleteConfirmProvider(null)
} catch (err) {
logger.error('Failed to delete BYOK key', { error: err })
}
}
// Open the add/update modal with a clean slate for the given provider.
const openEditModal = (providerId: BYOKProviderId) => {
setEditingProvider(providerId)
setApiKeyInput('')
setShowApiKey(false)
setError(null)
}
return (
<>
{/* Provider list */}
<div className='flex h-full flex-col gap-[16px]'>
<p className='text-[13px] text-[var(--text-secondary)]'>
Use your own API keys for hosted model providers.
</p>
<div className='min-h-0 flex-1 overflow-y-auto'>
{isLoading ? (
<div className='flex flex-col gap-[8px]'>
{PROVIDERS.map((p) => (
<BYOKKeySkeleton key={p.id} />
))}
</div>
) : (
<div className='flex flex-col gap-[8px]'>
{PROVIDERS.map((provider) => {
const existingKey = getKeyForProvider(provider.id)
const Icon = provider.icon
return (
<div key={provider.id} className='flex items-center justify-between gap-[12px]'>
<div className='flex items-center gap-[12px]'>
<div className='flex h-9 w-9 flex-shrink-0 items-center justify-center overflow-hidden rounded-[6px] bg-[var(--surface-6)]'>
<Icon className='h-4 w-4' />
</div>
<div className='flex min-w-0 flex-col justify-center gap-[1px]'>
<span className='font-medium text-[14px]'>{provider.name}</span>
{/* Masked key when one is stored, otherwise the provider blurb */}
<p className='truncate text-[13px] text-[var(--text-muted)]'>
{existingKey ? existingKey.maskedKey : provider.description}
</p>
</div>
</div>
{existingKey ? (
<div className='flex flex-shrink-0 items-center gap-[8px]'>
<Button variant='ghost' onClick={() => openEditModal(provider.id)}>
Update
</Button>
<Button
variant='ghost'
onClick={() => setDeleteConfirmProvider(provider.id)}
>
Delete
</Button>
</div>
) : (
<Button
variant='primary'
className='!bg-[var(--brand-tertiary-2)] !text-[var(--text-inverse)] hover:!bg-[var(--brand-tertiary-2)]/90'
onClick={() => openEditModal(provider.id)}
>
Add Key
</Button>
)}
</div>
)
})}
</div>
)}
</div>
</div>
{/* Add/update key modal — closing it by any means resets all input state */}
<Modal
open={!!editingProvider}
onOpenChange={(open) => {
if (!open) {
setEditingProvider(null)
setApiKeyInput('')
setShowApiKey(false)
setError(null)
}
}}
>
<ModalContent className='w-[420px]'>
<ModalHeader>
{editingProvider && (
<>
{getKeyForProvider(editingProvider) ? 'Update' : 'Add'}{' '}
{PROVIDERS.find((p) => p.id === editingProvider)?.name} API Key
</>
)}
</ModalHeader>
<ModalBody>
<p className='text-[12px] text-[var(--text-tertiary)]'>
This key will be used for all {PROVIDERS.find((p) => p.id === editingProvider)?.name}{' '}
requests in this workspace. Your key is encrypted and stored securely.
</p>
<div className='mt-[16px] flex flex-col gap-[8px]'>
<p className='font-medium text-[13px] text-[var(--text-secondary)]'>
Enter your API key
</p>
{/* Hidden decoy fields to prevent browser autofill */}
<input
type='text'
name='fakeusernameremembered'
autoComplete='username'
style={{
position: 'absolute',
left: '-9999px',
opacity: 0,
pointerEvents: 'none',
}}
tabIndex={-1}
readOnly
/>
<div className='relative'>
<EmcnInput
type={showApiKey ? 'text' : 'password'}
value={apiKeyInput}
onChange={(e) => {
setApiKeyInput(e.target.value)
if (error) setError(null)
}}
placeholder={PROVIDERS.find((p) => p.id === editingProvider)?.placeholder}
className='h-9 pr-[36px]'
autoFocus
name='byok_api_key'
autoComplete='off'
autoCorrect='off'
autoCapitalize='off'
data-lpignore='true'
data-form-type='other'
/>
{/* Toggle between masked and plain-text display of the key */}
<Button
variant='ghost'
className='-translate-y-1/2 absolute top-1/2 right-[4px] h-[28px] w-[28px] p-0'
onClick={() => setShowApiKey(!showApiKey)}
>
{showApiKey ? (
<EyeOff className='h-[14px] w-[14px]' />
) : (
<Eye className='h-[14px] w-[14px]' />
)}
</Button>
</div>
{error && (
<p className='text-[11px] text-[var(--text-error)] leading-tight'>{error}</p>
)}
</div>
</ModalBody>
<ModalFooter>
<Button
variant='default'
onClick={() => {
setEditingProvider(null)
setApiKeyInput('')
setShowApiKey(false)
setError(null)
}}
>
Cancel
</Button>
<Button
variant='primary'
onClick={handleSave}
disabled={!apiKeyInput.trim() || upsertKey.isPending}
className='!bg-[var(--brand-tertiary-2)] !text-[var(--text-inverse)] hover:!bg-[var(--brand-tertiary-2)]/90'
>
{upsertKey.isPending ? 'Saving...' : 'Save'}
</Button>
</ModalFooter>
</ModalContent>
</Modal>
{/* Delete confirmation modal. NOTE(review): unlike the edit modal above,
onOpenChange ignores the `open` flag — verify the Modal component only
invokes it on close, otherwise this would clear state on open too. */}
<Modal open={!!deleteConfirmProvider} onOpenChange={() => setDeleteConfirmProvider(null)}>
<ModalContent className='w-[400px]'>
<ModalHeader>Delete API Key</ModalHeader>
<ModalBody>
<p className='text-[12px] text-[var(--text-tertiary)]'>
Are you sure you want to delete the{' '}
<span className='font-medium text-[var(--text-primary)]'>
{PROVIDERS.find((p) => p.id === deleteConfirmProvider)?.name}
</span>{' '}
API key? This workspace will revert to using platform keys with the 2x multiplier.
</p>
</ModalBody>
<ModalFooter>
<Button variant='default' onClick={() => setDeleteConfirmProvider(null)}>
Cancel
</Button>
<Button
variant='primary'
onClick={handleDelete}
disabled={deleteKey.isPending}
className='!bg-[var(--brand-tertiary-2)] !text-[var(--text-inverse)] hover:!bg-[var(--brand-tertiary-2)]/90'
>
{deleteKey.isPending ? 'Deleting...' : 'Delete'}
</Button>
</ModalFooter>
</ModalContent>
</Modal>
</>
)
}

View File

@@ -1,4 +1,5 @@
export { ApiKeys } from './api-keys/api-keys'
export { BYOK } from './byok/byok'
export { Copilot } from './copilot/copilot'
export { CustomTools } from './custom-tools/custom-tools'
export { EnvironmentVariables } from './environment/environment'

View File

@@ -4,7 +4,7 @@ import { useCallback, useEffect, useMemo, useRef, useState } from 'react'
import * as DialogPrimitive from '@radix-ui/react-dialog'
import * as VisuallyHidden from '@radix-ui/react-visually-hidden'
import { useQueryClient } from '@tanstack/react-query'
import { Files, LogIn, Settings, User, Users, Wrench } from 'lucide-react'
import { Files, KeySquare, LogIn, Settings, User, Users, Wrench } from 'lucide-react'
import {
Card,
Connections,
@@ -30,6 +30,7 @@ import { isHosted } from '@/lib/core/config/feature-flags'
import { getUserRole } from '@/lib/workspaces/organization'
import {
ApiKeys,
BYOK,
Copilot,
CustomTools,
EnvironmentVariables,
@@ -62,6 +63,7 @@ type SettingsSection =
| 'template-profile'
| 'integrations'
| 'apikeys'
| 'byok'
| 'files'
| 'subscription'
| 'team'
@@ -114,6 +116,13 @@ const allNavigationItems: NavigationItem[] = [
{ id: 'mcp', label: 'MCPs', icon: McpIcon, section: 'tools' },
{ id: 'environment', label: 'Environment', icon: FolderCode, section: 'system' },
{ id: 'apikeys', label: 'API Keys', icon: Key, section: 'system' },
{
id: 'byok',
label: 'BYOK',
icon: KeySquare,
section: 'system',
requiresHosted: true,
},
{
id: 'copilot',
label: 'Copilot Keys',
@@ -456,6 +465,7 @@ export function SettingsModal({ open, onOpenChange }: SettingsModalProps) {
{isBillingEnabled && activeSection === 'subscription' && <Subscription />}
{isBillingEnabled && activeSection === 'team' && <TeamManagement />}
{activeSection === 'sso' && <SSO />}
{activeSection === 'byok' && <BYOK />}
{activeSection === 'copilot' && <Copilot />}
{activeSection === 'mcp' && <MCP initialServerId={pendingMcpServerId} />}
{activeSection === 'custom-tools' && <CustomTools />}

View File

@@ -2,6 +2,7 @@
import { useEffect, useState } from 'react'
import { useParams, useRouter } from 'next/navigation'
import { ReactFlowProvider } from 'reactflow'
import { createLogger } from '@/lib/logs/console/logger'
import { Panel, Terminal } from '@/app/workspace/[workspaceId]/w/[workflowId]/components'
import { useWorkflows } from '@/hooks/queries/workflows'
@@ -69,7 +70,9 @@ export default function WorkflowsPage() {
}}
/>
</div>
<Panel />
<ReactFlowProvider>
<Panel />
</ReactFlowProvider>
</div>
<Terminal />
</div>

View File

@@ -27,11 +27,10 @@ import { type ExecutionMetadata, ExecutionSnapshot } from '@/executor/execution/
import type { ExecutionResult } from '@/executor/types'
import { createEnvVarPattern } from '@/executor/utils/reference-validation'
import { mergeSubblockState } from '@/stores/workflows/server-utils'
import { MAX_CONSECUTIVE_FAILURES } from '@/triggers/constants'
const logger = createLogger('TriggerScheduleExecution')
const MAX_CONSECUTIVE_FAILURES = 10
type WorkflowRecord = typeof workflow.$inferSelect
type WorkflowScheduleUpdate = Partial<typeof workflowSchedule.$inferInsert>
type ExecutionCoreResult = Awaited<ReturnType<typeof executeWorkflowCore>>

View File

@@ -0,0 +1,698 @@
import { describe, expect, it, vi } from 'vitest'
// Use the real registry module, not the global mock from vitest.setup.ts
vi.unmock('@/blocks/registry')
import { generateRouterPrompt } from '@/blocks/blocks/router'
import {
getAllBlocks,
getAllBlockTypes,
getBlock,
getBlockByToolName,
getBlocksByCategory,
isValidBlockType,
registry,
} from '@/blocks/registry'
import { AuthMode } from '@/blocks/types'
describe('Blocks Module', () => {
describe('Registry', () => {
it('should have a non-empty registry of blocks', () => {
expect(Object.keys(registry).length).toBeGreaterThan(0)
})
it('should have all blocks with required properties', () => {
const blocks = getAllBlocks()
for (const block of blocks) {
expect(block.type).toBeDefined()
expect(typeof block.type).toBe('string')
expect(block.name).toBeDefined()
expect(typeof block.name).toBe('string')
expect(block.description).toBeDefined()
expect(typeof block.description).toBe('string')
expect(block.category).toBeDefined()
expect(['blocks', 'tools', 'triggers']).toContain(block.category)
expect(block.bgColor).toBeDefined()
expect(typeof block.bgColor).toBe('string')
expect(block.bgColor.length).toBeGreaterThan(0)
expect(block.icon).toBeDefined()
expect(typeof block.icon).toBe('function')
expect(block.tools).toBeDefined()
expect(block.tools.access).toBeDefined()
expect(Array.isArray(block.tools.access)).toBe(true)
expect(block.inputs).toBeDefined()
expect(typeof block.inputs).toBe('object')
expect(block.outputs).toBeDefined()
expect(typeof block.outputs).toBe('object')
expect(block.subBlocks).toBeDefined()
expect(Array.isArray(block.subBlocks)).toBe(true)
}
})
it('should have unique block types', () => {
const types = getAllBlockTypes()
const uniqueTypes = new Set(types)
expect(types.length).toBe(uniqueTypes.size)
})
})
describe('getBlock', () => {
it('should return a block by type', () => {
const block = getBlock('function')
expect(block).toBeDefined()
expect(block?.type).toBe('function')
expect(block?.name).toBe('Function')
})
it('should return undefined for non-existent block type', () => {
const block = getBlock('non-existent-block')
expect(block).toBeUndefined()
})
it('should normalize hyphens to underscores', () => {
const block = getBlock('microsoft-teams')
expect(block).toBeDefined()
expect(block?.type).toBe('microsoft_teams')
})
})
describe('getBlockByToolName', () => {
it('should find a block by tool name', () => {
const block = getBlockByToolName('function_execute')
expect(block).toBeDefined()
expect(block?.type).toBe('function')
})
it('should find a block with http_request tool', () => {
const block = getBlockByToolName('http_request')
expect(block).toBeDefined()
expect(block?.type).toBe('api')
})
it('should return undefined for non-existent tool name', () => {
const block = getBlockByToolName('non_existent_tool')
expect(block).toBeUndefined()
})
})
describe('getBlocksByCategory', () => {
it('should return blocks in the "blocks" category', () => {
const blocks = getBlocksByCategory('blocks')
expect(blocks.length).toBeGreaterThan(0)
for (const block of blocks) {
expect(block.category).toBe('blocks')
}
})
it('should return blocks in the "tools" category', () => {
const blocks = getBlocksByCategory('tools')
expect(blocks.length).toBeGreaterThan(0)
for (const block of blocks) {
expect(block.category).toBe('tools')
}
})
it('should return blocks in the "triggers" category', () => {
const blocks = getBlocksByCategory('triggers')
expect(blocks.length).toBeGreaterThan(0)
for (const block of blocks) {
expect(block.category).toBe('triggers')
}
})
})
describe('getAllBlockTypes', () => {
it('should return an array of block types', () => {
const types = getAllBlockTypes()
expect(Array.isArray(types)).toBe(true)
expect(types.length).toBeGreaterThan(0)
for (const type of types) {
expect(typeof type).toBe('string')
}
})
})
describe('isValidBlockType', () => {
it('should return true for valid block types', () => {
expect(isValidBlockType('function')).toBe(true)
expect(isValidBlockType('agent')).toBe(true)
expect(isValidBlockType('condition')).toBe(true)
expect(isValidBlockType('api')).toBe(true)
})
it('should return false for invalid block types', () => {
expect(isValidBlockType('invalid-block')).toBe(false)
expect(isValidBlockType('')).toBe(false)
})
it('should handle hyphenated versions of underscored types', () => {
expect(isValidBlockType('microsoft-teams')).toBe(true)
expect(isValidBlockType('google-calendar')).toBe(true)
})
})
describe('Block Definitions', () => {
describe('FunctionBlock', () => {
const block = getBlock('function')
it('should have correct metadata', () => {
expect(block?.type).toBe('function')
expect(block?.name).toBe('Function')
expect(block?.category).toBe('blocks')
expect(block?.bgColor).toBe('#FF402F')
})
it('should have language and code subBlocks', () => {
expect(block?.subBlocks.length).toBeGreaterThanOrEqual(1)
const languageSubBlock = block?.subBlocks.find((sb) => sb.id === 'language')
const codeSubBlock = block?.subBlocks.find((sb) => sb.id === 'code')
expect(codeSubBlock).toBeDefined()
expect(codeSubBlock?.type).toBe('code')
})
it('should have function_execute tool access', () => {
expect(block?.tools.access).toContain('function_execute')
})
it('should have code input', () => {
expect(block?.inputs.code).toBeDefined()
expect(block?.inputs.code.type).toBe('string')
})
it('should have result and stdout outputs', () => {
expect(block?.outputs.result).toBeDefined()
expect(block?.outputs.stdout).toBeDefined()
})
})
describe('ConditionBlock', () => {
const block = getBlock('condition')
it('should have correct metadata', () => {
expect(block?.type).toBe('condition')
expect(block?.name).toBe('Condition')
expect(block?.category).toBe('blocks')
expect(block?.bgColor).toBe('#FF752F')
})
it('should have condition-input subBlock', () => {
const conditionsSubBlock = block?.subBlocks.find((sb) => sb.id === 'conditions')
expect(conditionsSubBlock).toBeDefined()
expect(conditionsSubBlock?.type).toBe('condition-input')
})
it('should have empty tools access', () => {
expect(block?.tools.access).toEqual([])
})
it('should have condition-related outputs', () => {
expect(block?.outputs.conditionResult).toBeDefined()
expect(block?.outputs.selectedPath).toBeDefined()
expect(block?.outputs.selectedOption).toBeDefined()
})
})
describe('ApiBlock', () => {
const block = getBlock('api')
it('should have correct metadata', () => {
expect(block?.type).toBe('api')
expect(block?.name).toBe('API')
expect(block?.category).toBe('blocks')
expect(block?.bgColor).toBe('#2F55FF')
})
it('should have required url subBlock', () => {
const urlSubBlock = block?.subBlocks.find((sb) => sb.id === 'url')
expect(urlSubBlock).toBeDefined()
expect(urlSubBlock?.type).toBe('short-input')
expect(urlSubBlock?.required).toBe(true)
})
it('should have method dropdown with HTTP methods', () => {
const methodSubBlock = block?.subBlocks.find((sb) => sb.id === 'method')
expect(methodSubBlock).toBeDefined()
expect(methodSubBlock?.type).toBe('dropdown')
expect(methodSubBlock?.required).toBe(true)
const options = methodSubBlock?.options as Array<{ label: string; id: string }>
expect(options?.map((o) => o.id)).toContain('GET')
expect(options?.map((o) => o.id)).toContain('POST')
expect(options?.map((o) => o.id)).toContain('PUT')
expect(options?.map((o) => o.id)).toContain('DELETE')
expect(options?.map((o) => o.id)).toContain('PATCH')
})
it('should have http_request tool access', () => {
expect(block?.tools.access).toContain('http_request')
})
it('should have API-related inputs', () => {
expect(block?.inputs.url).toBeDefined()
expect(block?.inputs.method).toBeDefined()
expect(block?.inputs.headers).toBeDefined()
expect(block?.inputs.body).toBeDefined()
expect(block?.inputs.params).toBeDefined()
})
it('should have API response outputs', () => {
expect(block?.outputs.data).toBeDefined()
expect(block?.outputs.status).toBeDefined()
expect(block?.outputs.headers).toBeDefined()
})
})
describe('ResponseBlock', () => {
const block = getBlock('response')
it('should have correct metadata', () => {
expect(block?.type).toBe('response')
expect(block?.name).toBe('Response')
expect(block?.category).toBe('blocks')
})
it('should have dataMode dropdown with builder and editor options', () => {
const dataModeSubBlock = block?.subBlocks.find((sb) => sb.id === 'dataMode')
expect(dataModeSubBlock).toBeDefined()
expect(dataModeSubBlock?.type).toBe('dropdown')
const options = dataModeSubBlock?.options as Array<{ label: string; id: string }>
expect(options?.map((o) => o.id)).toContain('structured')
expect(options?.map((o) => o.id)).toContain('json')
})
it('should have conditional subBlocks based on dataMode', () => {
const builderDataSubBlock = block?.subBlocks.find((sb) => sb.id === 'builderData')
const dataSubBlock = block?.subBlocks.find((sb) => sb.id === 'data')
expect(builderDataSubBlock?.condition).toEqual({ field: 'dataMode', value: 'structured' })
expect(dataSubBlock?.condition).toEqual({ field: 'dataMode', value: 'json' })
})
it('should have empty tools access', () => {
expect(block?.tools.access).toEqual([])
})
})
describe('StarterBlock', () => {
const block = getBlock('starter')
it('should have correct metadata', () => {
expect(block?.type).toBe('starter')
expect(block?.name).toBe('Starter')
expect(block?.category).toBe('blocks')
expect(block?.hideFromToolbar).toBe(true)
})
it('should have startWorkflow dropdown', () => {
const startWorkflowSubBlock = block?.subBlocks.find((sb) => sb.id === 'startWorkflow')
expect(startWorkflowSubBlock).toBeDefined()
expect(startWorkflowSubBlock?.type).toBe('dropdown')
const options = startWorkflowSubBlock?.options as Array<{ label: string; id: string }>
expect(options?.map((o) => o.id)).toContain('manual')
expect(options?.map((o) => o.id)).toContain('chat')
})
it('should have empty outputs since it initiates workflow', () => {
expect(Object.keys(block?.outputs || {}).length).toBe(0)
})
})
describe('RouterBlock', () => {
const block = getBlock('router')
it('should have correct metadata', () => {
expect(block?.type).toBe('router')
expect(block?.name).toBe('Router')
expect(block?.category).toBe('blocks')
expect(block?.authMode).toBe(AuthMode.ApiKey)
})
it('should have required prompt subBlock', () => {
const promptSubBlock = block?.subBlocks.find((sb) => sb.id === 'prompt')
expect(promptSubBlock).toBeDefined()
expect(promptSubBlock?.type).toBe('long-input')
expect(promptSubBlock?.required).toBe(true)
})
it('should have model combobox with default value', () => {
const modelSubBlock = block?.subBlocks.find((sb) => sb.id === 'model')
expect(modelSubBlock).toBeDefined()
expect(modelSubBlock?.type).toBe('combobox')
expect(modelSubBlock?.required).toBe(true)
expect(modelSubBlock?.defaultValue).toBe('claude-sonnet-4-5')
})
it('should have LLM tool access', () => {
expect(block?.tools.access).toContain('openai_chat')
expect(block?.tools.access).toContain('anthropic_chat')
expect(block?.tools.access).toContain('google_chat')
})
it('should have tools.config with tool selector function', () => {
expect(block?.tools.config).toBeDefined()
expect(typeof block?.tools.config?.tool).toBe('function')
})
})
describe('WebhookBlock', () => {
const block = getBlock('webhook')
it('should have correct metadata', () => {
expect(block?.type).toBe('webhook')
expect(block?.name).toBe('Webhook')
expect(block?.category).toBe('triggers')
expect(block?.authMode).toBe(AuthMode.OAuth)
expect(block?.triggerAllowed).toBe(true)
expect(block?.hideFromToolbar).toBe(true)
})
it('should have webhookProvider dropdown with multiple providers', () => {
const providerSubBlock = block?.subBlocks.find((sb) => sb.id === 'webhookProvider')
expect(providerSubBlock).toBeDefined()
expect(providerSubBlock?.type).toBe('dropdown')
const options = providerSubBlock?.options as Array<{ label: string; id: string }>
expect(options?.map((o) => o.id)).toContain('slack')
expect(options?.map((o) => o.id)).toContain('generic')
expect(options?.map((o) => o.id)).toContain('github')
})
it('should have conditional OAuth inputs', () => {
const gmailCredentialSubBlock = block?.subBlocks.find((sb) => sb.id === 'gmailCredential')
expect(gmailCredentialSubBlock).toBeDefined()
expect(gmailCredentialSubBlock?.type).toBe('oauth-input')
expect(gmailCredentialSubBlock?.condition).toEqual({
field: 'webhookProvider',
value: 'gmail',
})
const outlookCredentialSubBlock = block?.subBlocks.find(
(sb) => sb.id === 'outlookCredential'
)
expect(outlookCredentialSubBlock).toBeDefined()
expect(outlookCredentialSubBlock?.type).toBe('oauth-input')
expect(outlookCredentialSubBlock?.condition).toEqual({
field: 'webhookProvider',
value: 'outlook',
})
})
it('should have empty tools access', () => {
expect(block?.tools.access).toEqual([])
})
})
})
describe('SubBlock Validation', () => {
it('should have non-empty ids for all subBlocks', () => {
const blocks = getAllBlocks()
for (const block of blocks) {
for (const subBlock of block.subBlocks) {
expect(subBlock.id).toBeDefined()
expect(typeof subBlock.id).toBe('string')
expect(subBlock.id.length).toBeGreaterThan(0)
}
}
})
it('should have valid subBlock types', () => {
const validTypes = [
'short-input',
'long-input',
'dropdown',
'combobox',
'slider',
'table',
'code',
'switch',
'tool-input',
'checkbox-list',
'grouped-checkbox-list',
'condition-input',
'eval-input',
'time-input',
'oauth-input',
'webhook-config',
'schedule-info',
'file-selector',
'project-selector',
'channel-selector',
'user-selector',
'folder-selector',
'knowledge-base-selector',
'knowledge-tag-filters',
'document-selector',
'document-tag-entry',
'mcp-server-selector',
'mcp-tool-selector',
'mcp-dynamic-args',
'input-format',
'response-format',
'trigger-save',
'file-upload',
'input-mapping',
'variables-input',
'messages-input',
'workflow-selector',
'workflow-input-mapper',
'text',
]
const blocks = getAllBlocks()
for (const block of blocks) {
for (const subBlock of block.subBlocks) {
expect(validTypes).toContain(subBlock.type)
}
}
})
it('should have valid mode values for subBlocks', () => {
const validModes = ['basic', 'advanced', 'both', 'trigger', undefined]
const blocks = getAllBlocks()
for (const block of blocks) {
for (const subBlock of block.subBlocks) {
expect(validModes).toContain(subBlock.mode)
}
}
})
})
describe('Input/Output Validation', () => {
it('should have valid input types', () => {
const validTypes = ['string', 'number', 'boolean', 'json', 'array']
const blocks = getAllBlocks()
for (const block of blocks) {
for (const [_, inputConfig] of Object.entries(block.inputs)) {
expect(validTypes).toContain(inputConfig.type)
}
}
})
it('should have valid output types', () => {
const validPrimitiveTypes = ['string', 'number', 'boolean', 'json', 'array', 'files', 'any']
const blocks = getAllBlocks()
for (const block of blocks) {
for (const [key, outputConfig] of Object.entries(block.outputs)) {
if (key === 'visualization') continue
if (typeof outputConfig === 'string') {
expect(validPrimitiveTypes).toContain(outputConfig)
} else if (typeof outputConfig === 'object' && outputConfig !== null) {
if ('type' in outputConfig) {
expect(validPrimitiveTypes).toContain(outputConfig.type)
}
}
}
}
})
})
describe('AuthMode Validation', () => {
it('should have valid authMode when defined', () => {
const validAuthModes = [AuthMode.OAuth, AuthMode.ApiKey, AuthMode.BotToken, undefined]
const blocks = getAllBlocks()
for (const block of blocks) {
expect(validAuthModes).toContain(block.authMode)
}
})
})
describe('Edge Cases', () => {
it('should handle blocks with no inputs', () => {
const conditionBlock = getBlock('condition')
expect(conditionBlock?.inputs).toBeDefined()
expect(Object.keys(conditionBlock?.inputs || {}).length).toBe(0)
})
it('should handle blocks with no outputs', () => {
const starterBlock = getBlock('starter')
expect(starterBlock?.outputs).toBeDefined()
expect(Object.keys(starterBlock?.outputs || {}).length).toBe(0)
})
it('should handle blocks with no tool access', () => {
const conditionBlock = getBlock('condition')
expect(conditionBlock?.tools.access).toEqual([])
})
it('should handle blocks with multiple tool access', () => {
const routerBlock = getBlock('router')
expect(routerBlock?.tools.access.length).toBeGreaterThan(1)
})
it('should handle blocks with tools.config', () => {
const routerBlock = getBlock('router')
expect(routerBlock?.tools.config).toBeDefined()
expect(typeof routerBlock?.tools.config?.tool).toBe('function')
})
it('should handle blocks with triggerAllowed flag', () => {
const webhookBlock = getBlock('webhook')
expect(webhookBlock?.triggerAllowed).toBe(true)
const functionBlock = getBlock('function')
expect(functionBlock?.triggerAllowed).toBeUndefined()
})
it('should handle blocks with hideFromToolbar flag', () => {
const starterBlock = getBlock('starter')
expect(starterBlock?.hideFromToolbar).toBe(true)
const functionBlock = getBlock('function')
expect(functionBlock?.hideFromToolbar).toBeUndefined()
})
it('should handle blocks with docsLink', () => {
const functionBlock = getBlock('function')
expect(functionBlock?.docsLink).toBe('https://docs.sim.ai/blocks/function')
const apiBlock = getBlock('api')
expect(apiBlock?.docsLink).toBe('https://docs.sim.ai/blocks/api')
})
})
describe('generateRouterPrompt', () => {
it('should generate a base prompt with routing instructions', () => {
const prompt = generateRouterPrompt('Route to the correct agent')
expect(prompt).toContain('You are an intelligent routing agent')
expect(prompt).toContain('Route to the correct agent')
expect(prompt).toContain('Response Format')
})
it('should include target blocks information when provided', () => {
const targetBlocks = [
{
id: 'block-1',
type: 'agent',
title: 'Customer Support Agent',
description: 'Handles customer inquiries',
subBlocks: { systemPrompt: 'You are a helpful customer support agent.' },
},
{
id: 'block-2',
type: 'agent',
title: 'Sales Agent',
description: 'Handles sales inquiries',
subBlocks: { systemPrompt: 'You are a sales agent.' },
},
]
const prompt = generateRouterPrompt('Route to the correct agent', targetBlocks)
expect(prompt).toContain('Available Target Blocks')
expect(prompt).toContain('block-1')
expect(prompt).toContain('Customer Support Agent')
expect(prompt).toContain('block-2')
expect(prompt).toContain('Sales Agent')
})
it('should include current state when provided', () => {
const targetBlocks = [
{
id: 'block-1',
type: 'agent',
title: 'Agent',
currentState: { status: 'active', count: 5 },
},
]
const prompt = generateRouterPrompt('Route based on state', targetBlocks)
expect(prompt).toContain('Current State')
expect(prompt).toContain('active')
expect(prompt).toContain('5')
})
it('should handle empty target blocks array', () => {
const prompt = generateRouterPrompt('Route to agent', [])
expect(prompt).toContain('You are an intelligent routing agent')
expect(prompt).toContain('Route to agent')
})
it('should handle empty prompt string', () => {
const prompt = generateRouterPrompt('')
expect(prompt).toContain('You are an intelligent routing agent')
expect(prompt).toContain('Routing Request:')
})
})
describe('Block Category Counts', () => {
it('should have more blocks in tools category than triggers', () => {
const toolsBlocks = getBlocksByCategory('tools')
const triggersBlocks = getBlocksByCategory('triggers')
expect(toolsBlocks.length).toBeGreaterThan(triggersBlocks.length)
})
it('should have a reasonable total number of blocks', () => {
const allBlocks = getAllBlocks()
expect(allBlocks.length).toBeGreaterThan(50)
})
})
describe('SubBlock Features', () => {
it('should have wandConfig on code subBlocks where applicable', () => {
const functionBlock = getBlock('function')
const codeSubBlock = functionBlock?.subBlocks.find((sb) => sb.id === 'code')
expect(codeSubBlock?.wandConfig).toBeDefined()
expect(codeSubBlock?.wandConfig?.enabled).toBe(true)
expect(codeSubBlock?.wandConfig?.prompt).toBeDefined()
})
it('should have correct slider configurations', () => {
const routerBlock = getBlock('router')
const temperatureSubBlock = routerBlock?.subBlocks.find((sb) => sb.id === 'temperature')
expect(temperatureSubBlock?.type).toBe('slider')
expect(temperatureSubBlock?.min).toBe(0)
expect(temperatureSubBlock?.max).toBe(2)
})
it('should have required scopes on OAuth inputs', () => {
const webhookBlock = getBlock('webhook')
const gmailCredentialSubBlock = webhookBlock?.subBlocks.find(
(sb) => sb.id === 'gmailCredential'
)
expect(gmailCredentialSubBlock?.requiredScopes).toBeDefined()
expect(Array.isArray(gmailCredentialSubBlock?.requiredScopes)).toBe(true)
expect((gmailCredentialSubBlock?.requiredScopes?.length ?? 0) > 0).toBe(true)
})
})
describe('Block Consistency', () => {
it('should have consistent registry keys matching block types', () => {
for (const [key, block] of Object.entries(registry)) {
expect(key).toBe(block.type)
}
})
it('should have non-empty descriptions for all blocks', () => {
const blocks = getAllBlocks()
for (const block of blocks) {
expect(block.description.trim().length).toBeGreaterThan(0)
}
})
it('should have non-empty names for all blocks', () => {
const blocks = getAllBlocks()
for (const block of blocks) {
expect(block.name.trim().length).toBeGreaterThan(0)
}
})
})
})

View File

@@ -187,12 +187,16 @@ export const EvaluatorBlock: BlockConfig<EvaluatorResponse> = {
type: 'combobox',
placeholder: 'Type or select a model...',
required: true,
defaultValue: 'claude-sonnet-4-5',
options: () => {
const providersState = useProvidersStore.getState()
const baseModels = providersState.providers.base.models
const ollamaModels = providersState.providers.ollama.models
const vllmModels = providersState.providers.vllm.models
const openrouterModels = providersState.providers.openrouter.models
const allModels = Array.from(new Set([...baseModels, ...ollamaModels, ...openrouterModels]))
const allModels = Array.from(
new Set([...baseModels, ...ollamaModels, ...vllmModels, ...openrouterModels])
)
return allModels.map((model) => {
const icon = getProviderIcon(model)

View File

@@ -135,12 +135,16 @@ export const RouterBlock: BlockConfig<RouterResponse> = {
type: 'combobox',
placeholder: 'Type or select a model...',
required: true,
defaultValue: 'claude-sonnet-4-5',
options: () => {
const providersState = useProvidersStore.getState()
const baseModels = providersState.providers.base.models
const ollamaModels = providersState.providers.ollama.models
const vllmModels = providersState.providers.vllm.models
const openrouterModels = providersState.providers.openrouter.models
const allModels = Array.from(new Set([...baseModels, ...ollamaModels, ...openrouterModels]))
const allModels = Array.from(
new Set([...baseModels, ...ollamaModels, ...vllmModels, ...openrouterModels])
)
return allModels.map((model) => {
const icon = getProviderIcon(model)

View File

@@ -178,13 +178,13 @@ export const MEMORY = {
} as const
export const ROUTER = {
DEFAULT_MODEL: 'gpt-4o',
DEFAULT_MODEL: 'claude-sonnet-4-5',
DEFAULT_TEMPERATURE: 0,
INFERENCE_TEMPERATURE: 0.1,
} as const
export const EVALUATOR = {
DEFAULT_MODEL: 'gpt-4o',
DEFAULT_MODEL: 'claude-sonnet-4-5',
DEFAULT_TEMPERATURE: 0.1,
RESPONSE_SCHEMA_NAME: 'evaluation_response',
JSON_INDENT: 2,

View File

@@ -1,3 +1,4 @@
import { isExecutionCancelled, isRedisCancellationEnabled } from '@/lib/execution/cancellation'
import { createLogger } from '@/lib/logs/console/logger'
import { BlockType } from '@/executor/constants'
import type { DAG } from '@/executor/dag/builder'
@@ -23,6 +24,10 @@ export class ExecutionEngine {
private finalOutput: NormalizedBlockOutput = {}
private pausedBlocks: Map<string, PauseMetadata> = new Map()
private allowResumeTriggers: boolean
private cancelledFlag = false
private lastCancellationCheck = 0
private readonly useRedisCancellation: boolean
private readonly CANCELLATION_CHECK_INTERVAL_MS = 500
constructor(
private context: ExecutionContext,
@@ -31,6 +36,35 @@ export class ExecutionEngine {
private nodeOrchestrator: NodeExecutionOrchestrator
) {
this.allowResumeTriggers = this.context.metadata.resumeFromSnapshot === true
this.useRedisCancellation = isRedisCancellationEnabled() && !!this.context.executionId
}
private async checkCancellation(): Promise<boolean> {
if (this.cancelledFlag) {
return true
}
if (this.useRedisCancellation) {
const now = Date.now()
if (now - this.lastCancellationCheck < this.CANCELLATION_CHECK_INTERVAL_MS) {
return false
}
this.lastCancellationCheck = now
const cancelled = await isExecutionCancelled(this.context.executionId!)
if (cancelled) {
this.cancelledFlag = true
logger.info('Execution cancelled via Redis', { executionId: this.context.executionId })
}
return cancelled
}
if (this.context.abortSignal?.aborted) {
this.cancelledFlag = true
return true
}
return false
}
async run(triggerBlockId?: string): Promise<ExecutionResult> {
@@ -39,7 +73,7 @@ export class ExecutionEngine {
this.initializeQueue(triggerBlockId)
while (this.hasWork()) {
if (this.context.isCancelled && this.executing.size === 0) {
if ((await this.checkCancellation()) && this.executing.size === 0) {
break
}
await this.processQueue()
@@ -54,7 +88,7 @@ export class ExecutionEngine {
this.context.metadata.endTime = new Date(endTime).toISOString()
this.context.metadata.duration = endTime - startTime
if (this.context.isCancelled) {
if (this.cancelledFlag) {
return {
success: false,
output: this.finalOutput,
@@ -75,7 +109,7 @@ export class ExecutionEngine {
this.context.metadata.endTime = new Date(endTime).toISOString()
this.context.metadata.duration = endTime - startTime
if (this.context.isCancelled) {
if (this.cancelledFlag) {
return {
success: false,
output: this.finalOutput,
@@ -234,7 +268,7 @@ export class ExecutionEngine {
private async processQueue(): Promise<void> {
while (this.readyQueue.length > 0) {
if (this.context.isCancelled) {
if (await this.checkCancellation()) {
break
}
const nodeId = this.dequeue()

View File

@@ -37,7 +37,6 @@ export class DAGExecutor {
private workflowInput: WorkflowInput
private workflowVariables: Record<string, unknown>
private contextExtensions: ContextExtensions
private isCancelled = false
private dagBuilder: DAGBuilder
constructor(options: DAGExecutorOptions) {
@@ -54,13 +53,6 @@ export class DAGExecutor {
const dag = this.dagBuilder.build(this.workflow, triggerBlockId, savedIncomingEdges)
const { context, state } = this.createExecutionContext(workflowId, triggerBlockId)
// Link cancellation flag to context
Object.defineProperty(context, 'isCancelled', {
get: () => this.isCancelled,
enumerable: true,
configurable: true,
})
const resolver = new VariableResolver(this.workflow, this.workflowVariables, state)
const loopOrchestrator = new LoopOrchestrator(dag, state, resolver)
loopOrchestrator.setContextExtensions(this.contextExtensions)
@@ -82,10 +74,6 @@ export class DAGExecutor {
return await engine.run(triggerBlockId)
}
cancel(): void {
this.isCancelled = true
}
async continueExecution(
_pendingBlocks: string[],
context: ExecutionContext
@@ -180,6 +168,7 @@ export class DAGExecutor {
onStream: this.contextExtensions.onStream,
onBlockStart: this.contextExtensions.onBlockStart,
onBlockComplete: this.contextExtensions.onBlockComplete,
abortSignal: this.contextExtensions.abortSignal,
}
if (this.contextExtensions.resumeFromSnapshot) {

View File

@@ -34,7 +34,6 @@ export interface ExecutionCallbacks {
blockType: string,
output: any
) => Promise<void>
onExecutorCreated?: (executor: any) => void
}
export interface SerializableExecutionState {

View File

@@ -22,6 +22,11 @@ export interface ContextExtensions {
dagIncomingEdges?: Record<string, string[]>
snapshotState?: SerializableExecutionState
metadata?: ExecutionMetadata
/**
* AbortSignal for cancellation support.
* When aborted, the execution should stop gracefully.
*/
abortSignal?: AbortSignal
onStream?: (streamingExecution: unknown) => Promise<void>
onBlockStart?: (
blockId: string,

View File

@@ -26,7 +26,7 @@ import { collectBlockData } from '@/executor/utils/block-data'
import { buildAPIUrl, buildAuthHeaders, extractAPIErrorMessage } from '@/executor/utils/http'
import { stringifyJSON } from '@/executor/utils/json'
import { executeProviderRequest } from '@/providers'
import { getApiKey, getProviderFromModel, transformBlockTool } from '@/providers/utils'
import { getProviderFromModel, transformBlockTool } from '@/providers/utils'
import type { SerializedBlock } from '@/serializer/types'
import { executeTool } from '@/tools'
import { getTool, getToolAsync } from '@/tools/utils'
@@ -1006,15 +1006,13 @@ export class AgentBlockHandler implements BlockHandler {
responseFormat: any,
providerStartTime: number
) {
let finalApiKey: string
let finalApiKey: string | undefined = providerRequest.apiKey
if (providerId === 'vertex' && providerRequest.vertexCredential) {
finalApiKey = await this.resolveVertexCredential(
providerRequest.vertexCredential,
ctx.workflowId
)
} else {
finalApiKey = this.getApiKey(providerId, model, providerRequest.apiKey)
}
const { blockData, blockNameMapping } = collectBlockData(ctx)
@@ -1033,7 +1031,7 @@ export class AgentBlockHandler implements BlockHandler {
vertexLocation: providerRequest.vertexLocation,
responseFormat: providerRequest.responseFormat,
workflowId: providerRequest.workflowId,
workspaceId: providerRequest.workspaceId,
workspaceId: ctx.workspaceId,
stream: providerRequest.stream,
messages: 'messages' in providerRequest ? providerRequest.messages : undefined,
environmentVariables: ctx.environmentVariables || {},
@@ -1111,20 +1109,6 @@ export class AgentBlockHandler implements BlockHandler {
return this.createMinimalStreamingExecution(response.body!)
}
private getApiKey(providerId: string, model: string, inputApiKey: string): string {
try {
return getApiKey(providerId, model, inputApiKey)
} catch (error) {
logger.error('Failed to get API key:', {
provider: providerId,
model,
error: error instanceof Error ? error.message : String(error),
hasProvidedApiKey: !!inputApiKey,
})
throw new Error(error instanceof Error ? error.message : 'API key error')
}
}
/**
* Resolves a Vertex AI OAuth credential to an access token
*/

View File

@@ -82,6 +82,7 @@ describe('EvaluatorBlockHandler', () => {
{ name: 'score2', description: 'Second score', range: { min: 0, max: 10 } },
],
model: 'gpt-4o',
apiKey: 'test-api-key',
temperature: 0.1,
}
@@ -97,7 +98,6 @@ describe('EvaluatorBlockHandler', () => {
})
)
// Verify the request body contains the expected data
const fetchCallArgs = mockFetch.mock.calls[0]
const requestBody = JSON.parse(fetchCallArgs[1].body)
expect(requestBody).toMatchObject({
@@ -137,6 +137,7 @@ describe('EvaluatorBlockHandler', () => {
const inputs = {
content: JSON.stringify(contentObj),
metrics: [{ name: 'clarity', description: 'Clarity score', range: { min: 1, max: 5 } }],
apiKey: 'test-api-key',
}
mockFetch.mockImplementationOnce(() => {
@@ -169,6 +170,7 @@ describe('EvaluatorBlockHandler', () => {
metrics: [
{ name: 'completeness', description: 'Data completeness', range: { min: 0, max: 1 } },
],
apiKey: 'test-api-key',
}
mockFetch.mockImplementationOnce(() => {
@@ -198,6 +200,7 @@ describe('EvaluatorBlockHandler', () => {
const inputs = {
content: 'Test content',
metrics: [{ name: 'quality', description: 'Quality score', range: { min: 1, max: 10 } }],
apiKey: 'test-api-key',
}
mockFetch.mockImplementationOnce(() => {
@@ -223,6 +226,7 @@ describe('EvaluatorBlockHandler', () => {
const inputs = {
content: 'Test content',
metrics: [{ name: 'score', description: 'Score', range: { min: 0, max: 5 } }],
apiKey: 'test-api-key',
}
mockFetch.mockImplementationOnce(() => {
@@ -251,6 +255,7 @@ describe('EvaluatorBlockHandler', () => {
{ name: 'accuracy', description: 'Acc', range: { min: 0, max: 1 } },
{ name: 'fluency', description: 'Flu', range: { min: 0, max: 1 } },
],
apiKey: 'test-api-key',
}
mockFetch.mockImplementationOnce(() => {
@@ -276,6 +281,7 @@ describe('EvaluatorBlockHandler', () => {
const inputs = {
content: 'Test',
metrics: [{ name: 'CamelCaseScore', description: 'Desc', range: { min: 0, max: 10 } }],
apiKey: 'test-api-key',
}
mockFetch.mockImplementationOnce(() => {
@@ -304,6 +310,7 @@ describe('EvaluatorBlockHandler', () => {
{ name: 'presentScore', description: 'Desc1', range: { min: 0, max: 5 } },
{ name: 'missingScore', description: 'Desc2', range: { min: 0, max: 5 } },
],
apiKey: 'test-api-key',
}
mockFetch.mockImplementationOnce(() => {
@@ -327,7 +334,7 @@ describe('EvaluatorBlockHandler', () => {
})
it('should handle server error responses', async () => {
const inputs = { content: 'Test error handling.' }
const inputs = { content: 'Test error handling.', apiKey: 'test-api-key' }
// Override fetch mock to return an error
mockFetch.mockImplementationOnce(() => {
@@ -340,4 +347,124 @@ describe('EvaluatorBlockHandler', () => {
await expect(handler.execute(mockContext, mockBlock, inputs)).rejects.toThrow('Server error')
})
it('should handle Azure OpenAI models with endpoint and API version', async () => {
const inputs = {
content: 'Test content to evaluate',
metrics: [{ name: 'quality', description: 'Quality score', range: { min: 1, max: 10 } }],
model: 'gpt-4o',
apiKey: 'test-azure-key',
azureEndpoint: 'https://test.openai.azure.com',
azureApiVersion: '2024-07-01-preview',
}
mockGetProviderFromModel.mockReturnValue('azure-openai')
mockFetch.mockImplementationOnce(() => {
return Promise.resolve({
ok: true,
json: () =>
Promise.resolve({
content: JSON.stringify({ quality: 8 }),
model: 'gpt-4o',
tokens: {},
cost: 0,
timing: {},
}),
})
})
await handler.execute(mockContext, mockBlock, inputs)
const fetchCallArgs = mockFetch.mock.calls[0]
const requestBody = JSON.parse(fetchCallArgs[1].body)
expect(requestBody).toMatchObject({
provider: 'azure-openai',
model: 'gpt-4o',
apiKey: 'test-azure-key',
azureEndpoint: 'https://test.openai.azure.com',
azureApiVersion: '2024-07-01-preview',
})
})
it('should handle Vertex AI models with OAuth credential', async () => {
const inputs = {
content: 'Test content to evaluate',
metrics: [{ name: 'quality', description: 'Quality score', range: { min: 1, max: 10 } }],
model: 'gemini-2.0-flash-exp',
vertexCredential: 'test-vertex-credential-id',
vertexProject: 'test-gcp-project',
vertexLocation: 'us-central1',
}
mockGetProviderFromModel.mockReturnValue('vertex')
// Mock the database query for Vertex credential
const mockDb = await import('@sim/db')
const mockAccount = {
id: 'test-vertex-credential-id',
accessToken: 'mock-access-token',
refreshToken: 'mock-refresh-token',
expiresAt: new Date(Date.now() + 3600000), // 1 hour from now
}
vi.spyOn(mockDb.db.query.account, 'findFirst').mockResolvedValue(mockAccount as any)
mockFetch.mockImplementationOnce(() => {
return Promise.resolve({
ok: true,
json: () =>
Promise.resolve({
content: JSON.stringify({ quality: 9 }),
model: 'gemini-2.0-flash-exp',
tokens: {},
cost: 0,
timing: {},
}),
})
})
await handler.execute(mockContext, mockBlock, inputs)
const fetchCallArgs = mockFetch.mock.calls[0]
const requestBody = JSON.parse(fetchCallArgs[1].body)
expect(requestBody).toMatchObject({
provider: 'vertex',
model: 'gemini-2.0-flash-exp',
vertexProject: 'test-gcp-project',
vertexLocation: 'us-central1',
})
expect(requestBody.apiKey).toBe('mock-access-token')
})
it('should use default model when not provided', async () => {
const inputs = {
content: 'Test content',
metrics: [{ name: 'score', description: 'Score', range: { min: 0, max: 10 } }],
apiKey: 'test-api-key',
// No model provided - should use default
}
mockFetch.mockImplementationOnce(() => {
return Promise.resolve({
ok: true,
json: () =>
Promise.resolve({
content: JSON.stringify({ score: 7 }),
model: 'claude-sonnet-4-5',
tokens: {},
cost: 0,
timing: {},
}),
})
})
await handler.execute(mockContext, mockBlock, inputs)
const fetchCallArgs = mockFetch.mock.calls[0]
const requestBody = JSON.parse(fetchCallArgs[1].body)
expect(requestBody.model).toBe('claude-sonnet-4-5')
})
})

View File

@@ -35,7 +35,7 @@ export class EvaluatorBlockHandler implements BlockHandler {
}
const providerId = getProviderFromModel(evaluatorConfig.model)
let finalApiKey = evaluatorConfig.apiKey
let finalApiKey: string | undefined = evaluatorConfig.apiKey
if (providerId === 'vertex' && evaluatorConfig.vertexCredential) {
finalApiKey = await this.resolveVertexCredential(evaluatorConfig.vertexCredential)
}
@@ -115,6 +115,7 @@ export class EvaluatorBlockHandler implements BlockHandler {
temperature: EVALUATOR.DEFAULT_TEMPERATURE,
apiKey: finalApiKey,
workflowId: ctx.workflowId,
workspaceId: ctx.workspaceId,
}
if (providerId === 'vertex') {
@@ -122,6 +123,11 @@ export class EvaluatorBlockHandler implements BlockHandler {
providerRequest.vertexLocation = evaluatorConfig.vertexLocation
}
if (providerId === 'azure-openai') {
providerRequest.azureEndpoint = inputs.azureEndpoint
providerRequest.azureApiVersion = inputs.azureApiVersion
}
const response = await fetch(url.toString(), {
method: 'POST',
headers: {

View File

@@ -105,6 +105,7 @@ describe('RouterBlockHandler', () => {
const inputs = {
prompt: 'Choose the best option.',
model: 'gpt-4o',
apiKey: 'test-api-key',
temperature: 0.1,
}
@@ -187,7 +188,7 @@ describe('RouterBlockHandler', () => {
})
it('should throw error if LLM response is not a valid target block ID', async () => {
const inputs = { prompt: 'Test' }
const inputs = { prompt: 'Test', apiKey: 'test-api-key' }
// Override fetch mock to return an invalid block ID
mockFetch.mockImplementationOnce(() => {
@@ -210,22 +211,22 @@ describe('RouterBlockHandler', () => {
})
it('should use default model and temperature if not provided', async () => {
const inputs = { prompt: 'Choose.' }
const inputs = { prompt: 'Choose.', apiKey: 'test-api-key' }
await handler.execute(mockContext, mockBlock, inputs)
expect(mockGetProviderFromModel).toHaveBeenCalledWith('gpt-4o')
expect(mockGetProviderFromModel).toHaveBeenCalledWith('claude-sonnet-4-5')
const fetchCallArgs = mockFetch.mock.calls[0]
const requestBody = JSON.parse(fetchCallArgs[1].body)
expect(requestBody).toMatchObject({
model: 'gpt-4o',
model: 'claude-sonnet-4-5',
temperature: 0.1,
})
})
it('should handle server error responses', async () => {
const inputs = { prompt: 'Test error handling.' }
const inputs = { prompt: 'Test error handling.', apiKey: 'test-api-key' }
// Override fetch mock to return an error
mockFetch.mockImplementationOnce(() => {
@@ -238,4 +239,64 @@ describe('RouterBlockHandler', () => {
await expect(handler.execute(mockContext, mockBlock, inputs)).rejects.toThrow('Server error')
})
it('should handle Azure OpenAI models with endpoint and API version', async () => {
const inputs = {
prompt: 'Choose the best option.',
model: 'gpt-4o',
apiKey: 'test-azure-key',
azureEndpoint: 'https://test.openai.azure.com',
azureApiVersion: '2024-07-01-preview',
}
mockGetProviderFromModel.mockReturnValue('azure-openai')
await handler.execute(mockContext, mockBlock, inputs)
const fetchCallArgs = mockFetch.mock.calls[0]
const requestBody = JSON.parse(fetchCallArgs[1].body)
expect(requestBody).toMatchObject({
provider: 'azure-openai',
model: 'gpt-4o',
apiKey: 'test-azure-key',
azureEndpoint: 'https://test.openai.azure.com',
azureApiVersion: '2024-07-01-preview',
})
})
it('should handle Vertex AI models with OAuth credential', async () => {
const inputs = {
prompt: 'Choose the best option.',
model: 'gemini-2.0-flash-exp',
vertexCredential: 'test-vertex-credential-id',
vertexProject: 'test-gcp-project',
vertexLocation: 'us-central1',
}
mockGetProviderFromModel.mockReturnValue('vertex')
// Mock the database query for Vertex credential
const mockDb = await import('@sim/db')
const mockAccount = {
id: 'test-vertex-credential-id',
accessToken: 'mock-access-token',
refreshToken: 'mock-refresh-token',
expiresAt: new Date(Date.now() + 3600000), // 1 hour from now
}
vi.spyOn(mockDb.db.query.account, 'findFirst').mockResolvedValue(mockAccount as any)
await handler.execute(mockContext, mockBlock, inputs)
const fetchCallArgs = mockFetch.mock.calls[0]
const requestBody = JSON.parse(fetchCallArgs[1].body)
expect(requestBody).toMatchObject({
provider: 'vertex',
model: 'gemini-2.0-flash-exp',
vertexProject: 'test-gcp-project',
vertexLocation: 'us-central1',
})
expect(requestBody.apiKey).toBe('mock-access-token')
})
})

View File

@@ -47,7 +47,7 @@ export class RouterBlockHandler implements BlockHandler {
const messages = [{ role: 'user', content: routerConfig.prompt }]
const systemPrompt = generateRouterPrompt(routerConfig.prompt, targetBlocks)
let finalApiKey = routerConfig.apiKey
let finalApiKey: string | undefined = routerConfig.apiKey
if (providerId === 'vertex' && routerConfig.vertexCredential) {
finalApiKey = await this.resolveVertexCredential(routerConfig.vertexCredential)
}
@@ -60,6 +60,7 @@ export class RouterBlockHandler implements BlockHandler {
temperature: ROUTER.INFERENCE_TEMPERATURE,
apiKey: finalApiKey,
workflowId: ctx.workflowId,
workspaceId: ctx.workspaceId,
}
if (providerId === 'vertex') {
@@ -67,6 +68,11 @@ export class RouterBlockHandler implements BlockHandler {
providerRequest.vertexLocation = routerConfig.vertexLocation
}
if (providerId === 'azure-openai') {
providerRequest.azureEndpoint = inputs.azureEndpoint
providerRequest.azureApiVersion = inputs.azureApiVersion
}
const response = await fetch(url.toString(), {
method: 'POST',
headers: {

View File

@@ -0,0 +1,294 @@
import '@/executor/__test-utils__/mock-dependencies'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import { BlockType } from '@/executor/constants'
import { WaitBlockHandler } from '@/executor/handlers/wait/wait-handler'
import type { ExecutionContext } from '@/executor/types'
import type { SerializedBlock } from '@/serializer/types'
describe('WaitBlockHandler', () => {
let handler: WaitBlockHandler
let mockBlock: SerializedBlock
let mockContext: ExecutionContext
beforeEach(() => {
vi.useFakeTimers()
handler = new WaitBlockHandler()
mockBlock = {
id: 'wait-block-1',
metadata: { id: BlockType.WAIT, name: 'Test Wait' },
position: { x: 50, y: 50 },
config: { tool: BlockType.WAIT, params: {} },
inputs: { timeValue: 'string', timeUnit: 'string' },
outputs: {},
enabled: true,
}
mockContext = {
workflowId: 'test-workflow-id',
blockStates: new Map(),
blockLogs: [],
metadata: { duration: 0 },
environmentVariables: {},
decisions: { router: new Map(), condition: new Map() },
loopExecutions: new Map(),
completedLoops: new Set(),
executedBlocks: new Set(),
activeExecutionPath: new Set(),
}
})
afterEach(() => {
vi.useRealTimers()
})
it('should handle wait blocks', () => {
expect(handler.canHandle(mockBlock)).toBe(true)
const nonWaitBlock: SerializedBlock = { ...mockBlock, metadata: { id: 'other' } }
expect(handler.canHandle(nonWaitBlock)).toBe(false)
})
it('should wait for specified seconds', async () => {
const inputs = {
timeValue: '5',
timeUnit: 'seconds',
}
const executePromise = handler.execute(mockContext, mockBlock, inputs)
await vi.advanceTimersByTimeAsync(5000)
const result = await executePromise
expect(result).toEqual({
waitDuration: 5000,
status: 'completed',
})
})
it('should wait for specified minutes', async () => {
const inputs = {
timeValue: '2',
timeUnit: 'minutes',
}
const executePromise = handler.execute(mockContext, mockBlock, inputs)
await vi.advanceTimersByTimeAsync(120000)
const result = await executePromise
expect(result).toEqual({
waitDuration: 120000,
status: 'completed',
})
})
it('should use default values when not provided', async () => {
const inputs = {}
const executePromise = handler.execute(mockContext, mockBlock, inputs)
await vi.advanceTimersByTimeAsync(10000)
const result = await executePromise
expect(result).toEqual({
waitDuration: 10000,
status: 'completed',
})
})
it('should throw error for negative wait times', async () => {
const inputs = {
timeValue: '-5',
timeUnit: 'seconds',
}
await expect(handler.execute(mockContext, mockBlock, inputs)).rejects.toThrow(
'Wait amount must be a positive number'
)
})
it('should throw error for zero wait time', async () => {
const inputs = {
timeValue: '0',
timeUnit: 'seconds',
}
await expect(handler.execute(mockContext, mockBlock, inputs)).rejects.toThrow(
'Wait amount must be a positive number'
)
})
it('should throw error for non-numeric wait times', async () => {
const inputs = {
timeValue: 'abc',
timeUnit: 'seconds',
}
await expect(handler.execute(mockContext, mockBlock, inputs)).rejects.toThrow(
'Wait amount must be a positive number'
)
})
it('should throw error when wait time exceeds maximum (seconds)', async () => {
const inputs = {
timeValue: '601',
timeUnit: 'seconds',
}
await expect(handler.execute(mockContext, mockBlock, inputs)).rejects.toThrow(
'Wait time exceeds maximum of 600 seconds'
)
})
it('should throw error when wait time exceeds maximum (minutes)', async () => {
const inputs = {
timeValue: '11',
timeUnit: 'minutes',
}
await expect(handler.execute(mockContext, mockBlock, inputs)).rejects.toThrow(
'Wait time exceeds maximum of 10 minutes'
)
})
it('should allow maximum wait time of exactly 10 minutes', async () => {
const inputs = {
timeValue: '10',
timeUnit: 'minutes',
}
const executePromise = handler.execute(mockContext, mockBlock, inputs)
await vi.advanceTimersByTimeAsync(600000)
const result = await executePromise
expect(result).toEqual({
waitDuration: 600000,
status: 'completed',
})
})
it('should allow maximum wait time of exactly 600 seconds', async () => {
const inputs = {
timeValue: '600',
timeUnit: 'seconds',
}
const executePromise = handler.execute(mockContext, mockBlock, inputs)
await vi.advanceTimersByTimeAsync(600000)
const result = await executePromise
expect(result).toEqual({
waitDuration: 600000,
status: 'completed',
})
})
it('should handle cancellation via AbortSignal', async () => {
const abortController = new AbortController()
mockContext.abortSignal = abortController.signal
const inputs = {
timeValue: '30',
timeUnit: 'seconds',
}
const executePromise = handler.execute(mockContext, mockBlock, inputs)
await vi.advanceTimersByTimeAsync(10000)
abortController.abort()
await vi.advanceTimersByTimeAsync(1)
const result = await executePromise
expect(result).toEqual({
waitDuration: 30000,
status: 'cancelled',
})
})
it('should return cancelled immediately if signal is already aborted', async () => {
const abortController = new AbortController()
abortController.abort()
mockContext.abortSignal = abortController.signal
const inputs = {
timeValue: '10',
timeUnit: 'seconds',
}
const result = await handler.execute(mockContext, mockBlock, inputs)
expect(result).toEqual({
waitDuration: 10000,
status: 'cancelled',
})
})
it('should handle partial completion before cancellation', async () => {
const abortController = new AbortController()
mockContext.abortSignal = abortController.signal
const inputs = {
timeValue: '100',
timeUnit: 'seconds',
}
const executePromise = handler.execute(mockContext, mockBlock, inputs)
await vi.advanceTimersByTimeAsync(50000)
abortController.abort()
await vi.advanceTimersByTimeAsync(1)
const result = await executePromise
expect(result).toEqual({
waitDuration: 100000,
status: 'cancelled',
})
})
it('should handle fractional seconds by converting to integers', async () => {
const inputs = {
timeValue: '5.7',
timeUnit: 'seconds',
}
const executePromise = handler.execute(mockContext, mockBlock, inputs)
await vi.advanceTimersByTimeAsync(5000)
const result = await executePromise
expect(result).toEqual({
waitDuration: 5000,
status: 'completed',
})
})
it('should handle very short wait times', async () => {
const inputs = {
timeValue: '1',
timeUnit: 'seconds',
}
const executePromise = handler.execute(mockContext, mockBlock, inputs)
await vi.advanceTimersByTimeAsync(1000)
const result = await executePromise
expect(result).toEqual({
waitDuration: 1000,
status: 'completed',
})
})
})

View File

@@ -1,37 +1,65 @@
import { createLogger } from '@/lib/logs/console/logger'
import { isExecutionCancelled, isRedisCancellationEnabled } from '@/lib/execution/cancellation'
import { BlockType } from '@/executor/constants'
import type { BlockHandler, ExecutionContext } from '@/executor/types'
import type { SerializedBlock } from '@/serializer/types'
const logger = createLogger('WaitBlockHandler')
const CANCELLATION_CHECK_INTERVAL_MS = 500
/**
* Helper function to sleep for a specified number of milliseconds
* On client-side: checks for cancellation every 100ms (non-blocking for UI)
* On server-side: simple sleep without polling (server execution can't be cancelled mid-flight)
*/
const sleep = async (ms: number, checkCancelled?: () => boolean): Promise<boolean> => {
const isClientSide = typeof window !== 'undefined'
interface SleepOptions {
signal?: AbortSignal
executionId?: string
}
if (!isClientSide) {
await new Promise((resolve) => setTimeout(resolve, ms))
return true
const sleep = async (ms: number, options: SleepOptions = {}): Promise<boolean> => {
const { signal, executionId } = options
const useRedis = isRedisCancellationEnabled() && !!executionId
if (!useRedis && signal?.aborted) {
return false
}
const chunkMs = 100
let elapsed = 0
return new Promise((resolve) => {
// biome-ignore lint/style/useConst: needs to be declared before cleanup() but assigned later
let mainTimeoutId: NodeJS.Timeout | undefined
let checkIntervalId: NodeJS.Timeout | undefined
let resolved = false
while (elapsed < ms) {
if (checkCancelled?.()) {
return false
const cleanup = () => {
if (mainTimeoutId) clearTimeout(mainTimeoutId)
if (checkIntervalId) clearInterval(checkIntervalId)
if (!useRedis && signal) signal.removeEventListener('abort', onAbort)
}
const sleepTime = Math.min(chunkMs, ms - elapsed)
await new Promise((resolve) => setTimeout(resolve, sleepTime))
elapsed += sleepTime
}
const onAbort = () => {
if (resolved) return
resolved = true
cleanup()
resolve(false)
}
return true
if (useRedis) {
checkIntervalId = setInterval(async () => {
if (resolved) return
try {
const cancelled = await isExecutionCancelled(executionId!)
if (cancelled) {
resolved = true
cleanup()
resolve(false)
}
} catch {}
}, CANCELLATION_CHECK_INTERVAL_MS)
} else if (signal) {
signal.addEventListener('abort', onAbort, { once: true })
}
mainTimeoutId = setTimeout(() => {
if (resolved) return
resolved = true
cleanup()
resolve(true)
}, ms)
})
}
/**
@@ -65,11 +93,10 @@ export class WaitBlockHandler implements BlockHandler {
throw new Error(`Wait time exceeds maximum of ${maxDisplay}`)
}
const checkCancelled = () => {
return (ctx as any).isCancelled === true
}
const completed = await sleep(waitMs, checkCancelled)
const completed = await sleep(waitMs, {
signal: ctx.abortSignal,
executionId: ctx.executionId,
})
if (!completed) {
return {

View File

@@ -1,4 +1,5 @@
import { generateRequestId } from '@/lib/core/utils/request'
import { isExecutionCancelled, isRedisCancellationEnabled } from '@/lib/execution/cancellation'
import { executeInIsolatedVM } from '@/lib/execution/isolated-vm'
import { createLogger } from '@/lib/logs/console/logger'
import { buildLoopIndexCondition, DEFAULTS, EDGE } from '@/executor/constants'
@@ -229,7 +230,14 @@ export class LoopOrchestrator {
}
}
if (ctx.isCancelled) {
const useRedis = isRedisCancellationEnabled() && !!ctx.executionId
let isCancelled = false
if (useRedis) {
isCancelled = await isExecutionCancelled(ctx.executionId!)
} else {
isCancelled = ctx.abortSignal?.aborted ?? false
}
if (isCancelled) {
logger.info('Loop execution cancelled', { loopId, iteration: scope.iteration })
return this.createExitResult(ctx, loopId, scope)
}

View File

@@ -222,8 +222,12 @@ export interface ExecutionContext {
output: any
) => Promise<void>
// Cancellation support
isCancelled?: boolean
/**
* AbortSignal for cancellation support.
* When the signal is aborted, execution should stop gracefully.
* This is triggered when the SSE client disconnects.
*/
abortSignal?: AbortSignal
// Dynamically added nodes that need to be scheduled (e.g., from parallel expansion)
pendingDynamicNodes?: string[]

View File

@@ -0,0 +1,357 @@
import { loggerMock } from '@sim/testing'
import { describe, expect, it, vi } from 'vitest'
import { ExecutionState } from '@/executor/execution/state'
import { BlockResolver } from './block'
import type { ResolutionContext } from './reference'
vi.mock('@/lib/logs/console/logger', () => loggerMock)
/**
* Creates a minimal workflow for testing.
*/
function createTestWorkflow(blocks: Array<{ id: string; name?: string; type?: string }> = []) {
return {
version: '1.0',
blocks: blocks.map((b) => ({
id: b.id,
position: { x: 0, y: 0 },
config: { tool: b.type ?? 'function', params: {} },
inputs: {},
outputs: {},
metadata: { id: b.type ?? 'function', name: b.name ?? b.id },
enabled: true,
})),
connections: [],
loops: {},
parallels: {},
}
}
/**
* Creates a test ResolutionContext with block outputs.
*/
function createTestContext(
currentNodeId: string,
blockOutputs: Record<string, any> = {},
contextBlockStates?: Map<string, { output: any }>
): ResolutionContext {
const state = new ExecutionState()
for (const [blockId, output] of Object.entries(blockOutputs)) {
state.setBlockOutput(blockId, output)
}
return {
executionContext: {
blockStates: contextBlockStates ?? new Map(),
},
executionState: state,
currentNodeId,
} as unknown as ResolutionContext
}
describe('BlockResolver', () => {
describe('canResolve', () => {
it.concurrent('should return true for block references', () => {
const resolver = new BlockResolver(createTestWorkflow([{ id: 'block-1' }]))
expect(resolver.canResolve('<block-1>')).toBe(true)
expect(resolver.canResolve('<block-1.output>')).toBe(true)
expect(resolver.canResolve('<block-1.result.value>')).toBe(true)
})
it.concurrent('should return true for block references by name', () => {
const resolver = new BlockResolver(createTestWorkflow([{ id: 'block-1', name: 'My Block' }]))
expect(resolver.canResolve('<myblock>')).toBe(true)
expect(resolver.canResolve('<My Block>')).toBe(true)
})
it.concurrent('should return false for special prefixes', () => {
const resolver = new BlockResolver(createTestWorkflow())
expect(resolver.canResolve('<loop.index>')).toBe(false)
expect(resolver.canResolve('<parallel.currentItem>')).toBe(false)
expect(resolver.canResolve('<variable.myvar>')).toBe(false)
})
it.concurrent('should return false for non-references', () => {
const resolver = new BlockResolver(createTestWorkflow())
expect(resolver.canResolve('plain text')).toBe(false)
expect(resolver.canResolve('{{ENV_VAR}}')).toBe(false)
expect(resolver.canResolve('block-1.output')).toBe(false)
})
})
describe('resolve', () => {
it.concurrent('should resolve block output by ID', () => {
const workflow = createTestWorkflow([{ id: 'source-block' }])
const resolver = new BlockResolver(workflow)
const ctx = createTestContext('current', {
'source-block': { result: 'success', data: { value: 42 } },
})
expect(resolver.resolve('<source-block>', ctx)).toEqual({
result: 'success',
data: { value: 42 },
})
})
it.concurrent('should resolve block output by name', () => {
const workflow = createTestWorkflow([{ id: 'block-123', name: 'My Source Block' }])
const resolver = new BlockResolver(workflow)
const ctx = createTestContext('current', {
'block-123': { message: 'hello' },
})
expect(resolver.resolve('<mysourceblock>', ctx)).toEqual({ message: 'hello' })
expect(resolver.resolve('<My Source Block>', ctx)).toEqual({ message: 'hello' })
})
it.concurrent('should resolve nested property path', () => {
const workflow = createTestWorkflow([{ id: 'source' }])
const resolver = new BlockResolver(workflow)
const ctx = createTestContext('current', {
source: { user: { profile: { name: 'Alice', email: 'alice@test.com' } } },
})
expect(resolver.resolve('<source.user.profile.name>', ctx)).toBe('Alice')
expect(resolver.resolve('<source.user.profile.email>', ctx)).toBe('alice@test.com')
})
it.concurrent('should resolve array index in path', () => {
const workflow = createTestWorkflow([{ id: 'source' }])
const resolver = new BlockResolver(workflow)
const ctx = createTestContext('current', {
source: { items: [{ id: 1 }, { id: 2 }, { id: 3 }] },
})
expect(resolver.resolve('<source.items.0>', ctx)).toEqual({ id: 1 })
expect(resolver.resolve('<source.items.1.id>', ctx)).toBe(2)
})
it.concurrent('should throw error for non-existent path', () => {
const workflow = createTestWorkflow([{ id: 'source' }])
const resolver = new BlockResolver(workflow)
const ctx = createTestContext('current', {
source: { existing: 'value' },
})
expect(() => resolver.resolve('<source.nonexistent>', ctx)).toThrow(
/No value found at path "nonexistent" in block "source"/
)
})
it.concurrent('should return undefined for non-existent block', () => {
const workflow = createTestWorkflow([{ id: 'existing' }])
const resolver = new BlockResolver(workflow)
const ctx = createTestContext('current', {})
expect(resolver.resolve('<nonexistent>', ctx)).toBeUndefined()
})
it.concurrent('should fall back to context blockStates', () => {
const workflow = createTestWorkflow([{ id: 'source' }])
const resolver = new BlockResolver(workflow)
const contextStates = new Map([['source', { output: { fallback: true } }]])
const ctx = createTestContext('current', {}, contextStates)
expect(resolver.resolve('<source>', ctx)).toEqual({ fallback: true })
})
})
describe('formatValueForBlock', () => {
it.concurrent('should format string for condition block', () => {
const resolver = new BlockResolver(createTestWorkflow())
const result = resolver.formatValueForBlock('hello world', 'condition')
expect(result).toBe('"hello world"')
})
it.concurrent('should escape special characters for condition block', () => {
const resolver = new BlockResolver(createTestWorkflow())
expect(resolver.formatValueForBlock('line1\nline2', 'condition')).toBe('"line1\\nline2"')
expect(resolver.formatValueForBlock('quote "test"', 'condition')).toBe('"quote \\"test\\""')
expect(resolver.formatValueForBlock('backslash \\', 'condition')).toBe('"backslash \\\\"')
expect(resolver.formatValueForBlock('tab\there', 'condition')).toBe('"tab\there"')
})
it.concurrent('should format object for condition block', () => {
const resolver = new BlockResolver(createTestWorkflow())
const result = resolver.formatValueForBlock({ key: 'value' }, 'condition')
expect(result).toBe('{"key":"value"}')
})
it.concurrent('should format null/undefined for condition block', () => {
const resolver = new BlockResolver(createTestWorkflow())
expect(resolver.formatValueForBlock(null, 'condition')).toBe('null')
expect(resolver.formatValueForBlock(undefined, 'condition')).toBe('undefined')
})
it.concurrent('should format number for condition block', () => {
const resolver = new BlockResolver(createTestWorkflow())
expect(resolver.formatValueForBlock(42, 'condition')).toBe('42')
expect(resolver.formatValueForBlock(3.14, 'condition')).toBe('3.14')
expect(resolver.formatValueForBlock(-100, 'condition')).toBe('-100')
})
it.concurrent('should format boolean for condition block', () => {
const resolver = new BlockResolver(createTestWorkflow())
expect(resolver.formatValueForBlock(true, 'condition')).toBe('true')
expect(resolver.formatValueForBlock(false, 'condition')).toBe('false')
})
it.concurrent('should format string for function block (JSON escaped)', () => {
const resolver = new BlockResolver(createTestWorkflow())
const result = resolver.formatValueForBlock('hello', 'function')
expect(result).toBe('"hello"')
})
it.concurrent('should format string for function block in template literal', () => {
const resolver = new BlockResolver(createTestWorkflow())
const result = resolver.formatValueForBlock('hello', 'function', true)
expect(result).toBe('hello')
})
it.concurrent('should format object for function block in template literal', () => {
const resolver = new BlockResolver(createTestWorkflow())
const result = resolver.formatValueForBlock({ a: 1 }, 'function', true)
expect(result).toBe('{"a":1}')
})
it.concurrent('should format null/undefined for function block', () => {
const resolver = new BlockResolver(createTestWorkflow())
expect(resolver.formatValueForBlock(null, 'function')).toBe('null')
expect(resolver.formatValueForBlock(undefined, 'function')).toBe('undefined')
})
it.concurrent('should format string for response block (no quotes)', () => {
const resolver = new BlockResolver(createTestWorkflow())
expect(resolver.formatValueForBlock('plain text', 'response')).toBe('plain text')
})
it.concurrent('should format object for response block', () => {
const resolver = new BlockResolver(createTestWorkflow())
expect(resolver.formatValueForBlock({ key: 'value' }, 'response')).toBe('{"key":"value"}')
})
it.concurrent('should format array for response block', () => {
const resolver = new BlockResolver(createTestWorkflow())
expect(resolver.formatValueForBlock([1, 2, 3], 'response')).toBe('[1,2,3]')
})
it.concurrent('should format primitives for response block', () => {
const resolver = new BlockResolver(createTestWorkflow())
expect(resolver.formatValueForBlock(42, 'response')).toBe('42')
expect(resolver.formatValueForBlock(true, 'response')).toBe('true')
})
it.concurrent('should format object for default block type', () => {
const resolver = new BlockResolver(createTestWorkflow())
expect(resolver.formatValueForBlock({ x: 1 }, undefined)).toBe('{"x":1}')
expect(resolver.formatValueForBlock({ x: 1 }, 'agent')).toBe('{"x":1}')
})
it.concurrent('should format primitive for default block type', () => {
const resolver = new BlockResolver(createTestWorkflow())
expect(resolver.formatValueForBlock('text', undefined)).toBe('text')
expect(resolver.formatValueForBlock(123, undefined)).toBe('123')
})
})
describe('tryParseJSON', () => {
it.concurrent('should parse valid JSON object string', () => {
const resolver = new BlockResolver(createTestWorkflow())
expect(resolver.tryParseJSON('{"key": "value"}')).toEqual({ key: 'value' })
})
it.concurrent('should parse valid JSON array string', () => {
const resolver = new BlockResolver(createTestWorkflow())
expect(resolver.tryParseJSON('[1, 2, 3]')).toEqual([1, 2, 3])
})
it.concurrent('should return original value for non-string input', () => {
const resolver = new BlockResolver(createTestWorkflow())
const obj = { key: 'value' }
expect(resolver.tryParseJSON(obj)).toBe(obj)
expect(resolver.tryParseJSON(123)).toBe(123)
expect(resolver.tryParseJSON(null)).toBe(null)
})
it.concurrent('should return original string for non-JSON strings', () => {
const resolver = new BlockResolver(createTestWorkflow())
expect(resolver.tryParseJSON('plain text')).toBe('plain text')
expect(resolver.tryParseJSON('123')).toBe('123')
expect(resolver.tryParseJSON('')).toBe('')
})
it.concurrent('should return original string for invalid JSON', () => {
const resolver = new BlockResolver(createTestWorkflow())
expect(resolver.tryParseJSON('{invalid json}')).toBe('{invalid json}')
expect(resolver.tryParseJSON('[1, 2,')).toBe('[1, 2,')
})
it.concurrent('should handle whitespace around JSON', () => {
const resolver = new BlockResolver(createTestWorkflow())
expect(resolver.tryParseJSON(' {"key": "value"} ')).toEqual({ key: 'value' })
expect(resolver.tryParseJSON('\n[1, 2]\n')).toEqual([1, 2])
})
})
describe('edge cases', () => {
  it.concurrent('should handle case-insensitive block name matching', () => {
    const workflow = createTestWorkflow([{ id: 'block-1', name: 'My Block' }])
    const resolver = new BlockResolver(workflow)
    const ctx = createTestContext('current', { 'block-1': { data: 'test' } })
    // Any casing of the (space-stripped) block name resolves to the same block.
    for (const ref of ['<MYBLOCK>', '<myblock>', '<MyBlock>']) {
      expect(resolver.resolve(ref, ctx)).toEqual({ data: 'test' })
    }
  })

  it.concurrent('should handle block names with spaces', () => {
    const workflow = createTestWorkflow([{ id: 'block-1', name: 'API Request Block' }])
    const resolver = new BlockResolver(workflow)
    const ctx = createTestContext('current', { 'block-1': { status: 200 } })
    expect(resolver.resolve('<apirequestblock>', ctx)).toEqual({ status: 200 })
  })

  it.concurrent('should handle empty path returning entire output', () => {
    const resolver = new BlockResolver(createTestWorkflow([{ id: 'source' }]))
    const output = { a: 1, b: 2, c: { nested: true } }
    const ctx = createTestContext('current', { source: output })
    expect(resolver.resolve('<source>', ctx)).toEqual(output)
  })

  it.concurrent('should handle output with null values', () => {
    const resolver = new BlockResolver(createTestWorkflow([{ id: 'source' }]))
    const ctx = createTestContext('current', {
      source: { value: null, other: 'exists' },
    })
    // null is a real stored value and resolves as such.
    expect(resolver.resolve('<source.value>', ctx)).toBeNull()
    expect(resolver.resolve('<source.other>', ctx)).toBe('exists')
  })

  it.concurrent('should handle output with undefined values', () => {
    const resolver = new BlockResolver(createTestWorkflow([{ id: 'source' }]))
    const ctx = createTestContext('current', {
      source: { value: undefined, other: 'exists' },
    })
    // Unlike null, an undefined value is treated as missing and throws.
    expect(() => resolver.resolve('<source.value>', ctx)).toThrow()
  })

  it.concurrent('should handle deeply nested path errors', () => {
    const resolver = new BlockResolver(createTestWorkflow([{ id: 'source' }]))
    const ctx = createTestContext('current', {
      source: { level1: { level2: {} } },
    })
    expect(() => resolver.resolve('<source.level1.level2.level3>', ctx)).toThrow(
      /No value found at path "level1.level2.level3"/
    )
  })
})
})

View File

@@ -0,0 +1,178 @@
import { loggerMock } from '@sim/testing'
import { describe, expect, it, vi } from 'vitest'
import { EnvResolver } from './env'
import type { ResolutionContext } from './reference'
vi.mock('@/lib/logs/console/logger', () => loggerMock)
/**
 * Builds the smallest ResolutionContext the tests need.
 * The EnvResolver only reads context.executionContext.environmentVariables.
 */
function createTestContext(environmentVariables: Record<string, string>): ResolutionContext {
  const context = {
    executionContext: { environmentVariables },
    executionState: {},
    currentNodeId: 'test-node',
  }
  return context as ResolutionContext
}
describe('EnvResolver', () => {
  describe('canResolve', () => {
    it.concurrent('should return true for valid env var references', () => {
      const resolver = new EnvResolver()
      for (const ref of ['{{API_KEY}}', '{{DATABASE_URL}}', '{{MY_VAR}}']) {
        expect(resolver.canResolve(ref)).toBe(true)
      }
    })

    it.concurrent('should return true for env vars with underscores', () => {
      const resolver = new EnvResolver()
      for (const ref of ['{{MY_SECRET_KEY}}', '{{SOME_LONG_VARIABLE_NAME}}']) {
        expect(resolver.canResolve(ref)).toBe(true)
      }
    })

    it.concurrent('should return true for env vars with numbers', () => {
      const resolver = new EnvResolver()
      for (const ref of ['{{API_KEY_2}}', '{{V2_CONFIG}}']) {
        expect(resolver.canResolve(ref)).toBe(true)
      }
    })

    it.concurrent('should return false for non-env var references', () => {
      const resolver = new EnvResolver()
      // Block/variable/loop references, plain text, and unbalanced braces all fail.
      const rejected = [
        '<block.output>',
        '<variable.myvar>',
        '<loop.index>',
        'plain text',
        '{API_KEY}',
        '{{API_KEY}',
        '{API_KEY}}',
      ]
      for (const ref of rejected) {
        expect(resolver.canResolve(ref)).toBe(false)
      }
    })
  })

  describe('resolve', () => {
    it.concurrent('should resolve existing environment variable', () => {
      const resolver = new EnvResolver()
      const ctx = createTestContext({ API_KEY: 'secret-api-key' })
      expect(resolver.resolve('{{API_KEY}}', ctx)).toBe('secret-api-key')
    })

    it.concurrent('should resolve multiple different environment variables', () => {
      const resolver = new EnvResolver()
      const ctx = createTestContext({
        DATABASE_URL: 'postgres://localhost:5432/db',
        REDIS_URL: 'redis://localhost:6379',
        SECRET_KEY: 'super-secret',
      })
      expect(resolver.resolve('{{DATABASE_URL}}', ctx)).toBe('postgres://localhost:5432/db')
      expect(resolver.resolve('{{REDIS_URL}}', ctx)).toBe('redis://localhost:6379')
      expect(resolver.resolve('{{SECRET_KEY}}', ctx)).toBe('super-secret')
    })

    it.concurrent('should return original reference for non-existent variable', () => {
      const resolver = new EnvResolver()
      const ctx = createTestContext({ EXISTING: 'value' })
      // Unknown names fall through untouched rather than resolving to undefined.
      expect(resolver.resolve('{{NON_EXISTENT}}', ctx)).toBe('{{NON_EXISTENT}}')
    })

    it.concurrent('should handle empty string value', () => {
      const resolver = new EnvResolver()
      const ctx = createTestContext({ EMPTY_VAR: '' })
      // Empty string is a legitimate value, distinct from "not set".
      expect(resolver.resolve('{{EMPTY_VAR}}', ctx)).toBe('')
    })

    it.concurrent('should handle value with special characters', () => {
      const resolver = new EnvResolver()
      const special = 'value with spaces & special chars: !@#$%^&*()'
      const ctx = createTestContext({ SPECIAL: special })
      expect(resolver.resolve('{{SPECIAL}}', ctx)).toBe(special)
    })

    it.concurrent('should handle JSON string values', () => {
      const resolver = new EnvResolver()
      const json = '{"key": "value", "nested": {"a": 1}}'
      const ctx = createTestContext({ JSON_CONFIG: json })
      // The raw string is returned verbatim; no JSON parsing happens here.
      expect(resolver.resolve('{{JSON_CONFIG}}', ctx)).toBe(json)
    })

    it.concurrent('should handle empty environment variables object', () => {
      const resolver = new EnvResolver()
      const ctx = createTestContext({})
      expect(resolver.resolve('{{ANY_VAR}}', ctx)).toBe('{{ANY_VAR}}')
    })

    it.concurrent('should handle undefined environmentVariables gracefully', () => {
      const resolver = new EnvResolver()
      // Built without the helper so executionContext.environmentVariables is
      // missing entirely, not just empty.
      const ctx = {
        executionContext: {},
        executionState: {},
        currentNodeId: 'test-node',
      } as ResolutionContext
      expect(resolver.resolve('{{API_KEY}}', ctx)).toBe('{{API_KEY}}')
    })
  })

  describe('edge cases', () => {
    it.concurrent('should handle variable names with consecutive underscores', () => {
      const resolver = new EnvResolver()
      const ctx = createTestContext({ MY__VAR: 'double underscore' })
      expect(resolver.canResolve('{{MY__VAR}}')).toBe(true)
      expect(resolver.resolve('{{MY__VAR}}', ctx)).toBe('double underscore')
    })

    it.concurrent('should handle single character variable names', () => {
      const resolver = new EnvResolver()
      const ctx = createTestContext({ X: 'single' })
      expect(resolver.canResolve('{{X}}')).toBe(true)
      expect(resolver.resolve('{{X}}', ctx)).toBe('single')
    })

    it.concurrent('should handle very long variable names', () => {
      const resolver = new EnvResolver()
      const longName = 'A'.repeat(100)
      const ctx = createTestContext({ [longName]: 'long name value' })
      expect(resolver.canResolve(`{{${longName}}}`)).toBe(true)
      expect(resolver.resolve(`{{${longName}}}`, ctx)).toBe('long name value')
    })

    it.concurrent('should handle value containing mustache-like syntax', () => {
      const resolver = new EnvResolver()
      const ctx = createTestContext({ TEMPLATE: 'Hello {{name}}!' })
      // The stored value is not re-expanded even though it looks like a reference.
      expect(resolver.resolve('{{TEMPLATE}}', ctx)).toBe('Hello {{name}}!')
    })

    it.concurrent('should handle multiline values', () => {
      const resolver = new EnvResolver()
      const ctx = createTestContext({ MULTILINE: 'line1\nline2\nline3' })
      expect(resolver.resolve('{{MULTILINE}}', ctx)).toBe('line1\nline2\nline3')
    })
  })
})

View File

@@ -0,0 +1,280 @@
import { loggerMock } from '@sim/testing'
import { describe, expect, it, vi } from 'vitest'
import type { LoopScope } from '@/executor/execution/state'
import { LoopResolver } from './loop'
import type { ResolutionContext } from './reference'
vi.mock('@/lib/logs/console/logger', () => loggerMock)
/**
 * Creates a minimal workflow for testing.
 * Normalizes each loop entry so `id` defaults to its map key and
 * `iterations` defaults to 1.
 */
function createTestWorkflow(
  loops: Record<string, { nodes: string[]; id?: string; iterations?: number }> = {}
) {
  const normalizedLoops = Object.fromEntries(
    Object.entries(loops).map(([key, loop]) => [
      key,
      { id: loop.id ?? key, nodes: loop.nodes, iterations: loop.iterations ?? 1 },
    ])
  )
  return {
    version: '1.0',
    blocks: [],
    connections: [],
    loops: normalizedLoops,
    parallels: {},
  }
}
/**
 * Creates a test loop scope.
 * Starts from a zeroed-out scope and layers the caller's overrides on top.
 */
function createLoopScope(overrides: Partial<LoopScope> = {}): LoopScope {
  const defaults = {
    iteration: 0,
    currentIterationOutputs: new Map(),
    allIterationOutputs: [],
  }
  return { ...defaults, ...overrides }
}
/**
 * Creates a minimal ResolutionContext for testing.
 * Omitting `loopExecutions` yields an empty map, matching "nothing ran yet".
 */
function createTestContext(
  currentNodeId: string,
  loopScope?: LoopScope,
  loopExecutions?: Map<string, LoopScope>
): ResolutionContext {
  const context = {
    executionContext: {
      loopExecutions: loopExecutions ?? new Map(),
    },
    executionState: {},
    currentNodeId,
    loopScope,
  }
  return context as ResolutionContext
}
describe('LoopResolver', () => {
describe('canResolve', () => {
it.concurrent('should return true for loop references', () => {
const resolver = new LoopResolver(createTestWorkflow())
expect(resolver.canResolve('<loop.index>')).toBe(true)
expect(resolver.canResolve('<loop.iteration>')).toBe(true)
expect(resolver.canResolve('<loop.item>')).toBe(true)
expect(resolver.canResolve('<loop.currentItem>')).toBe(true)
expect(resolver.canResolve('<loop.items>')).toBe(true)
})
it.concurrent('should return true for loop references with nested paths', () => {
const resolver = new LoopResolver(createTestWorkflow())
expect(resolver.canResolve('<loop.item.name>')).toBe(true)
expect(resolver.canResolve('<loop.currentItem.data.value>')).toBe(true)
expect(resolver.canResolve('<loop.items.0>')).toBe(true)
})
it.concurrent('should return false for non-loop references', () => {
const resolver = new LoopResolver(createTestWorkflow())
expect(resolver.canResolve('<block.output>')).toBe(false)
expect(resolver.canResolve('<variable.myvar>')).toBe(false)
expect(resolver.canResolve('<parallel.index>')).toBe(false)
expect(resolver.canResolve('plain text')).toBe(false)
expect(resolver.canResolve('{{ENV_VAR}}')).toBe(false)
})
it.concurrent('should return false for malformed references', () => {
const resolver = new LoopResolver(createTestWorkflow())
expect(resolver.canResolve('loop.index')).toBe(false)
expect(resolver.canResolve('<loop.index')).toBe(false)
expect(resolver.canResolve('loop.index>')).toBe(false)
})
})
describe('resolve with explicit loopScope', () => {
it.concurrent('should resolve iteration/index property', () => {
const resolver = new LoopResolver(createTestWorkflow())
const loopScope = createLoopScope({ iteration: 5 })
const ctx = createTestContext('block-1', loopScope)
expect(resolver.resolve('<loop.iteration>', ctx)).toBe(5)
expect(resolver.resolve('<loop.index>', ctx)).toBe(5)
})
it.concurrent('should resolve item/currentItem property', () => {
const resolver = new LoopResolver(createTestWorkflow())
const loopScope = createLoopScope({ item: { name: 'test', value: 42 } })
const ctx = createTestContext('block-1', loopScope)
expect(resolver.resolve('<loop.item>', ctx)).toEqual({ name: 'test', value: 42 })
expect(resolver.resolve('<loop.currentItem>', ctx)).toEqual({ name: 'test', value: 42 })
})
it.concurrent('should resolve items property', () => {
const resolver = new LoopResolver(createTestWorkflow())
const items = ['a', 'b', 'c']
const loopScope = createLoopScope({ items })
const ctx = createTestContext('block-1', loopScope)
expect(resolver.resolve('<loop.items>', ctx)).toEqual(items)
})
it.concurrent('should resolve nested path in item', () => {
const resolver = new LoopResolver(createTestWorkflow())
const loopScope = createLoopScope({
item: { user: { name: 'Alice', address: { city: 'NYC' } } },
})
const ctx = createTestContext('block-1', loopScope)
expect(resolver.resolve('<loop.item.user.name>', ctx)).toBe('Alice')
expect(resolver.resolve('<loop.item.user.address.city>', ctx)).toBe('NYC')
})
it.concurrent('should resolve array index in items', () => {
const resolver = new LoopResolver(createTestWorkflow())
const loopScope = createLoopScope({
items: [{ id: 1 }, { id: 2 }, { id: 3 }],
})
const ctx = createTestContext('block-1', loopScope)
expect(resolver.resolve('<loop.items.0>', ctx)).toEqual({ id: 1 })
expect(resolver.resolve('<loop.items.1.id>', ctx)).toBe(2)
})
})
describe('resolve without explicit loopScope (discovery)', () => {
it.concurrent('should find loop scope from workflow config', () => {
const workflow = createTestWorkflow({
'loop-1': { nodes: ['block-1', 'block-2'] },
})
const resolver = new LoopResolver(workflow)
const loopScope = createLoopScope({ iteration: 3 })
const loopExecutions = new Map([['loop-1', loopScope]])
const ctx = createTestContext('block-1', undefined, loopExecutions)
expect(resolver.resolve('<loop.iteration>', ctx)).toBe(3)
})
it.concurrent('should return undefined when block is not in any loop', () => {
const workflow = createTestWorkflow({
'loop-1': { nodes: ['other-block'] },
})
const resolver = new LoopResolver(workflow)
const ctx = createTestContext('block-1', undefined)
expect(resolver.resolve('<loop.iteration>', ctx)).toBeUndefined()
})
it.concurrent('should return undefined when loop scope not found in executions', () => {
const workflow = createTestWorkflow({
'loop-1': { nodes: ['block-1'] },
})
const resolver = new LoopResolver(workflow)
const ctx = createTestContext('block-1', undefined, new Map())
expect(resolver.resolve('<loop.iteration>', ctx)).toBeUndefined()
})
})
describe('edge cases', () => {
it.concurrent('should return undefined for invalid loop reference (missing property)', () => {
const resolver = new LoopResolver(createTestWorkflow())
const loopScope = createLoopScope({ iteration: 0 })
const ctx = createTestContext('block-1', loopScope)
expect(resolver.resolve('<loop>', ctx)).toBeUndefined()
})
it.concurrent('should return undefined for unknown loop property', () => {
const resolver = new LoopResolver(createTestWorkflow())
const loopScope = createLoopScope({ iteration: 0 })
const ctx = createTestContext('block-1', loopScope)
expect(resolver.resolve('<loop.unknownProperty>', ctx)).toBeUndefined()
})
it.concurrent('should handle iteration index 0 correctly', () => {
const resolver = new LoopResolver(createTestWorkflow())
const loopScope = createLoopScope({ iteration: 0 })
const ctx = createTestContext('block-1', loopScope)
expect(resolver.resolve('<loop.index>', ctx)).toBe(0)
})
it.concurrent('should handle null item value', () => {
const resolver = new LoopResolver(createTestWorkflow())
const loopScope = createLoopScope({ item: null })
const ctx = createTestContext('block-1', loopScope)
expect(resolver.resolve('<loop.item>', ctx)).toBeNull()
})
it.concurrent('should handle undefined item value', () => {
const resolver = new LoopResolver(createTestWorkflow())
const loopScope = createLoopScope({ item: undefined })
const ctx = createTestContext('block-1', loopScope)
expect(resolver.resolve('<loop.item>', ctx)).toBeUndefined()
})
it.concurrent('should handle empty items array', () => {
const resolver = new LoopResolver(createTestWorkflow())
const loopScope = createLoopScope({ items: [] })
const ctx = createTestContext('block-1', loopScope)
expect(resolver.resolve('<loop.items>', ctx)).toEqual([])
})
it.concurrent('should handle primitive item value', () => {
const resolver = new LoopResolver(createTestWorkflow())
const loopScope = createLoopScope({ item: 'simple string' })
const ctx = createTestContext('block-1', loopScope)
expect(resolver.resolve('<loop.item>', ctx)).toBe('simple string')
})
it.concurrent('should handle numeric item value', () => {
const resolver = new LoopResolver(createTestWorkflow())
const loopScope = createLoopScope({ item: 42 })
const ctx = createTestContext('block-1', loopScope)
expect(resolver.resolve('<loop.item>', ctx)).toBe(42)
})
it.concurrent('should handle boolean item value', () => {
const resolver = new LoopResolver(createTestWorkflow())
const loopScope = createLoopScope({ item: true })
const ctx = createTestContext('block-1', loopScope)
expect(resolver.resolve('<loop.item>', ctx)).toBe(true)
})
it.concurrent('should handle item with array value', () => {
const resolver = new LoopResolver(createTestWorkflow())
const loopScope = createLoopScope({ item: [1, 2, 3] })
const ctx = createTestContext('block-1', loopScope)
expect(resolver.resolve('<loop.item>', ctx)).toEqual([1, 2, 3])
expect(resolver.resolve('<loop.item.0>', ctx)).toBe(1)
expect(resolver.resolve('<loop.item.2>', ctx)).toBe(3)
})
})
describe('block ID with branch suffix', () => {
it.concurrent('should handle block ID with branch suffix in loop lookup', () => {
const workflow = createTestWorkflow({
'loop-1': { nodes: ['block-1'] },
})
const resolver = new LoopResolver(workflow)
const loopScope = createLoopScope({ iteration: 2 })
const loopExecutions = new Map([['loop-1', loopScope]])
const ctx = createTestContext('block-1₍0₎', undefined, loopExecutions)
expect(resolver.resolve('<loop.iteration>', ctx)).toBe(2)
})
})
})

View File

@@ -0,0 +1,360 @@
import { loggerMock } from '@sim/testing'
import { describe, expect, it, vi } from 'vitest'
import { ParallelResolver } from './parallel'
import type { ResolutionContext } from './reference'
vi.mock('@/lib/logs/console/logger', () => loggerMock)
/**
 * Creates a minimal workflow for testing.
 * Normalizes each parallel entry so `id` defaults to its map key while the
 * distribution-related fields are carried through unchanged.
 */
function createTestWorkflow(
  parallels: Record<
    string,
    {
      nodes: string[]
      id?: string
      distribution?: any
      distributionItems?: any
      parallelType?: 'count' | 'collection'
    }
  > = {}
) {
  const normalizedParallels = Object.fromEntries(
    Object.entries(parallels).map(([key, parallel]) => [
      key,
      {
        id: parallel.id ?? key,
        nodes: parallel.nodes,
        distribution: parallel.distribution,
        distributionItems: parallel.distributionItems,
        parallelType: parallel.parallelType,
      },
    ])
  )
  return {
    version: '1.0',
    blocks: [],
    connections: [],
    loops: {},
    parallels: normalizedParallels,
  }
}
/**
 * Creates a parallel scope for runtime context.
 * One branch per item; outputs start empty and nothing is completed yet.
 */
function createParallelScope(items: any[]) {
  const scope = {
    parallelId: 'parallel-1',
    totalBranches: items.length,
    branchOutputs: new Map(),
    completedCount: 0,
    totalExpectedNodes: 1,
    items,
  }
  return scope
}
/**
 * Creates a minimal ResolutionContext for testing.
 * Omitting `parallelExecutions` yields an empty map.
 */
function createTestContext(
  currentNodeId: string,
  parallelExecutions?: Map<string, any>
): ResolutionContext {
  const context = {
    executionContext: {
      parallelExecutions: parallelExecutions ?? new Map(),
    },
    executionState: {},
    currentNodeId,
  }
  return context as ResolutionContext
}
describe('ParallelResolver', () => {
describe('canResolve', () => {
it.concurrent('should return true for parallel references', () => {
const resolver = new ParallelResolver(createTestWorkflow())
expect(resolver.canResolve('<parallel.index>')).toBe(true)
expect(resolver.canResolve('<parallel.currentItem>')).toBe(true)
expect(resolver.canResolve('<parallel.items>')).toBe(true)
})
it.concurrent('should return true for parallel references with nested paths', () => {
const resolver = new ParallelResolver(createTestWorkflow())
expect(resolver.canResolve('<parallel.currentItem.name>')).toBe(true)
expect(resolver.canResolve('<parallel.items.0>')).toBe(true)
})
it.concurrent('should return false for non-parallel references', () => {
const resolver = new ParallelResolver(createTestWorkflow())
expect(resolver.canResolve('<block.output>')).toBe(false)
expect(resolver.canResolve('<variable.myvar>')).toBe(false)
expect(resolver.canResolve('<loop.index>')).toBe(false)
expect(resolver.canResolve('plain text')).toBe(false)
expect(resolver.canResolve('{{ENV_VAR}}')).toBe(false)
})
it.concurrent('should return false for malformed references', () => {
const resolver = new ParallelResolver(createTestWorkflow())
expect(resolver.canResolve('parallel.index')).toBe(false)
expect(resolver.canResolve('<parallel.index')).toBe(false)
expect(resolver.canResolve('parallel.index>')).toBe(false)
})
})
describe('resolve index property', () => {
it.concurrent('should resolve branch index from node ID', () => {
const workflow = createTestWorkflow({
'parallel-1': { nodes: ['block-1'], distribution: ['a', 'b', 'c'] },
})
const resolver = new ParallelResolver(workflow)
const ctx = createTestContext('block-1₍0₎')
expect(resolver.resolve('<parallel.index>', ctx)).toBe(0)
})
it.concurrent('should resolve different branch indices', () => {
const workflow = createTestWorkflow({
'parallel-1': { nodes: ['block-1'], distribution: ['a', 'b', 'c'] },
})
const resolver = new ParallelResolver(workflow)
expect(resolver.resolve('<parallel.index>', createTestContext('block-1₍0₎'))).toBe(0)
expect(resolver.resolve('<parallel.index>', createTestContext('block-1₍1₎'))).toBe(1)
expect(resolver.resolve('<parallel.index>', createTestContext('block-1₍2₎'))).toBe(2)
})
it.concurrent('should return undefined when branch index cannot be extracted', () => {
const workflow = createTestWorkflow({
'parallel-1': { nodes: ['block-1'], distribution: ['a', 'b'] },
})
const resolver = new ParallelResolver(workflow)
const ctx = createTestContext('block-1')
expect(resolver.resolve('<parallel.index>', ctx)).toBeUndefined()
})
})
describe('resolve currentItem property', () => {
it.concurrent('should resolve current item from array distribution', () => {
const workflow = createTestWorkflow({
'parallel-1': { nodes: ['block-1'], distribution: ['apple', 'banana', 'cherry'] },
})
const resolver = new ParallelResolver(workflow)
expect(resolver.resolve('<parallel.currentItem>', createTestContext('block-1₍0₎'))).toBe(
'apple'
)
expect(resolver.resolve('<parallel.currentItem>', createTestContext('block-1₍1₎'))).toBe(
'banana'
)
expect(resolver.resolve('<parallel.currentItem>', createTestContext('block-1₍2₎'))).toBe(
'cherry'
)
})
it.concurrent('should resolve current item from object distribution as entries', () => {
// When an object is used as distribution, it gets converted to entries [key, value]
const workflow = createTestWorkflow({
'parallel-1': {
nodes: ['block-1'],
distribution: { key1: 'value1', key2: 'value2' },
},
})
const resolver = new ParallelResolver(workflow)
const ctx0 = createTestContext('block-1₍0₎')
const ctx1 = createTestContext('block-1₍1₎')
const item0 = resolver.resolve('<parallel.currentItem>', ctx0)
const item1 = resolver.resolve('<parallel.currentItem>', ctx1)
// Object entries are returned as [key, value] tuples
expect(item0).toEqual(['key1', 'value1'])
expect(item1).toEqual(['key2', 'value2'])
})
it.concurrent('should resolve current item with nested path', () => {
const workflow = createTestWorkflow({
'parallel-1': {
nodes: ['block-1'],
distribution: [
{ name: 'Alice', age: 30 },
{ name: 'Bob', age: 25 },
],
},
})
const resolver = new ParallelResolver(workflow)
expect(resolver.resolve('<parallel.currentItem.name>', createTestContext('block-1₍0₎'))).toBe(
'Alice'
)
expect(resolver.resolve('<parallel.currentItem.age>', createTestContext('block-1₍1₎'))).toBe(
25
)
})
it.concurrent('should use runtime parallelScope items when available', () => {
const workflow = createTestWorkflow({
'parallel-1': { nodes: ['block-1'], distribution: ['static1', 'static2'] },
})
const resolver = new ParallelResolver(workflow)
const parallelScope = createParallelScope(['runtime1', 'runtime2', 'runtime3'])
const parallelExecutions = new Map([['parallel-1', parallelScope]])
const ctx = createTestContext('block-1₍1₎', parallelExecutions)
expect(resolver.resolve('<parallel.currentItem>', ctx)).toBe('runtime2')
})
})
describe('resolve items property', () => {
it.concurrent('should resolve all items from array distribution', () => {
const workflow = createTestWorkflow({
'parallel-1': { nodes: ['block-1'], distribution: [1, 2, 3] },
})
const resolver = new ParallelResolver(workflow)
const ctx = createTestContext('block-1₍0₎')
expect(resolver.resolve('<parallel.items>', ctx)).toEqual([1, 2, 3])
})
it.concurrent('should resolve items with nested path', () => {
const workflow = createTestWorkflow({
'parallel-1': {
nodes: ['block-1'],
distribution: [{ id: 1 }, { id: 2 }, { id: 3 }],
},
})
const resolver = new ParallelResolver(workflow)
const ctx = createTestContext('block-1₍0₎')
expect(resolver.resolve('<parallel.items.1>', ctx)).toEqual({ id: 2 })
expect(resolver.resolve('<parallel.items.1.id>', ctx)).toBe(2)
})
it.concurrent('should use runtime parallelScope items when available', () => {
const workflow = createTestWorkflow({
'parallel-1': { nodes: ['block-1'], distribution: ['static'] },
})
const resolver = new ParallelResolver(workflow)
const parallelScope = createParallelScope(['runtime1', 'runtime2'])
const parallelExecutions = new Map([['parallel-1', parallelScope]])
const ctx = createTestContext('block-1₍0₎', parallelExecutions)
expect(resolver.resolve('<parallel.items>', ctx)).toEqual(['runtime1', 'runtime2'])
})
})
describe('edge cases', () => {
it.concurrent(
'should return undefined for invalid parallel reference (missing property)',
() => {
const resolver = new ParallelResolver(createTestWorkflow())
const ctx = createTestContext('block-1₍0₎')
expect(resolver.resolve('<parallel>', ctx)).toBeUndefined()
}
)
it.concurrent('should return undefined for unknown parallel property', () => {
const workflow = createTestWorkflow({
'parallel-1': { nodes: ['block-1'], distribution: ['a'] },
})
const resolver = new ParallelResolver(workflow)
const ctx = createTestContext('block-1₍0₎')
expect(resolver.resolve('<parallel.unknownProperty>', ctx)).toBeUndefined()
})
it.concurrent('should return undefined when block is not in any parallel', () => {
const workflow = createTestWorkflow({
'parallel-1': { nodes: ['other-block'], distribution: ['a'] },
})
const resolver = new ParallelResolver(workflow)
const ctx = createTestContext('block-1₍0₎')
expect(resolver.resolve('<parallel.index>', ctx)).toBeUndefined()
})
it.concurrent('should return undefined when parallel config not found', () => {
const workflow = createTestWorkflow({})
const resolver = new ParallelResolver(workflow)
const ctx = createTestContext('block-1₍0₎')
expect(resolver.resolve('<parallel.index>', ctx)).toBeUndefined()
})
it.concurrent('should handle empty distribution array', () => {
const workflow = createTestWorkflow({
'parallel-1': { nodes: ['block-1'], distribution: [] },
})
const resolver = new ParallelResolver(workflow)
const ctx = createTestContext('block-1₍0₎')
expect(resolver.resolve('<parallel.items>', ctx)).toEqual([])
expect(resolver.resolve('<parallel.currentItem>', ctx)).toBeUndefined()
})
it.concurrent('should handle JSON string distribution', () => {
const workflow = createTestWorkflow({
'parallel-1': { nodes: ['block-1'], distribution: '["x", "y", "z"]' },
})
const resolver = new ParallelResolver(workflow)
const ctx = createTestContext('block-1₍1₎')
expect(resolver.resolve('<parallel.items>', ctx)).toEqual(['x', 'y', 'z'])
expect(resolver.resolve('<parallel.currentItem>', ctx)).toBe('y')
})
it.concurrent('should handle JSON string with single quotes', () => {
const workflow = createTestWorkflow({
'parallel-1': { nodes: ['block-1'], distribution: "['a', 'b']" },
})
const resolver = new ParallelResolver(workflow)
const ctx = createTestContext('block-1₍0₎')
expect(resolver.resolve('<parallel.items>', ctx)).toEqual(['a', 'b'])
})
it.concurrent('should return empty array for reference strings', () => {
const workflow = createTestWorkflow({
'parallel-1': { nodes: ['block-1'], distribution: '<block.output>' },
})
const resolver = new ParallelResolver(workflow)
const ctx = createTestContext('block-1₍0₎')
expect(resolver.resolve('<parallel.items>', ctx)).toEqual([])
})
it.concurrent('should handle distributionItems property as fallback', () => {
const workflow = createTestWorkflow({
'parallel-1': { nodes: ['block-1'], distributionItems: ['fallback1', 'fallback2'] },
})
const resolver = new ParallelResolver(workflow)
const ctx = createTestContext('block-1₍0₎')
expect(resolver.resolve('<parallel.items>', ctx)).toEqual(['fallback1', 'fallback2'])
})
})
describe('nested parallel blocks', () => {
it.concurrent('should resolve for block with multiple parallel parents', () => {
const workflow = createTestWorkflow({
'parallel-1': { nodes: ['block-1', 'block-2'], distribution: ['p1', 'p2'] },
'parallel-2': { nodes: ['block-3'], distribution: ['p3', 'p4'] },
})
const resolver = new ParallelResolver(workflow)
expect(resolver.resolve('<parallel.currentItem>', createTestContext('block-1₍0₎'))).toBe('p1')
expect(resolver.resolve('<parallel.currentItem>', createTestContext('block-3₍1₎'))).toBe('p4')
})
})
})

View File

@@ -0,0 +1,200 @@
import { describe, expect, it } from 'vitest'
import { navigatePath } from './reference'
describe('navigatePath', () => {
describe('basic property access', () => {
  it.concurrent('should access top-level property', () => {
    const obj = { name: 'test', value: 42 }
    expect(navigatePath(obj, ['name'])).toBe('test')
    expect(navigatePath(obj, ['value'])).toBe(42)
  })

  it.concurrent('should access nested properties', () => {
    expect(navigatePath({ a: { b: { c: 'deep' } } }, ['a', 'b', 'c'])).toBe('deep')
  })

  it.concurrent('should return entire object for empty path', () => {
    const obj = { name: 'test' }
    // An empty path means "no navigation": the root object comes back.
    expect(navigatePath(obj, [])).toEqual(obj)
  })

  it.concurrent('should handle deeply nested objects', () => {
    const obj = { level1: { level2: { level3: { level4: { value: 'found' } } } } }
    expect(navigatePath(obj, ['level1', 'level2', 'level3', 'level4', 'value'])).toBe('found')
  })
})
describe('array indexing', () => {
  it.concurrent('should access array elements with numeric string index', () => {
    const obj = { items: ['a', 'b', 'c'] }
    // Indices arrive as strings, matching how reference paths are tokenized.
    expect(navigatePath(obj, ['items', '0'])).toBe('a')
    expect(navigatePath(obj, ['items', '1'])).toBe('b')
    expect(navigatePath(obj, ['items', '2'])).toBe('c')
  })

  it.concurrent('should access array elements with bracket notation', () => {
    const obj = { items: [{ name: 'first' }, { name: 'second' }] }
    expect(navigatePath(obj, ['items[0]', 'name'])).toBe('first')
    expect(navigatePath(obj, ['items[1]', 'name'])).toBe('second')
  })

  it.concurrent('should access nested arrays', () => {
    const matrix = [
      [1, 2],
      [3, 4],
      [5, 6],
    ]
    expect(navigatePath({ matrix }, ['matrix', '0', '0'])).toBe(1)
    expect(navigatePath({ matrix }, ['matrix', '1', '1'])).toBe(4)
    expect(navigatePath({ matrix }, ['matrix', '2', '0'])).toBe(5)
  })

  it.concurrent('should access array element properties', () => {
    const obj = {
      users: [
        { id: 1, name: 'Alice' },
        { id: 2, name: 'Bob' },
      ],
    }
    expect(navigatePath(obj, ['users', '0', 'name'])).toBe('Alice')
    expect(navigatePath(obj, ['users', '1', 'id'])).toBe(2)
  })
})
describe('edge cases', () => {
  it.concurrent('should return undefined for non-existent property', () => {
    expect(navigatePath({ name: 'test' }, ['nonexistent'])).toBeUndefined()
  })

  it.concurrent('should return undefined for path through null', () => {
    // Navigation stops (without throwing) as soon as it hits null.
    expect(navigatePath({ data: null }, ['data', 'value'])).toBeUndefined()
  })

  it.concurrent('should return undefined for path through undefined', () => {
    const obj: Record<string, any> = { data: undefined }
    expect(navigatePath(obj, ['data', 'value'])).toBeUndefined()
  })

  it.concurrent('should return null when accessing null property', () => {
    // A null leaf is a real value and is returned as-is, not as undefined.
    expect(navigatePath({ value: null }, ['value'])).toBeNull()
  })

  it.concurrent('should return undefined for out of bounds array access', () => {
    expect(navigatePath({ items: ['a', 'b'] }, ['items', '10'])).toBeUndefined()
  })

  it.concurrent('should return undefined when accessing array property on non-array', () => {
    expect(navigatePath({ data: 'string' }, ['data', '0'])).toBeUndefined()
  })

  it.concurrent('should handle empty object', () => {
    expect(navigatePath({}, ['any'])).toBeUndefined()
  })

  it.concurrent('should handle object with empty string key', () => {
    expect(navigatePath({ '': 'empty key value' }, [''])).toBe('empty key value')
  })
})
describe('mixed access patterns', () => {
  it.concurrent('should handle complex nested structures', () => {
    const tree = {
      users: [
        {
          name: 'Alice',
          addresses: [
            { city: 'NYC', zip: '10001' },
            { city: 'LA', zip: '90001' },
          ],
        },
        {
          name: 'Bob',
          addresses: [{ city: 'Chicago', zip: '60601' }],
        },
      ],
    }
    expect(navigatePath(tree, ['users', '0', 'name'])).toBe('Alice')
    expect(navigatePath(tree, ['users', '0', 'addresses', '1', 'city'])).toBe('LA')
    expect(navigatePath(tree, ['users', '1', 'addresses', '0', 'zip'])).toBe('60601')
  })
  it.concurrent('should return undefined for numeric keys on non-array objects', () => {
    // Numeric string segments are only treated as indices into arrays.
    // When the target is a plain object (even with '0'/'1' keys), the numeric
    // check wins and navigatePath yields undefined.
    const numericKeyed = { data: { '0': 'zero', '1': 'one' } }
    expect(navigatePath(numericKeyed, ['data', '0'])).toBeUndefined()
    expect(navigatePath(numericKeyed, ['data', '1'])).toBeUndefined()
  })
  it.concurrent('should access non-numeric string keys', () => {
    const named = { data: { first: 'value1', second: 'value2' } }
    expect(navigatePath(named, ['data', 'first'])).toBe('value1')
    expect(navigatePath(named, ['data', 'second'])).toBe('value2')
  })
})
describe('special value types', () => {
  it.concurrent('should return boolean values', () => {
    const flags = { active: true, disabled: false }
    expect(navigatePath(flags, ['active'])).toBe(true)
    expect(navigatePath(flags, ['disabled'])).toBe(false)
  })
  it.concurrent('should return numeric values including zero', () => {
    // Falsy numbers (0) and negatives must survive navigation untouched.
    const numbers = { count: 0, value: -5, decimal: 3.14 }
    expect(navigatePath(numbers, ['count'])).toBe(0)
    expect(navigatePath(numbers, ['value'])).toBe(-5)
    expect(navigatePath(numbers, ['decimal'])).toBe(3.14)
  })
  it.concurrent('should return empty string', () => {
    expect(navigatePath({ text: '' }, ['text'])).toBe('')
  })
  it.concurrent('should return empty array', () => {
    expect(navigatePath({ items: [] }, ['items'])).toEqual([])
  })
  it.concurrent('should return function values', () => {
    // Functions are returned by reference, not invoked.
    const fn = () => 'test'
    expect(navigatePath({ callback: fn }, ['callback'])).toBe(fn)
  })
})
describe('bracket notation edge cases', () => {
  it.concurrent('should handle bracket notation with property access', () => {
    const source = { data: [{ value: 100 }, { value: 200 }] }
    // A bracket segment alone resolves to the element object itself.
    expect(navigatePath(source, ['data[0]'])).toEqual({ value: 100 })
  })
  it.concurrent('should return undefined for bracket notation on non-existent property', () => {
    expect(navigatePath({ data: [1, 2, 3] }, ['nonexistent[0]'])).toBeUndefined()
  })
  it.concurrent('should return undefined for bracket notation with null property', () => {
    expect(navigatePath({ data: null }, ['data[0]'])).toBeUndefined()
  })
  it.concurrent('should return undefined for bracket notation on non-array', () => {
    expect(navigatePath({ data: 'string' }, ['data[0]'])).toBeUndefined()
  })
})
})

View File

@@ -0,0 +1,105 @@
import { keepPreviousData, useMutation, useQuery, useQueryClient } from '@tanstack/react-query'
import { createLogger } from '@/lib/logs/console/logger'
import { API_ENDPOINTS } from '@/stores/constants'
const logger = createLogger('BYOKKeysQueries')
/** Model providers for which a workspace can bring its own API key (BYOK). */
export type BYOKProviderId = 'openai' | 'anthropic' | 'google' | 'mistral'
/** BYOK key record as returned by the workspace BYOK keys endpoint. */
export interface BYOKKey {
  id: string
  providerId: BYOKProviderId
  // Masked form for display — presumably the raw key is never returned; verify against the API route
  maskedKey: string
  // User id of the creator, when known
  createdBy: string | null
  createdAt: string
  updatedAt: string
}
/** React Query cache keys for BYOK key data, scoped per workspace. */
export const byokKeysKeys = {
  all: ['byok-keys'] as const,
  workspace: (workspaceId: string) => [...byokKeysKeys.all, 'workspace', workspaceId] as const,
}
/**
 * Loads all BYOK keys for a workspace.
 *
 * @param workspaceId - Workspace whose keys are fetched.
 * @returns The list of stored BYOK key records.
 * @throws Error with the server-provided message (or HTTP status text) on failure.
 */
async function fetchBYOKKeys(workspaceId: string): Promise<BYOKKey[]> {
  const response = await fetch(API_ENDPOINTS.WORKSPACE_BYOK_KEYS(workspaceId))
  if (!response.ok) {
    // Read the JSON error body when present so callers see the server's reason,
    // mirroring the error handling of the mutation hooks in this file. The
    // catch guards against non-JSON error bodies.
    const data = await response.json().catch(() => ({}))
    throw new Error(data.error || `Failed to load BYOK keys: ${response.statusText}`)
  }
  const { keys } = await response.json()
  return keys
}
/**
 * React Query hook returning the BYOK keys stored for a workspace.
 * Disabled until a workspace id is available; previous data is kept as
 * placeholder while a different workspace's keys load.
 */
export function useBYOKKeys(workspaceId: string) {
  const hasWorkspace = Boolean(workspaceId)
  return useQuery({
    queryKey: byokKeysKeys.workspace(workspaceId),
    queryFn: () => fetchBYOKKeys(workspaceId),
    enabled: hasWorkspace,
    staleTime: 60_000, // one minute
    placeholderData: keepPreviousData,
  })
}
/** Arguments for the upsert-BYOK-key mutation. */
interface UpsertBYOKKeyParams {
  workspaceId: string
  providerId: BYOKProviderId
  // Plaintext provider API key to store for the workspace
  apiKey: string
}
/**
 * Mutation hook that creates or updates a workspace's BYOK key for a provider.
 * On success, the workspace's cached key list is invalidated so the UI refreshes.
 */
export function useUpsertBYOKKey() {
  const queryClient = useQueryClient()
  // POSTs the key; the server decides whether this is a create or an update.
  const saveKey = async ({ workspaceId, providerId, apiKey }: UpsertBYOKKeyParams) => {
    const response = await fetch(API_ENDPOINTS.WORKSPACE_BYOK_KEYS(workspaceId), {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({ providerId, apiKey }),
    })
    if (!response.ok) {
      // Prefer the server's error message; fall back to the HTTP status text.
      const data = await response.json().catch(() => ({}))
      throw new Error(data.error || `Failed to save BYOK key: ${response.statusText}`)
    }
    logger.info(`Saved BYOK key for ${providerId} in workspace ${workspaceId}`)
    return await response.json()
  }
  return useMutation({
    mutationFn: saveKey,
    onSuccess: (_data, { workspaceId }) => {
      queryClient.invalidateQueries({ queryKey: byokKeysKeys.workspace(workspaceId) })
    },
  })
}
/** Arguments for the delete-BYOK-key mutation. */
interface DeleteBYOKKeyParams {
  workspaceId: string
  providerId: BYOKProviderId
}
/**
 * Mutation hook that removes a workspace's BYOK key for a provider.
 * On success, the workspace's cached key list is invalidated so the UI refreshes.
 */
export function useDeleteBYOKKey() {
  const queryClient = useQueryClient()
  // The provider to delete is sent in the DELETE body, matching the API route.
  const removeKey = async ({ workspaceId, providerId }: DeleteBYOKKeyParams) => {
    const response = await fetch(API_ENDPOINTS.WORKSPACE_BYOK_KEYS(workspaceId), {
      method: 'DELETE',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({ providerId }),
    })
    if (!response.ok) {
      // Prefer the server's error message; fall back to the HTTP status text.
      const data = await response.json().catch(() => ({}))
      throw new Error(data.error || `Failed to delete BYOK key: ${response.statusText}`)
    }
    logger.info(`Deleted BYOK key for ${providerId} from workspace ${workspaceId}`)
    return await response.json()
  }
  return useMutation({
    mutationFn: removeKey,
    onSuccess: (_data, { workspaceId }) => {
      queryClient.invalidateQueries({ queryKey: byokKeysKeys.workspace(workspaceId) })
    },
  })
}

View File

@@ -0,0 +1,184 @@
import { useMutation, useQuery, useQueryClient } from '@tanstack/react-query'
import { createLogger } from '@/lib/logs/console/logger'
import { parseCronToHumanReadable } from '@/lib/workflows/schedules/utils'
const logger = createLogger('ScheduleQueries')
/** React Query cache keys for schedule data, scoped per (workflow, block) pair. */
export const scheduleKeys = {
  all: ['schedules'] as const,
  schedule: (workflowId: string, blockId: string) =>
    [...scheduleKeys.all, workflowId, blockId] as const,
}
/** Raw schedule record as returned by `GET /api/schedules`. */
export interface ScheduleData {
  id: string
  status: 'active' | 'disabled'
  // Cron source of the schedule; null presumably means no cron is configured — TODO confirm against the API
  cronExpression: string | null
  // ISO timestamp strings, or null when the schedule has not run / is not scheduled
  nextRunAt: string | null
  lastRanAt: string | null
  timezone: string
  // Failure counter; useScheduleInfo defaults this to 0 when falsy
  failedCount: number
}
/** Display-ready schedule info derived by useScheduleInfo. */
export interface ScheduleInfo {
  id: string
  status: 'active' | 'disabled'
  // Human-readable description derived from cronExpression
  scheduleTiming: string
  nextRunAt: string | null
  lastRanAt: string | null
  timezone: string
  // Convenience flag: true when status === 'disabled'
  isDisabled: boolean
  failedCount: number
}
/**
 * Fetches the schedule record for a specific workflow block, bypassing all
 * HTTP caching so the UI always sees the current state.
 *
 * Returns null both when no schedule exists and when the request fails —
 * callers treat the two cases the same way.
 */
async function fetchSchedule(workflowId: string, blockId: string): Promise<ScheduleData | null> {
  const query = new URLSearchParams({ workflowId, blockId })
  const response = await fetch(`/api/schedules?${query}`, {
    cache: 'no-store',
    headers: { 'Cache-Control': 'no-cache' },
  })
  if (!response.ok) {
    return null
  }
  const payload = await response.json()
  return payload.schedule || null
}
/**
 * React Query hook for a workflow block's schedule.
 * Runs only once both ids are known and the caller has not disabled it.
 */
export function useScheduleQuery(
  workflowId: string | undefined,
  blockId: string | undefined,
  options?: { enabled?: boolean }
) {
  const haveIds = Boolean(workflowId) && Boolean(blockId)
  return useQuery({
    queryKey: scheduleKeys.schedule(workflowId ?? '', blockId ?? ''),
    // Safe to assert non-null: the query is disabled until both ids exist.
    queryFn: () => fetchSchedule(workflowId!, blockId!),
    enabled: haveIds && (options?.enabled ?? true),
    staleTime: 30_000, // 30 seconds
    retry: false,
  })
}
/**
 * Derives display-ready schedule info (human-readable timing, disabled flag)
 * for a schedule block.
 *
 * Returns a null scheduleInfo when the block is not a schedule block or the
 * schedule data has not loaded yet; refetch always re-runs the underlying query.
 */
export function useScheduleInfo(
  workflowId: string | undefined,
  blockId: string | undefined,
  blockType: string,
  options?: { timezone?: string }
): {
  scheduleInfo: ScheduleInfo | null
  isLoading: boolean
  refetch: () => void
} {
  // Only schedule blocks have schedules; other block types never query.
  const { data, isLoading, refetch } = useScheduleQuery(workflowId, blockId, {
    enabled: blockType === 'schedule',
  })
  if (!data) {
    return { scheduleInfo: null, isLoading, refetch }
  }
  // Caller-supplied timezone wins, then the schedule's own, then UTC.
  const timezone = options?.timezone || data.timezone || 'UTC'
  const scheduleTiming = data.cronExpression
    ? parseCronToHumanReadable(data.cronExpression, timezone)
    : 'Unknown schedule'
  const scheduleInfo: ScheduleInfo = {
    id: data.id,
    status: data.status,
    scheduleTiming,
    nextRunAt: data.nextRunAt,
    lastRanAt: data.lastRanAt,
    timezone,
    isDisabled: data.status === 'disabled',
    failedCount: data.failedCount || 0,
  }
  return { scheduleInfo, isLoading, refetch }
}
/**
 * Mutation that re-enables a disabled schedule, then invalidates the cached
 * schedule for the owning block so the UI reflects the new state.
 */
export function useReactivateSchedule() {
  const queryClient = useQueryClient()
  interface ReactivateParams {
    scheduleId: string
    workflowId: string
    blockId: string
  }
  // PUTs the reactivate action; workflow/block ids are passed through so
  // onSuccess knows which cache entry to invalidate.
  const reactivate = async ({ scheduleId, workflowId, blockId }: ReactivateParams) => {
    const response = await fetch(`/api/schedules/${scheduleId}`, {
      method: 'PUT',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({ action: 'reactivate' }),
    })
    if (!response.ok) {
      throw new Error('Failed to reactivate schedule')
    }
    return { workflowId, blockId }
  }
  return useMutation({
    mutationFn: reactivate,
    onSuccess: ({ workflowId, blockId }) => {
      logger.info('Schedule reactivated', { workflowId, blockId })
      queryClient.invalidateQueries({ queryKey: scheduleKeys.schedule(workflowId, blockId) })
    },
    onError: (error) => {
      logger.error('Failed to reactivate schedule', { error })
    },
  })
}
/**
 * Mutation that redeploys a workflow (which recreates its schedule), then
 * invalidates the cached schedule for the owning block.
 */
export function useRedeployWorkflowSchedule() {
  const queryClient = useQueryClient()
  return useMutation({
    mutationFn: async ({ workflowId, blockId }: { workflowId: string; blockId: string }) => {
      const response = await fetch(`/api/workflows/${workflowId}/deploy`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({ deployChatEnabled: false }),
      })
      if (!response.ok) {
        // Guard the body parse: a non-JSON error response (e.g. an HTML 5xx
        // page) must not mask the deploy failure with a JSON SyntaxError.
        // Matches the error handling used by the other mutation hooks.
        const errorData = await response.json().catch(() => ({}))
        throw new Error(errorData.error || 'Failed to redeploy workflow')
      }
      // Pass ids through so onSuccess can target the right cache entry.
      return { workflowId, blockId }
    },
    onSuccess: ({ workflowId, blockId }) => {
      logger.info('Workflow redeployed for schedule reset', { workflowId, blockId })
      queryClient.invalidateQueries({
        queryKey: scheduleKeys.schedule(workflowId, blockId),
      })
    },
    onError: (error) => {
      logger.error('Failed to redeploy workflow', { error })
    },
  })
}

View File

@@ -2,6 +2,7 @@ import { useCallback, useEffect, useRef } from 'react'
import type { Edge } from 'reactflow'
import { useSession } from '@/lib/auth/auth-client'
import { createLogger } from '@/lib/logs/console/logger'
import { DEFAULT_DUPLICATE_OFFSET } from '@/lib/workflows/autolayout/constants'
import { getBlockOutputs } from '@/lib/workflows/blocks/block-outputs'
import { TriggerUtils } from '@/lib/workflows/triggers/triggers'
import { useSocket } from '@/app/workspace/providers/socket-provider'
@@ -1326,8 +1327,8 @@ export function useCollaborativeWorkflow() {
// Generate new ID and calculate position
const newId = crypto.randomUUID()
const offsetPosition = {
x: sourceBlock.position.x + 250,
y: sourceBlock.position.y + 20,
x: sourceBlock.position.x + DEFAULT_DUPLICATE_OFFSET.x,
y: sourceBlock.position.y + DEFAULT_DUPLICATE_OFFSET.y,
}
const newName = getUniqueBlockName(sourceBlock.name, workflowStore.blocks)

Some files were not shown because too many files have changed in this diff Show More