Compare commits

..

40 Commits

Author SHA1 Message Date
Waleed
2175fd1106 v0.4.4: database config updates 2025-10-02 20:08:09 -07:00
Waleed
10692b5e5a fix(db): remove overly complex db connection logic (#1538) 2025-10-02 19:54:32 -07:00
Waleed
62298bf094 fix(db): added database config to drizzle.config in app container (#1536) 2025-10-02 18:09:27 -07:00
Waleed
5f1518ffd9 fix(db): added SSL config to migrations container (#1535) 2025-10-02 18:04:31 -07:00
Waleed
cae0e85826 v0.4.3: posthog, docs updates, search modal improvements 2025-10-02 17:02:48 -07:00
Waleed
fa9c97816b fix(db): add more options for SSL connection, add envvar for base64 db cert (#1533) 2025-10-02 15:53:45 -07:00
Vikhyath Mondreti
4bc37db547 feat(copilot): JSON sanitization logic + operations sequence diff correctness (#1521)
* add state sending capability

* progress

* add ability to add title and description to workflow state

* progress in language

* fix

* cleanup code

* fix type issue

* fix subflow deletion case

* Workflow console tool

* fix lint

---------

Co-authored-by: Siddharth Ganesan <siddharthganesan@gmail.com>
2025-10-02 15:11:03 -07:00
Waleed
15138629cb improvement(performance): remove writes to workflow updated_at on position updates for blocks, edges, & subflows (#1531)
* improvement(performance): remove writes to workflow updated_at on position updates for blocks, edges, & subflows

* update query pattern for logs routes
2025-10-02 11:53:50 -07:00
Waleed
ace83ebcae feat(cmdk): added knowledgebases to the cmdk modal (#1530) 2025-10-01 21:21:42 -07:00
Waleed
b33ae5bff9 fix(fumadocs): fixed client-side export on fumadocs (#1529) 2025-10-01 20:52:20 -07:00
Waleed
dc6052578d fix(kb): removed filename constraint from knowledgebase doc names (#1527) 2025-10-01 20:39:56 -07:00
Waleed
4adbae03e7 chore(deps): update fumadocs (#1525) 2025-10-01 20:28:12 -07:00
Vikhyath Mondreti
3509ce8ce4 fix(autolayout): type issue if workflow deployed + remove dead state code (#1524)
* fix(autolayout): type issue if workflow deployed

* remove dead code hasActiveWebhook field
2025-10-01 20:18:29 -07:00
Waleed
7aae108b87 feat(posthog): added posthog for analytics (#1523)
* feat(posthog): added posthog for analytics

* added envvars to env.ts
2025-10-01 20:12:26 -07:00
Waleed
980a6d8347 improvement(db): enforce SSL everywhere where a DB connection is established (#1522)
* improvement(db): enforce SSL everywhere where a DB connection is established

* remove extraneous comment
2025-10-01 19:09:08 -07:00
Vikhyath Mondreti
745eaff622 v0.4.2: autolayout improvements, variable resolution, CI/CD, deployed chat, router block fixes 2025-10-01 17:27:35 -07:00
Vikhyath Mondreti
35d857ef2e fix(trigger): inject project id env var in correctly (#1520) 2025-10-01 17:16:28 -07:00
Waleed
6e63eafb79 improvement(db): remove vercel, remove railway, remove crons, improve DB connection config (#1519)
* improvement(db): remove vercel, remove railway, remove crons, improve DB connection config

* remove NEXT_PUBLIC_VERCEL_URL

* remove db url fallbacks

* remove railway & more vercel stuff

---------

Co-authored-by: waleed <waleed>
2025-10-01 16:37:13 -07:00
Waleed
896f7bb0a0 fix(ci): update trigger.dev ci to only push to staging on merge to staging & for prod as well (#1518) 2025-10-01 13:22:04 -07:00
Waleed
97f69a24e1 fix(redirects): update middleware to allow access to /chat regardless of auth status (#1516) 2025-10-01 10:46:18 -07:00
Waleed
1a2c4040aa improvement(trigger): increase maxDuration for background tasks to 10 min to match sync API executions (#1504)
* improvement(trigger): increase maxDuration for background tasks to 10 min to match sync API executions

* add trigger proj id
2025-10-01 10:40:18 -07:00
Vikhyath Mondreti
4ad9be0836 fix(router): use getBaseUrl() helper (#1515)
* fix(router): use getBaseUrl() helper

* add existence check
2025-10-01 10:39:57 -07:00
Vikhyath Mondreti
0bf2bce368 improvement(var-resolution): resolve variables with block name check and consolidate code (#1469)
* improvement(var-resolution): resolve variables with block name check and consolidate code

* fix tests

* fix type error

* fix var highlighting in kb tags

* fix kb tags
2025-09-30 19:20:35 -07:00
Vikhyath Mondreti
0d881ecc00 fix(deployed-version-check): check deployed version existence pre-queuing (#1508)
* fix(deployed-version-check): check deployed version existence pre-queuing

* fix tests

* fix edge case
2025-09-30 19:20:21 -07:00
Siddharth Ganesan
7e6a5dc7e2 Fix/remove trigger promotion (#1513)
* Revert trigger promotion

* Move trigger

* Fix ci
2025-09-30 18:29:28 -07:00
Siddharth Ganesan
c1a3500bde fix(ci): capture correct deployment version output (#1512)
* Capture correct deployment version output

* Add trigger access token to each step

* Use correct arn
2025-09-30 16:36:19 -07:00
Siddharth Ganesan
561b6f2778 fix(ci): fix trigger version capture 2025-09-30 16:20:25 -07:00
Siddharth Ganesan
cdfee16b8a Fix trigger ci creds (#1510) 2025-09-30 14:03:38 -07:00
Siddharth Ganesan
9f6cb1becf fix(ci): trigger permissions 2025-09-30 13:53:02 -07:00
Siddharth Ganesan
dca8745c44 fix(ci): add skip promotion to trigger ci 2025-09-30 13:37:07 -07:00
Vikhyath Mondreti
c35c8d1f31 improvement(autolayout): use live block heights / widths for autolayout to prevent overlaps (#1505)
* improvement(autolayout): use live block heights / widths for autolayout to prevent overlaps

* improve layering algo for multiple trigger setting

* remove console logs

* add type annotation
2025-09-30 13:24:19 -07:00
Siddharth Ganesan
87c00cec6d improvement(ci): trigger.dev pushes (#1506)
* Fix trigger workflow ci

* Update trigger location
2025-09-30 13:22:24 -07:00
Vikhyath Mondreti
17edf0405b improvement(triggers): uuid, autolayout, copilot context (#1503)
* make trigger select uuid consistent with sidebar selection

* add trigger allowed flag for core triggers

* fix autolayout with new triggers
2025-09-30 11:31:54 -07:00
Siddharth Ganesan
79461840c3 fix(migrations): make sso migration idempotent 2025-09-30 11:04:44 -07:00
Siddharth Ganesan
e76fc8c2da Remove migrations ci (#1501) 2025-09-30 10:43:41 -07:00
Waleed
e9150a53e3 feat(i18n): update translations (#1496) 2025-09-30 09:44:50 -07:00
Waleed
f9f84111cb v0.4.1: docker fixes, deployed state improvements 2025-09-30 01:16:28 -07:00
Waleed
01ffee8e7c fix(deployed): support internal JWT for deployed child workflow executions (#1498) 2025-09-30 01:14:32 -07:00
Siddharth Ganesan
367189fe15 fix(ci): fix docker manifest build (#1495) 2025-09-29 20:57:00 -07:00
Siddharth Ganesan
7de9e5fb19 fix(ci): fix docker manifest build 2025-09-29 20:55:52 -07:00
118 changed files with 3204 additions and 1730 deletions

View File

@@ -10,7 +10,6 @@ services:
environment:
- NODE_ENV=development
- DATABASE_URL=postgresql://postgres:postgres@db:5432/simstudio
- POSTGRES_URL=postgresql://postgres:postgres@db:5432/simstudio
- BETTER_AUTH_URL=http://localhost:3000
- NEXT_PUBLIC_APP_URL=http://localhost:3000
- BUN_INSTALL_CACHE_DIR=/home/bun/.bun/cache

View File

@@ -16,43 +16,200 @@ jobs:
uses: ./.github/workflows/test-build.yml
secrets: inherit
# Build and push images (ECR for staging, ECR + GHCR for main)
build-images:
name: Build Images
# Build AMD64 images and push to ECR immediately (+ GHCR for main)
build-amd64:
name: Build AMD64
needs: test-build
if: github.event_name == 'push' && (github.ref == 'refs/heads/main' || github.ref == 'refs/heads/staging')
uses: ./.github/workflows/images.yml
secrets: inherit
runs-on: blacksmith-4vcpu-ubuntu-2404
permissions:
contents: read
packages: write
id-token: write
strategy:
fail-fast: false
matrix:
include:
- dockerfile: ./docker/app.Dockerfile
ghcr_image: ghcr.io/simstudioai/simstudio
ecr_repo_secret: ECR_APP
- dockerfile: ./docker/db.Dockerfile
ghcr_image: ghcr.io/simstudioai/migrations
ecr_repo_secret: ECR_MIGRATIONS
- dockerfile: ./docker/realtime.Dockerfile
ghcr_image: ghcr.io/simstudioai/realtime
ecr_repo_secret: ECR_REALTIME
steps:
- name: Checkout code
uses: actions/checkout@v4
# Deploy Trigger.dev (after builds complete)
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
role-to-assume: ${{ github.ref == 'refs/heads/main' && secrets.AWS_ROLE_TO_ASSUME || secrets.STAGING_AWS_ROLE_TO_ASSUME }}
aws-region: ${{ github.ref == 'refs/heads/main' && secrets.AWS_REGION || secrets.STAGING_AWS_REGION }}
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@v2
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Login to GHCR
if: github.ref == 'refs/heads/main'
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Set up Docker Buildx
uses: useblacksmith/setup-docker-builder@v1
- name: Generate tags
id: meta
run: |
ECR_REGISTRY="${{ steps.login-ecr.outputs.registry }}"
ECR_REPO="${{ secrets[matrix.ecr_repo_secret] }}"
GHCR_IMAGE="${{ matrix.ghcr_image }}"
# ECR tags (always build for ECR)
if [ "${{ github.ref }}" = "refs/heads/main" ]; then
ECR_TAG="latest"
else
ECR_TAG="staging"
fi
ECR_IMAGE="${ECR_REGISTRY}/${ECR_REPO}:${ECR_TAG}"
# Build tags list
TAGS="${ECR_IMAGE}"
# Add GHCR tags only for main branch
if [ "${{ github.ref }}" = "refs/heads/main" ]; then
GHCR_AMD64="${GHCR_IMAGE}:latest-amd64"
GHCR_SHA="${GHCR_IMAGE}:${{ github.sha }}-amd64"
TAGS="${TAGS},$GHCR_AMD64,$GHCR_SHA"
fi
echo "tags=${TAGS}" >> $GITHUB_OUTPUT
- name: Build and push images
uses: useblacksmith/build-push-action@v2
with:
context: .
file: ${{ matrix.dockerfile }}
platforms: linux/amd64
push: true
tags: ${{ steps.meta.outputs.tags }}
provenance: false
sbom: false
# Build ARM64 images for GHCR (main branch only, runs in parallel)
build-ghcr-arm64:
name: Build ARM64 (GHCR Only)
needs: test-build
runs-on: linux-arm64-8-core
if: github.event_name == 'push' && github.ref == 'refs/heads/main'
permissions:
contents: read
packages: write
strategy:
fail-fast: false
matrix:
include:
- dockerfile: ./docker/app.Dockerfile
image: ghcr.io/simstudioai/simstudio
- dockerfile: ./docker/db.Dockerfile
image: ghcr.io/simstudioai/migrations
- dockerfile: ./docker/realtime.Dockerfile
image: ghcr.io/simstudioai/realtime
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Login to GHCR
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Set up Docker Buildx
uses: useblacksmith/setup-docker-builder@v1
- name: Generate ARM64 tags
id: meta
run: |
IMAGE="${{ matrix.image }}"
echo "tags=${IMAGE}:latest-arm64,${IMAGE}:${{ github.sha }}-arm64" >> $GITHUB_OUTPUT
- name: Build and push ARM64 to GHCR
uses: useblacksmith/build-push-action@v2
with:
context: .
file: ${{ matrix.dockerfile }}
platforms: linux/arm64
push: true
tags: ${{ steps.meta.outputs.tags }}
provenance: false
sbom: false
# Create GHCR multi-arch manifests (only for main, after both builds)
create-ghcr-manifests:
name: Create GHCR Manifests
runs-on: blacksmith-4vcpu-ubuntu-2404
needs: [build-amd64, build-ghcr-arm64]
if: github.event_name == 'push' && github.ref == 'refs/heads/main'
permissions:
packages: write
strategy:
matrix:
include:
- image: ghcr.io/simstudioai/simstudio
- image: ghcr.io/simstudioai/migrations
- image: ghcr.io/simstudioai/realtime
steps:
- name: Login to GHCR
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Create and push manifests
run: |
IMAGE_BASE="${{ matrix.image }}"
# Create latest manifest
docker manifest create "${IMAGE_BASE}:latest" \
"${IMAGE_BASE}:latest-amd64" \
"${IMAGE_BASE}:latest-arm64"
docker manifest push "${IMAGE_BASE}:latest"
# Create SHA manifest
docker manifest create "${IMAGE_BASE}:${{ github.sha }}" \
"${IMAGE_BASE}:${{ github.sha }}-amd64" \
"${IMAGE_BASE}:${{ github.sha }}-arm64"
docker manifest push "${IMAGE_BASE}:${{ github.sha }}"
# Deploy Trigger.dev (after ECR images are pushed, runs in parallel with process-docs)
trigger-deploy:
name: Deploy Trigger.dev
needs: build-images
needs: build-amd64
if: github.event_name == 'push' && (github.ref == 'refs/heads/main' || github.ref == 'refs/heads/staging')
uses: ./.github/workflows/trigger-deploy.yml
secrets: inherit
# Run database migrations (depends on build completion and trigger deployment)
migrations:
name: Apply Database Migrations
needs: [build-images, trigger-deploy]
if: |
always() &&
github.event_name == 'push' &&
(github.ref == 'refs/heads/main' || github.ref == 'refs/heads/staging') &&
needs.build-images.result == 'success' &&
needs.trigger-deploy.result == 'success'
uses: ./.github/workflows/migrations.yml
secrets: inherit
# Process docs embeddings if needed
# Process docs embeddings (after ECR images are pushed, runs in parallel with trigger-deploy)
process-docs:
name: Process Docs
needs: migrations
needs: build-amd64
if: github.event_name == 'push' && (github.ref == 'refs/heads/main' || github.ref == 'refs/heads/staging')
uses: ./.github/workflows/docs-embeddings.yml
secrets: inherit

View File

@@ -129,12 +129,9 @@ jobs:
- name: Generate ARM64 tags
id: meta
uses: docker/metadata-action@v5
with:
images: ${{ matrix.image }}
tags: |
type=raw,value=latest-arm64
type=sha,format=long,suffix=-arm64
run: |
IMAGE="${{ matrix.image }}"
echo "tags=${IMAGE}:latest-arm64,${IMAGE}:${{ github.sha }}-arm64" >> $GITHUB_OUTPUT
- name: Build and push ARM64 to GHCR
uses: useblacksmith/build-push-action@v2

View File

@@ -13,6 +13,7 @@ jobs:
cancel-in-progress: false
env:
TRIGGER_ACCESS_TOKEN: ${{ secrets.TRIGGER_ACCESS_TOKEN }}
TRIGGER_PROJECT_ID: ${{ secrets.TRIGGER_PROJECT_ID }}
steps:
- name: Checkout code
@@ -39,4 +40,5 @@ jobs:
- name: Deploy to Trigger.dev (Production)
if: github.ref == 'refs/heads/main'
working-directory: ./apps/sim
run: npx --yes trigger.dev@4.0.4 deploy
run: npx --yes trigger.dev@4.0.4 deploy

View File

@@ -1,7 +1,7 @@
import type { ReactNode } from 'react'
import { defineI18nUI } from 'fumadocs-ui/i18n'
import { DocsLayout } from 'fumadocs-ui/layouts/docs'
import { RootProvider } from 'fumadocs-ui/provider'
import { RootProvider } from 'fumadocs-ui/provider/next'
import { ExternalLink, GithubIcon } from 'lucide-react'
import { Inter } from 'next/font/google'
import Image from 'next/image'

View File

@@ -175,56 +175,30 @@ Verwenden Sie einen `Memory`Block mit einer konsistenten `id` (zum Beispiel `cha
- Lesen Sie den Gesprächsverlauf für den Kontext
- Hängen Sie die Antwort des Agenten nach dessen Ausführung an
```yaml
# 1) Add latest user message
- Memory (operation: add)
id: chat
role: user
content: {{input}}
# 2) Load conversation history
- Memory (operation: get)
id: chat
# 3) Run the agent with prior messages available
- Agent
System Prompt: ...
User Prompt: |
Use the conversation so far:
{{memory_get.memories}}
Current user message: {{input}}
# 4) Store the agent reply
- Memory (operation: add)
id: chat
role: assistant
content: {{agent.content}}
```
Siehe die `Memory`Block-Referenz für Details: [/tools/memory](/tools/memory).
Siehe die [`Memory`](/tools/memory) Blockreferenz für Details.
## Eingaben und Ausgaben
<Tabs items={['Configuration', 'Variables', 'Results']}>
<Tabs items={['Konfiguration', 'Variablen', 'Ergebnisse']}>
<Tab>
<ul className="list-disc space-y-2 pl-6">
<li>
<strong>System Prompt</strong>: Anweisungen, die das Verhalten und die Rolle des Agenten definieren
<strong>System-Prompt</strong>: Anweisungen, die das Verhalten und die Rolle des Agenten definieren
</li>
<li>
<strong>User Prompt</strong>: Eingabetext oder -daten zur Verarbeitung
<strong>Benutzer-Prompt</strong>: Eingabetext oder zu verarbeitende Daten
</li>
<li>
<strong>Model</strong>: KI-Modellauswahl (OpenAI, Anthropic, Google, etc.)
<strong>Modell</strong>: KI-Modellauswahl (OpenAI, Anthropic, Google, usw.)
</li>
<li>
<strong>Temperature</strong>: Steuerung der Antwort-Zufälligkeit (0-2)
<strong>Temperatur</strong>: Steuerung der Zufälligkeit der Antwort (0-2)
</li>
<li>
<strong>Tools</strong>: Array verfügbarer Tools für Funktionsaufrufe
</li>
<li>
<strong>Response Format</strong>: JSON-Schema für strukturierte Ausgabe
<strong>Antwortformat</strong>: JSON-Schema für strukturierte Ausgabe
</li>
</ul>
</Tab>
@@ -261,7 +235,7 @@ Siehe die `Memory`Block-Referenz für Details: [/tools/memory](/tools/memory).
## Beispielanwendungsfälle
### Automatisierung des Kundendienstes
### Automatisierung des Kundenservice
<div className="mb-4 rounded-md border p-4">
<h4 className="font-medium">Szenario: Bearbeitung von Kundenanfragen mit Datenbankzugriff</h4>
@@ -269,9 +243,9 @@ Siehe die `Memory`Block-Referenz für Details: [/tools/memory](/tools/memory).
<li>Benutzer reicht ein Support-Ticket über den API-Block ein</li>
<li>Agent prüft Bestellungen/Abonnements in Postgres und durchsucht die Wissensdatenbank nach Anleitungen</li>
<li>Falls eine Eskalation erforderlich ist, erstellt der Agent ein Linear-Ticket mit relevantem Kontext</li>
<li>Agent verfasst eine klare E-Mail-Antwort</li>
<li>Agent erstellt eine klare E-Mail-Antwort</li>
<li>Gmail sendet die Antwort an den Kunden</li>
<li>Konversation wird im Speicher gesichert, um den Verlauf für zukünftige Nachrichten zu erhalten</li>
<li>Konversation wird im Memory gespeichert, um den Verlauf für zukünftige Nachrichten beizubehalten</li>
</ol>
</div>
@@ -287,20 +261,20 @@ Siehe die `Memory`Block-Referenz für Details: [/tools/memory](/tools/memory).
</ol>
</div>
### Werkzeuggestützter Rechercheassistent
### Werkzeuggestützter Forschungsassistent
<div className="mb-4 rounded-md border p-4">
<h4 className="font-medium">Szenario: Rechercheassistent mit Websuche und Dokumentenzugriff</h4>
<h4 className="font-medium">Szenario: Forschungsassistent mit Websuche und Dokumentenzugriff</h4>
<ol className="list-decimal pl-5 text-sm">
<li>Benutzeranfrage über Eingabe erhalten</li>
<li>Agent durchsucht das Web mit dem Google Search-Tool</li>
<li>Agent durchsucht das Web mit dem Google-Suchwerkzeug</li>
<li>Agent greift auf Notion-Datenbank für interne Dokumente zu</li>
<li>Agent erstellt umfassenden Recherchebericht</li>
<li>Agent erstellt umfassenden Forschungsbericht</li>
</ol>
</div>
## Best Practices
## Bewährte Praktiken
- **Sei spezifisch in System-Prompts**: Definiere die Rolle, den Tonfall und die Einschränkungen des Agenten klar. Je spezifischer deine Anweisungen sind, desto besser kann der Agent seinen vorgesehenen Zweck erfüllen.
- **Sei spezifisch in System-Prompts**: Definiere die Rolle, den Ton und die Einschränkungen des Agenten klar. Je spezifischer deine Anweisungen sind, desto besser kann der Agent seinen vorgesehenen Zweck erfüllen.
- **Wähle die richtige Temperatureinstellung**: Verwende niedrigere Temperatureinstellungen (0-0,3), wenn Genauigkeit wichtig ist, oder erhöhe die Temperatur (0,7-2,0) für kreativere oder abwechslungsreichere Antworten
- **Nutze Tools effektiv**: Integriere Tools, die den Zweck des Agenten ergänzen und seine Fähigkeiten erweitern. Sei selektiv bei der Auswahl der Tools, um den Agenten nicht zu überfordern. Für Aufgaben mit wenig Überschneidung verwende einen anderen Agent-Block für die besten Ergebnisse.
- **Nutze Werkzeuge effektiv**: Integriere Werkzeuge, die den Zweck des Agenten ergänzen und seine Fähigkeiten verbessern. Sei selektiv bei der Auswahl der Werkzeuge, um den Agenten nicht zu überfordern. Für Aufgaben mit wenig Überschneidung verwende einen anderen Agent-Block für die besten Ergebnisse.

View File

@@ -24,15 +24,7 @@ Der API-Trigger stellt Ihren Workflow als sicheren HTTP-Endpunkt bereit. Senden
Fügen Sie für jeden Parameter ein Feld **Eingabeformat** hinzu. Die Ausgabeschlüssel zur Laufzeit spiegeln das Schema wider und sind auch unter `<api.input>` verfügbar.
```yaml
- type: string
name: userId
value: demo-user # optional manual test value
- type: number
name: maxTokens
```
Manuelle Ausführungen im Editor verwenden die Spalte `value`, damit Sie testen können, ohne eine Anfrage zu senden. Während der Ausführung füllt der Resolver sowohl `<api.userId>` als auch `<api.input.userId>`.
Manuelle Ausführungen im Editor verwenden die Spalte `value`, damit Sie testen können, ohne eine Anfrage zu senden. Während der Ausführung füllt der Resolver sowohl `<api.userId>` als auch `<api.input.userId>` aus.
## Anfrage-Beispiel
@@ -56,5 +48,5 @@ Erfolgreiche Antworten geben das serialisierte Ausführungsergebnis vom Executor
Wenn kein Eingabeformat definiert ist, stellt der Executor das rohe JSON nur unter `<api.input>` bereit.
<Callout type="warning">
Ein Workflow kann nur einen API-Trigger enthalten. Veröffentlichen Sie eine neue Bereitstellung nach Änderungen, damit der Endpunkt aktuell bleibt.
Ein Workflow kann nur einen API-Trigger enthalten. Veröffentlichen Sie nach Änderungen eine neue Bereitstellung, damit der Endpunkt aktuell bleibt.
</Callout>

View File

@@ -10,7 +10,6 @@ type: object
required:
- type
- name
- inputs
- connections
properties:
type:
@@ -22,21 +21,23 @@ properties:
description: Display name for this loop block
inputs:
type: object
required:
- loopType
description: Optional. If omitted, defaults will be applied.
properties:
loopType:
type: string
enum: [for, forEach]
description: Type of loop to execute
default: for
iterations:
type: number
description: Number of iterations (for 'for' loops)
default: 5
minimum: 1
maximum: 1000
collection:
type: string
description: Collection to iterate over (for 'forEach' loops)
default: ""
maxConcurrency:
type: number
description: Maximum concurrent executions
@@ -45,13 +46,10 @@ properties:
maximum: 10
connections:
type: object
required:
- loop
properties:
# Nested format (recommended)
loop:
type: object
required:
- start
properties:
start:
type: string
@@ -59,26 +57,37 @@ properties:
end:
type: string
description: Target block ID for loop completion (optional)
# Direct handle format (alternative)
loop-start-source:
type: string | string[]
description: Target block ID to execute inside the loop (direct format)
loop-end-source:
type: string | string[]
description: Target block ID for loop completion (direct format, optional)
error:
type: string
description: Target block ID for error handling
note: Use either the nested 'loop' format OR the direct 'loop-start-source' format, not both
```
## Verbindungskonfiguration
Loop-Blöcke verwenden ein spezielles Verbindungsformat mit einem `loop`Abschnitt:
Loop-Blöcke unterstützen zwei Verbindungsformate:
### Direktes Handle-Format (Alternative)
```yaml
connections:
loop:
start: <string> # Target block ID to execute inside the loop
end: <string> # Target block ID after loop completion (optional)
loop-start-source: <string> # Target block ID to execute inside the loop
loop-end-source: <string> # Target block ID after loop completion (optional)
error: <string> # Target block ID for error handling (optional)
```
Beide Formate funktionieren identisch. Verwenden Sie das Format, das Ihnen besser gefällt.
## Konfiguration von untergeordneten Blöcken
Blöcke innerhalb einer Schleife müssen ihre `parentId` auf die Loop-Block-ID setzen:
Blöcke innerhalb einer Schleife müssen ihre `parentId` auf die Loop-Block-ID gesetzt haben. Die Eigenschaft `extent` wird automatisch auf `'parent'` gesetzt und muss nicht angegeben werden:
```yaml
loop-1:
@@ -106,7 +115,7 @@ process-item:
## Beispiele
### For-Schleife (feste Iterationen)
### For-Schleife (feste Anzahl von Iterationen)
```yaml
countdown-loop:
@@ -227,7 +236,7 @@ store-analysis:
};
```
### Schleife mit paralleler Verarbeitung
### Schleife für parallele Verarbeitung
```yaml
parallel-processing-loop:
@@ -261,9 +270,62 @@ process-task:
success: task-completed
```
### Beispiel für direktes Handle-Format
Dieselbe Schleife kann mit dem direkten Handle-Format geschrieben werden:
```yaml
my-loop:
type: loop
name: "Process Items"
inputs:
loopType: forEach
collection: <start.items>
connections:
loop-start-source: process-item # Direct handle format
loop-end-source: final-results # Direct handle format
error: handle-error
process-item:
type: agent
name: "Process Item"
parentId: my-loop
inputs:
systemPrompt: "Process this item"
userPrompt: <loop.currentItem>
model: gpt-4o
apiKey: '{{OPENAI_API_KEY}}'
```
### Minimales Schleifenbeispiel (mit Standardwerten)
Sie können den Abschnitt `inputs` vollständig weglassen, dann werden Standardwerte angewendet:
```yaml
simple-loop:
type: loop
name: "Simple Loop"
# No inputs section - defaults to loopType: 'for', iterations: 5
connections:
loop-start-source: process-step
loop-end-source: complete
process-step:
type: agent
name: "Process Step"
parentId: simple-loop
inputs:
systemPrompt: "Execute step"
userPrompt: "Step <loop.index>"
model: gpt-4o
apiKey: '{{OPENAI_API_KEY}}'
```
Diese Schleife führt standardmäßig 5 Iterationen aus.
## Schleifenvariablen
Innerhalb von untergeordneten Schleifenblöcken sind diese speziellen Variablen verfügbar:
Innerhalb von Schleifenunterblöcken sind diese speziellen Variablen verfügbar:
```yaml
# Available in all child blocks of the loop
@@ -290,6 +352,6 @@ final-processor:
- Verwenden Sie forEach für die Verarbeitung von Sammlungen, for-Schleifen für feste Iterationen
- Erwägen Sie die Verwendung von maxConcurrency für I/O-gebundene Operationen
- Integrieren Sie Fehlerbehandlung für eine robuste Schleifenausführung
- Verwenden Sie aussagekräftige Namen für Schleifen-Unterblöcke
- Verwenden Sie aussagekräftige Namen für Schleifenunterblöcke
- Testen Sie zuerst mit kleinen Sammlungen
- Überwachen Sie die Ausführungszeit bei großen Sammlungen
- Überwachen Sie die Ausführungszeit für große Sammlungen

View File

@@ -175,33 +175,7 @@ Utiliza un bloque `Memory` con un `id` consistente (por ejemplo, `chat`) para pe
- Lee el historial de conversación para contexto
- Añade la respuesta del agente después de que se ejecute
```yaml
# 1) Add latest user message
- Memory (operation: add)
id: chat
role: user
content: {{input}}
# 2) Load conversation history
- Memory (operation: get)
id: chat
# 3) Run the agent with prior messages available
- Agent
System Prompt: ...
User Prompt: |
Use the conversation so far:
{{memory_get.memories}}
Current user message: {{input}}
# 4) Store the agent reply
- Memory (operation: add)
id: chat
role: assistant
content: {{agent.content}}
```
Consulta la referencia del bloque `Memory` para más detalles: [/tools/memory](/tools/memory).
Consulta la referencia del bloque [`Memory`](/tools/memory) para más detalles.
## Entradas y salidas
@@ -212,7 +186,7 @@ Consulta la referencia del bloque `Memory` para más detalles: [/tools/memory](/
<strong>Prompt del sistema</strong>: Instrucciones que definen el comportamiento y rol del agente
</li>
<li>
<strong>Prompt del usuario</strong>: Texto de entrada o datos a procesar
<strong>Prompt del usuario</strong>: Texto o datos de entrada para procesar
</li>
<li>
<strong>Modelo</strong>: Selección del modelo de IA (OpenAI, Anthropic, Google, etc.)
@@ -231,13 +205,13 @@ Consulta la referencia del bloque `Memory` para más detalles: [/tools/memory](/
<Tab>
<ul className="list-disc space-y-2 pl-6">
<li>
<strong>agent.content</strong>: Texto de respuesta o datos estructurados del agente
<strong>agent.content</strong>: Texto de respuesta del agente o datos estructurados
</li>
<li>
<strong>agent.tokens</strong>: Objeto de estadísticas de uso de tokens
<strong>agent.tokens</strong>: Objeto con estadísticas de uso de tokens
</li>
<li>
<strong>agent.tool_calls</strong>: Array de detalles de ejecución de herramientas
<strong>agent.tool_calls</strong>: Array con detalles de ejecución de herramientas
</li>
<li>
<strong>agent.cost</strong>: Costo estimado de la llamada a la API (si está disponible)
@@ -247,7 +221,7 @@ Consulta la referencia del bloque `Memory` para más detalles: [/tools/memory](/
<Tab>
<ul className="list-disc space-y-2 pl-6">
<li>
<strong>Contenido</strong>: Salida de respuesta principal del agente
<strong>Contenido</strong>: Salida principal de respuesta del agente
</li>
<li>
<strong>Metadatos</strong>: Estadísticas de uso y detalles de ejecución
@@ -267,15 +241,15 @@ Consulta la referencia del bloque `Memory` para más detalles: [/tools/memory](/
<h4 className="font-medium">Escenario: Gestionar consultas de clientes con acceso a base de datos</h4>
<ol className="list-decimal pl-5 text-sm">
<li>El usuario envía un ticket de soporte a través del bloque API</li>
<li>El agente verifica pedidos/suscripciones en Postgres y busca en la base de conocimientos para obtener orientación</li>
<li>El agente verifica pedidos/suscripciones en Postgres y busca en la base de conocimientos</li>
<li>Si se necesita escalamiento, el agente crea una incidencia en Linear con el contexto relevante</li>
<li>El agente redacta una respuesta clara por correo electrónico</li>
<li>Gmail envía la respuesta al cliente</li>
<li>La conversación se guarda en Memoria para mantener el historial para mensajes futuros</li>
<li>La conversación se guarda en Memory para mantener el historial para mensajes futuros</li>
</ol>
</div>
### Análisis de contenido multi-modelo
### Análisis de contenido con múltiples modelos
<div className="mb-4 rounded-md border p-4">
<h4 className="font-medium">Escenario: Analizar contenido con diferentes modelos de IA</h4>
@@ -287,13 +261,13 @@ Consulta la referencia del bloque `Memory` para más detalles: [/tools/memory](/
</ol>
</div>
### Asistente de investigación con herramientas
### Asistente de investigación potenciado por herramientas
<div className="mb-4 rounded-md border p-4">
<h4 className="font-medium">Escenario: Asistente de investigación con búsqueda web y acceso a documentos</h4>
<ol className="list-decimal pl-5 text-sm">
<li>Consulta del usuario recibida a través de entrada</li>
<li>El agente busca en la web usando la herramienta de Google Search</li>
<li>El agente busca en la web utilizando la herramienta de Google Search</li>
<li>El agente accede a la base de datos de Notion para documentos internos</li>
<li>El agente compila un informe de investigación completo</li>
</ol>
@@ -301,6 +275,6 @@ Consulta la referencia del bloque `Memory` para más detalles: [/tools/memory](/
## Mejores prácticas
- **Sé específico en los prompts del sistema**: Define claramente el rol, tono y limitaciones del agente. Cuanto más específicas sean tus instrucciones, mejor podrá el agente cumplir con su propósito previsto.
- **Sé específico en los prompts del sistema**: Define claramente el rol del agente, el tono y las limitaciones. Cuanto más específicas sean tus instrucciones, mejor podrá el agente cumplir con su propósito previsto.
- **Elige la configuración de temperatura adecuada**: Usa configuraciones de temperatura más bajas (0-0.3) cuando la precisión es importante, o aumenta la temperatura (0.7-2.0) para respuestas más creativas o variadas
- **Aprovecha las herramientas de manera efectiva**: Integra herramientas que complementen el propósito del agente y mejoren sus capacidades. Sé selectivo sobre qué herramientas proporcionas para evitar sobrecargar al agente. Para tareas con poca superposición, usa otro bloque de Agente para obtener los mejores resultados.
- **Aprovecha las herramientas de manera efectiva**: Integra herramientas que complementen el propósito del agente y mejoren sus capacidades. Sé selectivo sobre qué herramientas proporcionas para evitar sobrecargar al agente. Para tareas con poco solapamiento, usa otro bloque de Agente para obtener los mejores resultados.

View File

@@ -24,14 +24,6 @@ El disparador de API expone tu flujo de trabajo como un punto de conexión HTTP
Añade un campo de **Formato de entrada** para cada parámetro. Las claves de salida en tiempo de ejecución reflejan el esquema y también están disponibles bajo `<api.input>`.
```yaml
- type: string
name: userId
value: demo-user # optional manual test value
- type: number
name: maxTokens
```
Las ejecuciones manuales en el editor utilizan la columna `value` para que puedas realizar pruebas sin enviar una solicitud. Durante la ejecución, el resolutor completa tanto `<api.userId>` como `<api.input.userId>`.
## Ejemplo de solicitud
@@ -44,17 +36,17 @@ curl -X POST \
-d '{"userId":"demo-user","maxTokens":1024}'
```
Las respuestas exitosas devuelven el resultado de ejecución serializado del Ejecutor. Los errores muestran fallos de validación, autenticación o del flujo de trabajo.
Las respuestas exitosas devuelven el resultado de ejecución serializado del Ejecutor. Los errores muestran fallos de validación, autenticación o flujo de trabajo.
## Referencia de salida
| Referencia | Descripción |
|-----------|-------------|
| `<api.field>` | Campo definido en el formato de entrada |
| `<api.field>` | Campo definido en el Formato de Entrada |
| `<api.input>` | Cuerpo completo estructurado de la solicitud |
Si no se define un formato de entrada, el ejecutor expone el JSON sin procesar solo en `<api.input>`.
Si no se define un Formato de Entrada, el ejecutor expone el JSON sin procesar solo en `<api.input>`.
<Callout type="warning">
Un flujo de trabajo puede contener solo un disparador de API. Publica una nueva implementación después de realizar cambios para que el punto de conexión se mantenga actualizado.
Un flujo de trabajo puede contener solo un Disparador de API. Publica una nueva implementación después de realizar cambios para que el punto de conexión se mantenga actualizado.
</Callout>

View File

@@ -10,7 +10,6 @@ type: object
required:
- type
- name
- inputs
- connections
properties:
type:
@@ -22,21 +21,23 @@ properties:
description: Display name for this loop block
inputs:
type: object
required:
- loopType
description: Optional. If omitted, defaults will be applied.
properties:
loopType:
type: string
enum: [for, forEach]
description: Type of loop to execute
default: for
iterations:
type: number
description: Number of iterations (for 'for' loops)
default: 5
minimum: 1
maximum: 1000
collection:
type: string
description: Collection to iterate over (for 'forEach' loops)
default: ""
maxConcurrency:
type: number
description: Maximum concurrent executions
@@ -45,13 +46,10 @@ properties:
maximum: 10
connections:
type: object
required:
- loop
properties:
# Nested format (recommended)
loop:
type: object
required:
- start
properties:
start:
type: string
@@ -59,26 +57,37 @@ properties:
end:
type: string
description: Target block ID for loop completion (optional)
# Direct handle format (alternative)
loop-start-source:
type: string | string[]
description: Target block ID to execute inside the loop (direct format)
loop-end-source:
type: string | string[]
description: Target block ID for loop completion (direct format, optional)
error:
type: string
description: Target block ID for error handling
note: Use either the nested 'loop' format OR the direct 'loop-start-source' format, not both
```
## Configuración de conexión
Los bloques Loop utilizan un formato de conexión especial con una sección `loop`:
Los bloques de bucle admiten dos formatos de conexión:
### Formato de manejador directo (alternativo)
```yaml
connections:
loop:
start: <string> # Target block ID to execute inside the loop
end: <string> # Target block ID after loop completion (optional)
loop-start-source: <string> # Target block ID to execute inside the loop
loop-end-source: <string> # Target block ID after loop completion (optional)
error: <string> # Target block ID for error handling (optional)
```
Ambos formatos funcionan de manera idéntica. Usa el que prefieras.
## Configuración de bloques secundarios
Los bloques dentro de un bucle deben tener su `parentId` configurado con el ID del bloque loop:
Los bloques dentro de un bucle deben tener su `parentId` configurado con el ID del bloque de bucle. La propiedad `extent` se establece automáticamente como `'parent'` y no necesita ser especificada:
```yaml
loop-1:
@@ -261,6 +270,59 @@ process-task:
success: task-completed
```
### Ejemplo de formato de manejador directo
El mismo bucle puede escribirse usando el formato de manejador directo:
```yaml
my-loop:
type: loop
name: "Process Items"
inputs:
loopType: forEach
collection: <start.items>
connections:
loop-start-source: process-item # Direct handle format
loop-end-source: final-results # Direct handle format
error: handle-error
process-item:
type: agent
name: "Process Item"
parentId: my-loop
inputs:
systemPrompt: "Process this item"
userPrompt: <loop.currentItem>
model: gpt-4o
apiKey: '{{OPENAI_API_KEY}}'
```
### Ejemplo de bucle mínimo (usando valores predeterminados)
Puedes omitir completamente la sección `inputs`, y se aplicarán los valores predeterminados:
```yaml
simple-loop:
type: loop
name: "Simple Loop"
# No inputs section - defaults to loopType: 'for', iterations: 5
connections:
loop-start-source: process-step
loop-end-source: complete
process-step:
type: agent
name: "Process Step"
parentId: simple-loop
inputs:
systemPrompt: "Execute step"
userPrompt: "Step <loop.index>"
model: gpt-4o
apiKey: '{{OPENAI_API_KEY}}'
```
Este bucle ejecutará 5 iteraciones por defecto.
## Variables de bucle
Dentro de los bloques secundarios del bucle, estas variables especiales están disponibles:
@@ -286,10 +348,10 @@ final-processor:
## Mejores prácticas
- Establece límites razonables de iteración para evitar tiempos de ejecución prolongados
- Establece límites de iteración razonables para evitar tiempos de ejecución largos
- Usa forEach para procesar colecciones, bucles for para iteraciones fijas
- Considera usar maxConcurrency para operaciones limitadas por E/S
- Incluye manejo de errores para una ejecución robusta de bucles
- Incluye manejo de errores para una ejecución robusta del bucle
- Usa nombres descriptivos para los bloques secundarios del bucle
- Prueba primero con colecciones pequeñas
- Monitorea el tiempo de ejecución para colecciones grandes

View File

@@ -175,33 +175,7 @@ Utilisez un bloc `Memory` avec un `id` cohérent (par exemple, `chat`) pour cons
- Lisez l'historique de conversation pour le contexte
- Ajoutez la réponse de l'Agent après son exécution
```yaml
# 1) Add latest user message
- Memory (operation: add)
id: chat
role: user
content: {{input}}
# 2) Load conversation history
- Memory (operation: get)
id: chat
# 3) Run the agent with prior messages available
- Agent
System Prompt: ...
User Prompt: |
Use the conversation so far:
{{memory_get.memories}}
Current user message: {{input}}
# 4) Store the agent reply
- Memory (operation: add)
id: chat
role: assistant
content: {{agent.content}}
```
Consultez la référence du bloc `Memory` pour plus de détails : [/tools/memory](/tools/memory).
Voir la référence du bloc [`Memory`](/tools/memory) pour plus de détails.
## Entrées et sorties
@@ -209,51 +183,51 @@ Consultez la référence du bloc `Memory` pour plus de détails : [/tools/memory
<Tab>
<ul className="list-disc space-y-2 pl-6">
<li>
<strong>Prompt système</strong> : Instructions définissant le comportement et le rôle de l'agent
<strong>Prompt système</strong> : instructions définissant le comportement et le rôle de l'agent
</li>
<li>
<strong>Prompt utilisateur</strong> : Texte d'entrée ou données à traiter
<strong>Prompt utilisateur</strong> : texte ou données d'entrée à traiter
</li>
<li>
<strong>Modèle</strong> : Sélection du modèle d'IA (OpenAI, Anthropic, Google, etc.)
<strong>Modèle</strong> : sélection du modèle d'IA (OpenAI, Anthropic, Google, etc.)
</li>
<li>
<strong>Température</strong> : Contrôle de l'aléatoire des réponses (0-2)
<strong>Température</strong> : contrôle de l'aléatoire des réponses (0-2)
</li>
<li>
<strong>Outils</strong> : Tableau d'outils disponibles pour l'appel de fonctions
<strong>Outils</strong> : tableau des outils disponibles pour l'appel de fonctions
</li>
<li>
<strong>Format de réponse</strong> : Schéma JSON pour une sortie structurée
<strong>Format de réponse</strong> : schéma JSON pour une sortie structurée
</li>
</ul>
</Tab>
<Tab>
<ul className="list-disc space-y-2 pl-6">
<li>
<strong>agent.content</strong> : Texte de réponse de l'agent ou données structurées
<strong>agent.content</strong> : texte de réponse de l'agent ou données structurées
</li>
<li>
<strong>agent.tokens</strong> : Objet de statistiques d'utilisation des tokens
<strong>agent.tokens</strong> : objet de statistiques d'utilisation des tokens
</li>
<li>
<strong>agent.tool_calls</strong> : Tableau des détails d'exécution des outils
<strong>agent.tool_calls</strong> : tableau des détails d'exécution des outils
</li>
<li>
<strong>agent.cost</strong> : Coût estimé de l'appel API (si disponible)
<strong>agent.cost</strong> : coût estimé de l'appel API (si disponible)
</li>
</ul>
</Tab>
<Tab>
<ul className="list-disc space-y-2 pl-6">
<li>
<strong>Contenu</strong> : Sortie de réponse principale de l'agent
<strong>Contenu</strong> : sortie de réponse principale de l'agent
</li>
<li>
<strong>Métadonnées</strong> : Statistiques d'utilisation et détails d'exécution
<strong>Métadonnées</strong> : statistiques d'utilisation et détails d'exécution
</li>
<li>
<strong>Accès</strong> : Disponible dans les blocs après l'agent
<strong>Accès</strong> : disponible dans les blocs après l'agent
</li>
</ul>
</Tab>
@@ -268,7 +242,7 @@ Consultez la référence du bloc `Memory` pour plus de détails : [/tools/memory
<ol className="list-decimal pl-5 text-sm">
<li>L'utilisateur soumet un ticket de support via le bloc API</li>
<li>L'agent vérifie les commandes/abonnements dans Postgres et recherche des conseils dans la base de connaissances</li>
<li>Si une escalade est nécessaire, l'agent crée un ticket Linear avec le contexte pertinent</li>
<li>Si une escalade est nécessaire, l'agent crée un problème Linear avec le contexte pertinent</li>
<li>L'agent rédige une réponse par e-mail claire</li>
<li>Gmail envoie la réponse au client</li>
<li>La conversation est enregistrée dans Memory pour conserver l'historique des messages futurs</li>
@@ -278,7 +252,7 @@ Consultez la référence du bloc `Memory` pour plus de détails : [/tools/memory
### Analyse de contenu multi-modèles
<div className="mb-4 rounded-md border p-4">
<h4 className="font-medium">Scénario : analyser du contenu avec différents modèles d'IA</h4>
<h4 className="font-medium">Scénario : analyser le contenu avec différents modèles d'IA</h4>
<ol className="list-decimal pl-5 text-sm">
<li>Le bloc de fonction traite le document téléchargé</li>
<li>L'agent avec GPT-4o effectue une analyse technique</li>

View File

@@ -24,14 +24,6 @@ Le déclencheur d'API expose votre flux de travail en tant que point de terminai
Ajoutez un champ **Format d'entrée** pour chaque paramètre. Les clés de sortie d'exécution reflètent le schéma et sont également disponibles sous `<api.input>`.
```yaml
- type: string
name: userId
value: demo-user # optional manual test value
- type: number
name: maxTokens
```
Les exécutions manuelles dans l'éditeur utilisent la colonne `value` pour que vous puissiez tester sans envoyer de requête. Pendant l'exécution, le résolveur remplit à la fois `<api.userId>` et `<api.input.userId>`.
## Exemple de requête
@@ -44,17 +36,17 @@ curl -X POST \
-d '{"userId":"demo-user","maxTokens":1024}'
```
Les réponses réussies renvoient le résultat d'exécution sérialisé de l'Exécuteur. Les erreurs révèlent des problèmes de validation, d'authentification ou d'échec du flux de travail.
Les réponses réussies renvoient le résultat d'exécution sérialisé de l'exécuteur. Les erreurs révèlent des problèmes de validation, d'authentification ou d'échec du workflow.
## Référence de sortie
## Référence des sorties
| Référence | Description |
|-----------|-------------|
| `<api.field>` | Champ défini dans le Format d'entrée |
| `<api.input>` | Corps de la requête structuré complet |
| `<api.field>` | Champ défini dans le format d'entrée |
| `<api.input>` | Corps de requête structuré complet |
Si aucun Format d'entrée n'est défini, l'exécuteur expose le JSON brut uniquement à `<api.input>`.
Si aucun format d'entrée n'est défini, l'exécuteur expose uniquement le JSON brut à `<api.input>`.
<Callout type="warning">
Un flux de travail ne peut contenir qu'un seul déclencheur d'API. Publiez un nouveau déploiement après les modifications pour que le point de terminaison reste à jour.
Un workflow ne peut contenir qu'un seul déclencheur API. Publiez un nouveau déploiement après les modifications pour que le point de terminaison reste à jour.
</Callout>

View File

@@ -10,7 +10,6 @@ type: object
required:
- type
- name
- inputs
- connections
properties:
type:
@@ -22,21 +21,23 @@ properties:
description: Display name for this loop block
inputs:
type: object
required:
- loopType
description: Optional. If omitted, defaults will be applied.
properties:
loopType:
type: string
enum: [for, forEach]
description: Type of loop to execute
default: for
iterations:
type: number
description: Number of iterations (for 'for' loops)
default: 5
minimum: 1
maximum: 1000
collection:
type: string
description: Collection to iterate over (for 'forEach' loops)
default: ""
maxConcurrency:
type: number
description: Maximum concurrent executions
@@ -45,13 +46,10 @@ properties:
maximum: 10
connections:
type: object
required:
- loop
properties:
# Nested format (recommended)
loop:
type: object
required:
- start
properties:
start:
type: string
@@ -59,26 +57,37 @@ properties:
end:
type: string
description: Target block ID for loop completion (optional)
# Direct handle format (alternative)
loop-start-source:
type: string | string[]
description: Target block ID to execute inside the loop (direct format)
loop-end-source:
type: string | string[]
description: Target block ID for loop completion (direct format, optional)
error:
type: string
description: Target block ID for error handling
note: Use either the nested 'loop' format OR the direct 'loop-start-source' format, not both
```
## Configuration de connexion
Les blocs Loop utilisent un format de connexion spécial avec une section `loop` :
Les blocs de boucle prennent en charge deux formats de connexion :
### Format de gestion directe (alternative)
```yaml
connections:
loop:
start: <string> # Target block ID to execute inside the loop
end: <string> # Target block ID after loop completion (optional)
loop-start-source: <string> # Target block ID to execute inside the loop
loop-end-source: <string> # Target block ID after loop completion (optional)
error: <string> # Target block ID for error handling (optional)
```
Les deux formats fonctionnent de manière identique. Utilisez celui que vous préférez.
## Configuration des blocs enfants
Les blocs à l'intérieur d'une boucle doivent avoir leur `parentId` défini sur l'ID du bloc de boucle :
Les blocs à l'intérieur d'une boucle doivent avoir leur `parentId` défini sur l'ID du bloc de boucle. La propriété `extent` est automatiquement définie sur `'parent'` et n'a pas besoin d'être spécifiée :
```yaml
loop-1:
@@ -261,9 +270,62 @@ process-task:
success: task-completed
```
### Exemple de format de gestion directe
La même boucle peut être écrite en utilisant le format de gestion directe :
```yaml
my-loop:
type: loop
name: "Process Items"
inputs:
loopType: forEach
collection: <start.items>
connections:
loop-start-source: process-item # Direct handle format
loop-end-source: final-results # Direct handle format
error: handle-error
process-item:
type: agent
name: "Process Item"
parentId: my-loop
inputs:
systemPrompt: "Process this item"
userPrompt: <loop.currentItem>
model: gpt-4o
apiKey: '{{OPENAI_API_KEY}}'
```
### Exemple de boucle minimale (utilisant les valeurs par défaut)
Vous pouvez omettre entièrement la section `inputs`, et les valeurs par défaut seront appliquées :
```yaml
simple-loop:
type: loop
name: "Simple Loop"
# No inputs section - defaults to loopType: 'for', iterations: 5
connections:
loop-start-source: process-step
loop-end-source: complete
process-step:
type: agent
name: "Process Step"
parentId: simple-loop
inputs:
systemPrompt: "Execute step"
userPrompt: "Step <loop.index>"
model: gpt-4o
apiKey: '{{OPENAI_API_KEY}}'
```
Cette boucle exécutera 5 itérations par défaut.
## Variables de boucle
À l'intérieur des blocs enfants de la boucle, ces variables spéciales sont disponibles :
À l'intérieur des blocs enfants de boucle, ces variables spéciales sont disponibles :
```yaml
# Available in all child blocks of the loop

View File

@@ -172,33 +172,7 @@ When responding to questions about investments, include risk disclaimers.
- コンテキストのために会話履歴を読み取る
- エージェントの実行後に返信を追加
```yaml
# 1) Add latest user message
- Memory (operation: add)
id: chat
role: user
content: {{input}}
# 2) Load conversation history
- Memory (operation: get)
id: chat
# 3) Run the agent with prior messages available
- Agent
System Prompt: ...
User Prompt: |
Use the conversation so far:
{{memory_get.memories}}
Current user message: {{input}}
# 4) Store the agent reply
- Memory (operation: add)
id: chat
role: assistant
content: {{agent.content}}
```
詳細については`Memory`ブロックリファレンスを参照してください: [/tools/memory](/tools/memory)。
詳細については[`Memory`](/tools/memory)ブロックリファレンスを参照してください。
## 入力と出力
@@ -209,7 +183,7 @@ When responding to questions about investments, include risk disclaimers.
<strong>システムプロンプト</strong>: エージェントの動作と役割を定義する指示
</li>
<li>
<strong>ユーザープロンプト</strong>: 処理する入力テキストまたはデータ
<strong>ユーザープロンプト</strong>: 処理するテキストまたはデータ入力
</li>
<li>
<strong>モデル</strong>: AIモデルの選択OpenAI、Anthropic、Google など)
@@ -221,14 +195,14 @@ When responding to questions about investments, include risk disclaimers.
<strong>ツール</strong>: 関数呼び出し用の利用可能なツールの配列
</li>
<li>
<strong>レスポンス形式</strong>: 構造化出力用のJSONスキーマ
<strong>レスポンス形式</strong>: 構造化された出力のためのJSONスキーマ
</li>
</ul>
</Tab>
<Tab>
<ul className="list-disc space-y-2 pl-6">
<li>
<strong>agent.content</strong>: エージェントのレスポンステキストまたは構造化データ
<strong>agent.content</strong>: エージェントの応答テキストまたは構造化データ
</li>
<li>
<strong>agent.tokens</strong>: トークン使用統計オブジェクト
@@ -237,14 +211,14 @@ When responding to questions about investments, include risk disclaimers.
<strong>agent.tool_calls</strong>: ツール実行詳細の配列
</li>
<li>
<strong>agent.cost</strong>: 推定APIコールコスト(利用可能な場合)
<strong>agent.cost</strong>: 推定APIコール費用(利用可能な場合)
</li>
</ul>
</Tab>
<Tab>
<ul className="list-disc space-y-2 pl-6">
<li>
<strong>コンテンツ</strong>: エージェントからの主要なレスポンス出力
<strong>コンテンツ</strong>: エージェントからの主要な応答出力
</li>
<li>
<strong>メタデータ</strong>: 使用統計と実行詳細
@@ -261,43 +235,43 @@ When responding to questions about investments, include risk disclaimers.
### カスタマーサポートの自動化
<div className="mb-4 rounded-md border p-4">
<h4 className="font-medium">シナリオデータベースアクセスによる顧客問い合わせ対応</h4>
<h4 className="font-medium">シナリオ: データベースアクセスによる顧客問い合わせ対応</h4>
<ol className="list-decimal pl-5 text-sm">
<li>ユーザーがAPIブロックを通じてサポートチケットを送信</li>
<li>ユーザーがAPIブロック経由でサポートチケットを送信</li>
<li>エージェントがPostgresで注文/サブスクリプションを確認し、ナレッジベースでガイダンスを検索</li>
<li>エスカレーションが必要な場合、エージェントは関連コンテキストを含むLinearの課題を作成</li>
<li>エージェントが明確なメール返信を作成</li>
<li>エージェントが明確な返信メールを作成</li>
<li>Gmailが顧客に返信を送信</li>
<li>将来のメッセージのために会話履歴を維持するため、会話がメモリに保存される</li>
<li>将来のメッセージのために履歴を維持するため、会話がメモリに保存される</li>
</ol>
</div>
### マルチモデルコンテンツ分析
<div className="mb-4 rounded-md border p-4">
<h4 className="font-medium">シナリオ異なるAIモデルでコンテンツを分析</h4>
<h4 className="font-medium">シナリオ: 異なるAIモデルでコンテンツを分析</h4>
<ol className="list-decimal pl-5 text-sm">
<li>ファンクションブロックがアップロードされた文書を処理</li>
<li>関数ブロックがアップロードされた文書を処理</li>
<li>GPT-4oを搭載したエージェントが技術的分析を実行</li>
<li>Claudeを搭載したエージェントが感情トーンを分析</li>
<li>ファンクションブロックが最終レポート用に結果を統合</li>
<li>Claudeを搭載したエージェントが感情トーンを分析</li>
<li>関数ブロックが最終レポート用に結果を統合</li>
</ol>
</div>
### ツール活用型リサーチアシスタント
### ツール搭載型リサーチアシスタント
<div className="mb-4 rounded-md border p-4">
<h4 className="font-medium">シナリオ:ウェブ検索と文書アクセス機能を持つリサーチアシスタント</h4>
<h4 className="font-medium">シナリオ:ウェブ検索とドキュメントアクセス機能を持つリサーチアシスタント</h4>
<ol className="list-decimal pl-5 text-sm">
<li>入力を通じてユーザークエリを受信</li>
<li>入力からユーザークエリを受信</li>
<li>エージェントがGoogle検索ツールを使用してウェブを検索</li>
<li>エージェントが社内文書用のNotionデータベースにアクセス</li>
<li>エージェントが包括的な調査レポートをまとめる</li>
<li>エージェントが包括的な調査レポートを作成</li>
</ol>
</div>
## ベストプラクティス
- **システムプロンプト具体的に**: エージェントの役割、トーン、制限を明確に定義してください。指示が具体的であればあるほど、エージェントは目的を果たすことができます。
- **適切な温度設定を選択**: 精度が重要な場合は低い温度設定0〜0.3を使用し、よりクリエイティブまたは多様な応答には温度を上げる0.7〜2.0
- **ツールを効果的に活用**: エージェントの目的を補完し、その能力を強化するツールを統合してください。エージェントに負担をかけないよう、提供するツールを選択的にしてください。重複の少いタスクには、最良の結果を得るために別のエージェントブロックを使用してください。
- **システムプロンプトで具体的に指示する**:エージェントの役割、トーン、制限を明確に定義してください。指示が具体的であればあるほど、エージェントは目的を果たすことができます。
- **適切な温度設定を選択する**精度が重要な場合は低い温度設定0〜0.3)を使用し、よりクリエイティブまたは多様な応答を得るには温度を上げる0.7〜2.0
- **ツールを効果的に活用する**:エージェントの目的を補完し、その能力を強化するツールを統合してください。エージェントに負担をかけないよう、提供するツールを選択的にしてください。重複の少ないタスクには、最良の結果を得るために別のエージェントブロックを使用してください。

View File

@@ -24,15 +24,7 @@ APIトリガーは、ワークフローを安全なHTTPエンドポイントと
各パラメータに**入力フォーマット**フィールドを追加します。実行時の出力キーはスキーマを反映し、`<api.input>`でも利用できます。
```yaml
- type: string
name: userId
value: demo-user # optional manual test value
- type: number
name: maxTokens
```
エディタでの手動実行では、リクエストを送信せずにテストできるように`value`列を使用します。実行中、リゾルバは`<api.userId>`と`<api.input.userId>`の両方に値を設定します。
エディタでの手動実行は `value` 列を使用するため、リクエストを送信せずにテストできます。実行中、リゾルバーは `<api.userId>` と `<api.input.userId>` の両方に値を設定します。
## リクエスト例
@@ -44,7 +36,7 @@ curl -X POST \
-d '{"userId":"demo-user","maxTokens":1024}'
```
成功したレスポンスはエグゼキュータからシリアル化された実行結果を返します。エラーは検証、認証、またはワークフローの失敗を表示します。
成功したレスポンスはエグゼキュータからシリアル化された実行結果を返します。エラーは検証、認証、またはワークフローの失敗を表示します。
## 出力リファレンス
@@ -53,8 +45,8 @@ curl -X POST \
| `<api.field>` | 入力フォーマットで定義されたフィールド |
| `<api.input>` | 構造化されたリクエスト本文全体 |
入力フォーマットが定義されていない場合、エグゼキュータは生のJSONを`<api.input>`のみ公開します。
入力フォーマットが定義されていない場合、エグゼキュータは生のJSONを `<api.input>` のみ公開します。
<Callout type="warning">
ワークフローには1つのAPIトリガーしか含めることができません。変更後は新しいデプロイメントを公開して、エンドポイントを最新の状態に保ってください。
ワークフローには1つのAPIトリガーのみ含めることができます。変更後は新しいデプロイメントを公開して、エンドポイントを最新の状態に保ってください。
</Callout>

View File

@@ -10,7 +10,6 @@ type: object
required:
- type
- name
- inputs
- connections
properties:
type:
@@ -22,21 +21,23 @@ properties:
description: Display name for this loop block
inputs:
type: object
required:
- loopType
description: Optional. If omitted, defaults will be applied.
properties:
loopType:
type: string
enum: [for, forEach]
description: Type of loop to execute
default: for
iterations:
type: number
description: Number of iterations (for 'for' loops)
default: 5
minimum: 1
maximum: 1000
collection:
type: string
description: Collection to iterate over (for 'forEach' loops)
default: ""
maxConcurrency:
type: number
description: Maximum concurrent executions
@@ -45,13 +46,10 @@ properties:
maximum: 10
connections:
type: object
required:
- loop
properties:
# Nested format (recommended)
loop:
type: object
required:
- start
properties:
start:
type: string
@@ -59,26 +57,37 @@ properties:
end:
type: string
description: Target block ID for loop completion (optional)
# Direct handle format (alternative)
loop-start-source:
type: string | string[]
description: Target block ID to execute inside the loop (direct format)
loop-end-source:
type: string | string[]
description: Target block ID for loop completion (direct format, optional)
error:
type: string
description: Target block ID for error handling
note: Use either the nested 'loop' format OR the direct 'loop-start-source' format, not both
```
## 接続設定
ループブロックは `loop` セクションを持つ特別な接続形式を使用します:
ループブロックは2つの接続形式をサポートしています:
### 直接ハンドル形式(代替)
```yaml
connections:
loop:
start: <string> # Target block ID to execute inside the loop
end: <string> # Target block ID after loop completion (optional)
loop-start-source: <string> # Target block ID to execute inside the loop
loop-end-source: <string> # Target block ID after loop completion (optional)
error: <string> # Target block ID for error handling (optional)
```
## 子ブロック設定
両方の形式は同じように機能します。お好みの方を使用してください。
ループ内のブロックは、その `parentId` をループブロックIDに設定する必要があります
## 子ブロックの設定
ループ内のブロックは、その `parentId` をループブロックIDに設定する必要があります。`extent` プロパティは自動的に `'parent'` に設定されるため、指定する必要はありません:
```yaml
loop-1:
@@ -261,6 +270,59 @@ process-task:
success: task-completed
```
### 直接ハンドル形式の例
同じループは直接ハンドル形式を使用して記述することもできます:
```yaml
my-loop:
type: loop
name: "Process Items"
inputs:
loopType: forEach
collection: <start.items>
connections:
loop-start-source: process-item # Direct handle format
loop-end-source: final-results # Direct handle format
error: handle-error
process-item:
type: agent
name: "Process Item"
parentId: my-loop
inputs:
systemPrompt: "Process this item"
userPrompt: <loop.currentItem>
model: gpt-4o
apiKey: '{{OPENAI_API_KEY}}'
```
### 最小限のループ例(デフォルトを使用)
`inputs` セクションを完全に省略することができ、デフォルト値が適用されます:
```yaml
simple-loop:
type: loop
name: "Simple Loop"
# No inputs section - defaults to loopType: 'for', iterations: 5
connections:
loop-start-source: process-step
loop-end-source: complete
process-step:
type: agent
name: "Process Step"
parentId: simple-loop
inputs:
systemPrompt: "Execute step"
userPrompt: "Step <loop.index>"
model: gpt-4o
apiKey: '{{OPENAI_API_KEY}}'
```
このループはデフォルトで5回の反復を実行します。
## ループ変数
ループ子ブロック内では、以下の特殊変数が利用可能です:
@@ -274,7 +336,7 @@ process-task:
## 出力参照
ループが完了した後、集約された結果を参照できます:
ループが完了した後、その集計結果を参照できます:
```yaml
# In blocks after the loop
@@ -286,10 +348,10 @@ final-processor:
## ベストプラクティス
- 長い実行時間を避けるため適切な繰り返し回数の制限を設定する
- 長い実行時間を避けるため適切な繰り返し制限を設定する
- コレクション処理にはforEachを、固定回数の繰り返しにはforループを使用する
- I/O処理多い操作にはmaxConcurrencyの使用を検討する
- I/O処理多い操作にはmaxConcurrencyの使用を検討する
- 堅牢なループ実行のためにエラー処理を含める
- ループ子ブロックには説明的な名前を使用する
- ループ子ブロックには説明的な名前を使用する
- 最初に小さなコレクションでテストする
- 大きなコレクションの実行時間を監視する

View File

@@ -172,44 +172,18 @@ Agent 模块通过统一的推理接口支持多个 LLM 提供商。可用模型
- 读取对话历史以获取上下文
- 在代理运行后附加其回复
```yaml
# 1) Add latest user message
- Memory (operation: add)
id: chat
role: user
content: {{input}}
# 2) Load conversation history
- Memory (operation: get)
id: chat
# 3) Run the agent with prior messages available
- Agent
System Prompt: ...
User Prompt: |
Use the conversation so far:
{{memory_get.memories}}
Current user message: {{input}}
# 4) Store the agent reply
- Memory (operation: add)
id: chat
role: assistant
content: {{agent.content}}
```
有关详细信息,请参阅 `Memory` 块参考:[工具/内存](/tools/memory)。
有关详细信息,请参阅 [`Memory`](/tools/memory) 块引用。
## 输入和输出
<Tabs items={['配置', '变量', '结果']}>
<Tabs items={['Configuration', 'Variables', 'Results']}>
<Tab>
<ul className="list-disc space-y-2 pl-6">
<li>
<strong>系统提示</strong>:定义代理行为和角色的指令
</li>
<li>
<strong>用户提示</strong>:要处理的输入文本或数据
<strong>用户提示</strong>要处理的输入文本或数据
</li>
<li>
<strong>模型</strong>AI 模型选择OpenAI、Anthropic、Google 等)
@@ -218,7 +192,7 @@ Agent 模块通过统一的推理接口支持多个 LLM 提供商。可用模型
<strong>温度</strong>响应随机性控制0-2
</li>
<li>
<strong>工具</strong>:可用于函数调用的工具数组
<strong>工具</strong>:可用工具数组,用于功能调用
</li>
<li>
<strong>响应格式</strong>:用于结构化输出的 JSON Schema
@@ -234,10 +208,10 @@ Agent 模块通过统一的推理接口支持多个 LLM 提供商。可用模型
<strong>agent.tokens</strong>:令牌使用统计对象
</li>
<li>
<strong>agent.tool_calls</strong>:工具执行详细信息数组
<strong>agent.tool_calls</strong>:工具执行详情数组
</li>
<li>
<strong>agent.cost</strong>API 调用的估计成本(如果可用)
<strong>agent.cost</strong>估算的 API 调用成本(如果可用)
</li>
</ul>
</Tab>
@@ -247,7 +221,7 @@ Agent 模块通过统一的推理接口支持多个 LLM 提供商。可用模型
<strong>内容</strong>:代理的主要响应输出
</li>
<li>
<strong>元数据</strong>:使用统计信息和执行详细信息
<strong>元数据</strong>:使用统计和执行详情
</li>
<li>
<strong>访问</strong>:在代理之后的块中可用
@@ -261,9 +235,9 @@ Agent 模块通过统一的推理接口支持多个 LLM 提供商。可用模型
### 客户支持自动化
<div className="mb-4 rounded-md border p-4">
<h4 className="font-medium">场景:通过数据库访问处理客户询</h4>
<h4 className="font-medium">场景:通过数据库访问处理客户询</h4>
<ol className="list-decimal pl-5 text-sm">
<li>用户通过 API 块提交支持工单</li>
<li>用户通过 API 块提交支持工单</li>
<li>代理在 Postgres 中检查订单/订阅,并在知识库中搜索指导</li>
<li>如果需要升级,代理会创建一个包含相关上下文的 Linear 问题</li>
<li>代理起草一封清晰的电子邮件回复</li>
@@ -277,10 +251,10 @@ Agent 模块通过统一的推理接口支持多个 LLM 提供商。可用模型
<div className="mb-4 rounded-md border p-4">
<h4 className="font-medium">场景:使用不同的 AI 模型分析内容</h4>
<ol className="list-decimal pl-5 text-sm">
<li>功能块处理上传的文档</li>
<li>功能块处理上传的文档</li>
<li>使用 GPT-4o 的代理执行技术分析</li>
<li>使用 Claude 的代理分析情感和语气</li>
<li>功能块结合结果生成最终报告</li>
<li>功能块结合结果生成最终报告</li>
</ol>
</div>
@@ -298,6 +272,6 @@ Agent 模块通过统一的推理接口支持多个 LLM 提供商。可用模型
## 最佳实践
- **在系统提示中具体说明**:清晰定义代理的角色、语气和限制。您的指令越具体,代理越能更好地实现其预期目的。
- **选择合适的温度设置**当准确性很重要时使用较低的温度设置0-0.3);当需要更具创意或多样化的响应时,增加温度0.7-2.0)。
- **有效利用工具**:集成与代理目的互补并增强其能力的工具。选择性地提供工具,以避免让代理不堪重负。对于重叠较少的任务,使用另一个代理块以获得最佳果。
- **在系统提示中具体说明**:清晰定义代理的角色、语气和限制。您的指令越具体,代理越能更好地完成其预期目的。
- **选择合适的温度设置**当准确性很重要时使用较低的温度设置0-0.3);当需要更具创意或多样化的响应时,可将温度提高0.7-2.0)。
- **有效利用工具**:集成与代理目的互补并增强其能力的工具。选择性地提供工具,以避免让代理不堪重负。对于重叠较少的任务,使用另一个代理块以获得最佳果。

View File

@@ -24,15 +24,7 @@ API 触发器将您的工作流公开为一个安全的 HTTP 端点。将 JSON
为每个参数添加一个 **输入格式** 字段。运行时输出键会镜像该模式,并且也可以在 `<api.input>` 下使用。
```yaml
- type: string
name: userId
value: demo-user # optional manual test value
- type: number
name: maxTokens
```
编辑器中的手动运行使用 `value` 列,因此您可以在不发送请求的情况下进行测试。在执行期间,解析器会填充 `<api.userId>` 和 `<api.input.userId>`。
在编辑器中手动运行使用 `value` 列,因此您可以在不发送请求的情况下进行测试。在执行过程中,解析器会填充 `<api.userId>` 和 `<api.input.userId>`。
## 请求示例
@@ -44,17 +36,17 @@ curl -X POST \
-d '{"userId":"demo-user","maxTokens":1024}'
```
成功的响应会返回来自执行器的序列化执行结果。错误会显示验证、身份验证或工作流失败的原因
成功的响应会返回来自执行器的序列化执行结果。错误会显示验证、认证或工作流失败的信息。
## 输出参考
| 参考 | 描述 |
|-----------|-------------|
| `<api.field>` | 输入格式中定义的字段 |
| `<api.field>` | 输入格式中定义的字段 |
| `<api.input>` | 整个结构化请求体 |
如果未定义输入格式,执行器仅在 `<api.input>` 处公开原始 JSON。
如果未定义输入格式,执行器仅在 `<api.input>` 处暴露原始 JSON。
<Callout type="warning">
一个工作流只能包含一个 API 触发器。更改后发布新的部署,以确保端点保持最新。
一个工作流只能包含一个 API 触发器。更改后发布新的部署,以确保端点保持最新。
</Callout>

View File

@@ -10,7 +10,6 @@ type: object
required:
- type
- name
- inputs
- connections
properties:
type:
@@ -22,21 +21,23 @@ properties:
description: Display name for this loop block
inputs:
type: object
required:
- loopType
description: Optional. If omitted, defaults will be applied.
properties:
loopType:
type: string
enum: [for, forEach]
description: Type of loop to execute
default: for
iterations:
type: number
description: Number of iterations (for 'for' loops)
default: 5
minimum: 1
maximum: 1000
collection:
type: string
description: Collection to iterate over (for 'forEach' loops)
default: ""
maxConcurrency:
type: number
description: Maximum concurrent executions
@@ -45,13 +46,10 @@ properties:
maximum: 10
connections:
type: object
required:
- loop
properties:
# Nested format (recommended)
loop:
type: object
required:
- start
properties:
start:
type: string
@@ -59,26 +57,37 @@ properties:
end:
type: string
description: Target block ID for loop completion (optional)
# Direct handle format (alternative)
loop-start-source:
type: string | string[]
description: Target block ID to execute inside the loop (direct format)
loop-end-source:
type: string | string[]
description: Target block ID for loop completion (direct format, optional)
error:
type: string
description: Target block ID for error handling
note: Use either the nested 'loop' format OR the direct 'loop-start-source' format, not both
```
## 连接配置
Loop 块使用一种特殊的连接格式,其中包含一个 `loop` 部分
循环块支持两种连接格式
### 直接句柄格式(替代方案)
```yaml
connections:
loop:
start: <string> # Target block ID to execute inside the loop
end: <string> # Target block ID after loop completion (optional)
loop-start-source: <string> # Target block ID to execute inside the loop
loop-end-source: <string> # Target block ID after loop completion (optional)
error: <string> # Target block ID for error handling (optional)
```
两种格式的功能完全相同,可根据您的喜好选择使用。
## 子块配置
循环中的块必须将其 `parentId` 设置为循环块的 ID
循环中的块必须将其 `parentId` 设置为循环块的 ID。`extent` 属性会自动设置为 `'parent'`,无需手动指定
```yaml
loop-1:
@@ -261,9 +270,62 @@ process-task:
success: task-completed
```
### 直接句柄格式示例
同一个循环可以使用直接句柄格式编写:
```yaml
my-loop:
type: loop
name: "Process Items"
inputs:
loopType: forEach
collection: <start.items>
connections:
loop-start-source: process-item # Direct handle format
loop-end-source: final-results # Direct handle format
error: handle-error
process-item:
type: agent
name: "Process Item"
parentId: my-loop
inputs:
systemPrompt: "Process this item"
userPrompt: <loop.currentItem>
model: gpt-4o
apiKey: '{{OPENAI_API_KEY}}'
```
### 最小循环示例(使用默认值)
您可以完全省略 `inputs` 部分,系统将应用默认值:
```yaml
simple-loop:
type: loop
name: "Simple Loop"
# No inputs section - defaults to loopType: 'for', iterations: 5
connections:
loop-start-source: process-step
loop-end-source: complete
process-step:
type: agent
name: "Process Step"
parentId: simple-loop
inputs:
systemPrompt: "Execute step"
userPrompt: "Step <loop.index>"
model: gpt-4o
apiKey: '{{OPENAI_API_KEY}}'
```
此循环默认将执行 5 次迭代。
## 循环变量
在循环子块中,可以使用以下特殊变量:
在循环子块中,可以使用以下特殊变量:
```yaml
# Available in all child blocks of the loop
@@ -274,7 +336,7 @@ process-task:
## 输出引用
循环完成后,可以引用其聚合结果:
循环完成后,可以引用其聚合结果:
```yaml
# In blocks after the loop
@@ -286,10 +348,10 @@ final-processor:
## 最佳实践
- 设置合理的迭代限制以避免长时间执行
- 使用 forEach 处理集合,使用 for 循环处理固定迭代
- 对 I/O 密集型操作,考虑使用 maxConcurrency
- 设置合理的迭代限制以避免长时间执行
- 使用 forEach 处理集合,使用 for 循环进行固定迭代
- 考虑对 I/O 密集型操作使用 maxConcurrency
- 包含错误处理以确保循环执行的健壮性
- 为循环子块使用描述性名称
- 先用小集合进行测试
- 对大集合执行时间进行监控
- 对大集合,监控执行时间

View File

@@ -3378,19 +3378,18 @@ checksums:
content/38: 1b693e51b5b8e31dd088c602663daab4
content/39: 5003cde407a705d39b969eff5bdab18a
content/40: 624199f0ed2378588024cfe6055d6b6b
content/41: 8accff30f8b36f6fc562b44d8fe271dd
content/42: fc62aefa5b726f57f2c9a17a276e601f
content/43: d72903dda50a36b12ec050a06ef23a1b
content/44: 19984dc55d279f1ae3226edf4b62aaa3
content/45: 9c2f91f89a914bf4661512275e461104
content/46: adc97756961688b2f4cc69b773c961c9
content/47: 7b0c309be79b5e1ab30e58a98ea0a778
content/48: ac70442527be4edcc6b0936e2e5dc8c1
content/49: a298d382850ddaa0b53e19975b9d12d2
content/50: 27535bb1de08548a7389708045c10714
content/51: 6f366fdb6389a03bfc4d83c12fa4099d
content/52: b2a4a0c279f47d58a2456f25a1e1c6f9
content/53: 17af9269613458de7f8e36a81b2a6d30
content/41: a2c636da376e80aa3427ce26b2dce0fd
content/42: d72903dda50a36b12ec050a06ef23a1b
content/43: 19984dc55d279f1ae3226edf4b62aaa3
content/44: 9c2f91f89a914bf4661512275e461104
content/45: adc97756961688b2f4cc69b773c961c9
content/46: 7b0c309be79b5e1ab30e58a98ea0a778
content/47: ac70442527be4edcc6b0936e2e5dc8c1
content/48: a298d382850ddaa0b53e19975b9d12d2
content/49: 27535bb1de08548a7389708045c10714
content/50: 6f366fdb6389a03bfc4d83c12fa4099d
content/51: b2a4a0c279f47d58a2456f25a1e1c6f9
content/52: 17af9269613458de7f8e36a81b2a6d30
fa2a1ea3b95cd7608e0a7d78834b7d49:
meta/title: d8df37d5e95512e955c43661de8a40d0
meta/description: d25527b81409cb3d42d9841e8ed318d4
@@ -3567,30 +3566,39 @@ checksums:
meta/title: 27e1d8e6df8b8d3ee07124342bcc5599
meta/description: 5a19804a907fe2a0c7ddc8a933e7e147
content/0: 07f0ef1d9ef5ee2993ab113d95797f37
content/1: c7dc738fa39cff694ecc04dca0aec33e
content/1: 200b847a5b848c11a507cecfcb381e02
content/2: dd7f8a45778d4dddd9bda78c19f046a4
content/3: 298c570bb45cd493870e9406dc8be50e
content/4: d4346df7c1c5da08e84931d2d449023a
content/5: bacf5914637cc0c1e00dfac72f60cf1f
content/6: 700ac74ffe23bbcc102b001676ee1818
content/7: d7c2f6c70070e594bffd0af3d20bbccb
content/8: 33b9b1e9744318597da4b925b0995be2
content/9: caa663af7342e02001aca78c23695b22
content/10: 1fa44e09185c753fec303e2c73e44eaf
content/11: d794cf2ea75f4aa8e73069f41fe8bc45
content/12: f9553f38263ad53c261995083622bdde
content/13: e4625b8a75814b2fcfe3c643a47e22cc
content/14: 20ee0fd34f3baab1099a2f8fb06b13cf
content/15: 73a9d04015d0016a994cf1e8fe8d5c12
content/16: 9a71a905db9dd5d43bdd769f006caf14
content/17: 18c31983f32539861fd5b4e8dd943169
content/18: 55300ae3e3c3213c4ad82c1cf21c89b2
content/19: 8a8aa301371bd07b15c6f568a8e7826f
content/20: a98cce6db23d9a86ac51179100f32529
content/21: b5a605662dbb6fc20ad37fdb436f0581
content/22: 2b204164f64dcf034baa6e5367679735
content/23: b2a4a0c279f47d58a2456f25a1e1c6f9
content/24: 15ebde5d554a3ec6000f71cf32b16859
content/3: b1870986cdef32b6cf3c79a4cd56a8b0
content/4: 5e7c060bf001ead8fb4005385509e857
content/5: 1de0d605f73842c3464e7fb2e09fb92c
content/6: 1a8e292ce7cc3adb2fe38cf2f5668b43
content/7: bacf5914637cc0c1e00dfac72f60cf1f
content/8: 336fdb536f9f5654d4d69a5adb1cf071
content/9: d7c2f6c70070e594bffd0af3d20bbccb
content/10: 33b9b1e9744318597da4b925b0995be2
content/11: caa663af7342e02001aca78c23695b22
content/12: 1fa44e09185c753fec303e2c73e44eaf
content/13: d794cf2ea75f4aa8e73069f41fe8bc45
content/14: f9553f38263ad53c261995083622bdde
content/15: e4625b8a75814b2fcfe3c643a47e22cc
content/16: 20ee0fd34f3baab1099a2f8fb06b13cf
content/17: 73a9d04015d0016a994cf1e8fe8d5c12
content/18: 9a71a905db9dd5d43bdd769f006caf14
content/19: b4017a890213e9ac0afd6b2cfc1bdefc
content/20: 479fd4d587cd0a1b8d27dd440e019215
content/21: db263bbe8b5984777eb738e9e4c3ec71
content/22: 1128c613d71aad35f668367ba2065a01
content/23: 12be239d6ea36a71b022996f56d66901
content/24: aa2240ef8ced8d9b67f7ab50665caae5
content/25: 5cce1d6a21fae7252b8670a47a2fae9e
content/26: 18c31983f32539861fd5b4e8dd943169
content/27: 55300ae3e3c3213c4ad82c1cf21c89b2
content/28: 8a8aa301371bd07b15c6f568a8e7826f
content/29: a98cce6db23d9a86ac51179100f32529
content/30: b5a605662dbb6fc20ad37fdb436f0581
content/31: 2b204164f64dcf034baa6e5367679735
content/32: b2a4a0c279f47d58a2456f25a1e1c6f9
content/33: 15ebde5d554a3ec6000f71cf32b16859
132869ed8674995bace940b1cefc4241:
meta/title: a753d6bd11bc5876c739b95c6d174914
meta/description: 71efdaceb123c4d6b6ee19c085cd9f0f
@@ -3958,12 +3966,11 @@ checksums:
content/3: b3c762557a1a308f3531ef1f19701807
content/4: bf29da79344f37eeadd4c176aa19b8ff
content/5: ae52879ebefa5664a6b7bf8ce5dd57ab
content/6: 5e1cbe37c5714b16c908c7e0fe0b23e3
content/7: ce487c9bc7a730e7d9da4a87b8eaa0a6
content/8: e73f4b831f5b77c71d7d86c83abcbf11
content/9: 07e064793f3e0bbcb02c4dc6083b6daa
content/10: a702b191c3f94458bee880d33853e0cb
content/11: ce110ab5da3ff96f8cbf96ce3376fc51
content/12: 83f9b3ab46b0501c8eb3989bec3f4f1b
content/13: e00be80effb71b0acb014f9aa53dfbe1
content/14: 847a381137856ded9faa5994fbc489fb
content/6: ce487c9bc7a730e7d9da4a87b8eaa0a6
content/7: e73f4b831f5b77c71d7d86c83abcbf11
content/8: 07e064793f3e0bbcb02c4dc6083b6daa
content/9: a702b191c3f94458bee880d33853e0cb
content/10: ce110ab5da3ff96f8cbf96ce3376fc51
content/11: 83f9b3ab46b0501c8eb3989bec3f4f1b
content/12: e00be80effb71b0acb014f9aa53dfbe1
content/13: 847a381137856ded9faa5994fbc489fb

View File

@@ -15,9 +15,9 @@
"@vercel/analytics": "1.5.0",
"@vercel/og": "^0.6.5",
"clsx": "^2.1.1",
"fumadocs-core": "^15.7.5",
"fumadocs-mdx": "^11.5.6",
"fumadocs-ui": "^15.7.5",
"fumadocs-core": "15.8.2",
"fumadocs-mdx": "11.10.1",
"fumadocs-ui": "15.8.2",
"lucide-react": "^0.511.0",
"next": "15.4.1",
"next-themes": "^0.4.6",

View File

@@ -237,7 +237,6 @@ describe('Copilot Checkpoints Revert API Route', () => {
parallels: {},
isDeployed: true,
deploymentStatuses: { production: 'deployed' },
hasActiveWebhook: false,
},
}
@@ -287,7 +286,6 @@ describe('Copilot Checkpoints Revert API Route', () => {
parallels: {},
isDeployed: true,
deploymentStatuses: { production: 'deployed' },
hasActiveWebhook: false,
lastSaved: 1640995200000,
},
},
@@ -309,7 +307,6 @@ describe('Copilot Checkpoints Revert API Route', () => {
parallels: {},
isDeployed: true,
deploymentStatuses: { production: 'deployed' },
hasActiveWebhook: false,
lastSaved: 1640995200000,
}),
}
@@ -445,7 +442,6 @@ describe('Copilot Checkpoints Revert API Route', () => {
parallels: {},
isDeployed: false,
deploymentStatuses: {},
hasActiveWebhook: false,
lastSaved: 1640995200000,
})
})
@@ -722,7 +718,6 @@ describe('Copilot Checkpoints Revert API Route', () => {
production: 'deployed',
staging: 'pending',
},
hasActiveWebhook: true,
deployedAt: '2024-01-01T10:00:00.000Z',
},
}
@@ -769,7 +764,6 @@ describe('Copilot Checkpoints Revert API Route', () => {
production: 'deployed',
staging: 'pending',
},
hasActiveWebhook: true,
deployedAt: '2024-01-01T10:00:00.000Z',
lastSaved: 1640995200000,
})

View File

@@ -73,7 +73,6 @@ export async function POST(request: NextRequest) {
parallels: checkpointState?.parallels || {},
isDeployed: checkpointState?.isDeployed || false,
deploymentStatuses: checkpointState?.deploymentStatuses || {},
hasActiveWebhook: checkpointState?.hasActiveWebhook || false,
lastSaved: Date.now(),
// Only include deployedAt if it's a valid date string that can be converted
...(checkpointState?.deployedAt &&

View File

@@ -0,0 +1,59 @@
import { type NextRequest, NextResponse } from 'next/server'
import { env } from '@/lib/env'
import { createLogger } from '@/lib/logs/console/logger'
const logger = createLogger('CopilotTrainingExamplesAPI')
export const runtime = 'nodejs'
export const dynamic = 'force-dynamic'
/**
 * POST handler that proxies a copilot training example to the external
 * agent-indexer service (`${AGENT_INDEXER_URL}/examples/add`).
 *
 * Request body: forwarded verbatim as JSON to the indexer; expected to carry
 * a `json` string field (only its presence is logged, never its contents).
 * Responses:
 *  - 500 when AGENT_INDEXER_URL or AGENT_INDEXER_API_KEY is not configured
 *  - upstream status + error text when the indexer rejects the example
 *  - 502 when the request to the indexer fails (network error, bad JSON, etc.)
 *  - otherwise the indexer's JSON response with status 200
 */
export async function POST(request: NextRequest) {
  // Fail fast if the indexer endpoint is not configured.
  const baseUrl = env.AGENT_INDEXER_URL
  if (!baseUrl) {
    logger.error('Missing AGENT_INDEXER_URL environment variable')
    return NextResponse.json({ error: 'Missing AGENT_INDEXER_URL env' }, { status: 500 })
  }
  // Fail fast if the indexer credentials are not configured.
  const apiKey = env.AGENT_INDEXER_API_KEY
  if (!apiKey) {
    logger.error('Missing AGENT_INDEXER_API_KEY environment variable')
    return NextResponse.json({ error: 'Missing AGENT_INDEXER_API_KEY env' }, { status: 500 })
  }
  try {
    // request.json() throws on malformed JSON; handled by the catch below (502).
    const body = await request.json()
    // Log only the shape of the payload, not its contents.
    logger.info('Sending workflow example to agent indexer', {
      hasJsonField: typeof body?.json === 'string',
    })
    const upstream = await fetch(`${baseUrl}/examples/add`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'x-api-key': apiKey,
      },
      body: JSON.stringify(body),
    })
    if (!upstream.ok) {
      // Surface the indexer's error text and status to the caller unchanged.
      const errorText = await upstream.text()
      logger.error('Agent indexer rejected the example', {
        status: upstream.status,
        error: errorText,
      })
      return NextResponse.json({ error: errorText }, { status: upstream.status })
    }
    const data = await upstream.json()
    logger.info('Successfully sent workflow example to agent indexer')
    return NextResponse.json(data, {
      headers: { 'content-type': 'application/json' },
    })
  } catch (err) {
    // Covers body-parse failures, network errors, and non-JSON upstream bodies.
    const errorMessage = err instanceof Error ? err.message : 'Failed to add example'
    logger.error('Failed to send workflow example', { error: err })
    // 502 Bad Gateway: this route acts as a proxy to the indexer.
    return NextResponse.json({ error: errorMessage }, { status: 502 })
  }
}

View File

@@ -97,7 +97,13 @@ export async function GET(request: NextRequest) {
const baseQuery = db
.select(selectColumns)
.from(workflowExecutionLogs)
.innerJoin(workflow, eq(workflowExecutionLogs.workflowId, workflow.id))
.innerJoin(
workflow,
and(
eq(workflowExecutionLogs.workflowId, workflow.id),
eq(workflow.workspaceId, params.workspaceId)
)
)
.innerJoin(
permissions,
and(
@@ -107,8 +113,8 @@ export async function GET(request: NextRequest) {
)
)
// Build conditions for the joined query
let conditions: SQL | undefined = eq(workflow.workspaceId, params.workspaceId)
// Build additional conditions for the query
let conditions: SQL | undefined
// Filter by level
if (params.level && params.level !== 'all') {
@@ -180,7 +186,13 @@ export async function GET(request: NextRequest) {
const countQuery = db
.select({ count: sql<number>`count(*)` })
.from(workflowExecutionLogs)
.innerJoin(workflow, eq(workflowExecutionLogs.workflowId, workflow.id))
.innerJoin(
workflow,
and(
eq(workflowExecutionLogs.workflowId, workflow.id),
eq(workflow.workspaceId, params.workspaceId)
)
)
.innerJoin(
permissions,
and(

View File

@@ -76,6 +76,8 @@ export async function GET() {
telemetryEnabled: userSettings.telemetryEnabled,
emailPreferences: userSettings.emailPreferences ?? {},
billingUsageNotificationsEnabled: userSettings.billingUsageNotificationsEnabled ?? true,
showFloatingControls: userSettings.showFloatingControls ?? true,
showTrainingControls: userSettings.showTrainingControls ?? false,
},
},
{ status: 200 }

View File

@@ -124,7 +124,13 @@ export async function GET(request: NextRequest) {
workflowDescription: workflow.description,
})
.from(workflowExecutionLogs)
.innerJoin(workflow, eq(workflowExecutionLogs.workflowId, workflow.id))
.innerJoin(
workflow,
and(
eq(workflowExecutionLogs.workflowId, workflow.id),
eq(workflow.workspaceId, params.workspaceId)
)
)
.innerJoin(
permissions,
and(

View File

@@ -133,6 +133,7 @@ describe('Webhook Trigger API Route', () => {
parallels: {},
isFromNormalizedTables: true,
}),
blockExistsInDeployment: vi.fn().mockResolvedValue(true),
}))
hasProcessedMessageMock.mockResolvedValue(false)

View File

@@ -10,6 +10,7 @@ import {
queueWebhookExecution,
verifyProviderAuth,
} from '@/lib/webhooks/processor'
import { blockExistsInDeployment } from '@/lib/workflows/db-helpers'
const logger = createLogger('WebhookTriggerAPI')
@@ -62,6 +63,16 @@ export async function POST(
return usageLimitError
}
if (foundWebhook.blockId) {
const blockExists = await blockExistsInDeployment(foundWorkflow.id, foundWebhook.blockId)
if (!blockExists) {
logger.warn(
`[${requestId}] Trigger block ${foundWebhook.blockId} not found in deployment for workflow ${foundWorkflow.id}`
)
return new NextResponse('Trigger block not deployed', { status: 404 })
}
}
return queueWebhookExecution(foundWebhook, foundWorkflow, body, request, {
requestId,
path,

View File

@@ -8,7 +8,10 @@ import { createLogger } from '@/lib/logs/console/logger'
import { getUserEntityPermissions } from '@/lib/permissions/utils'
import { generateRequestId } from '@/lib/utils'
import { applyAutoLayout } from '@/lib/workflows/autolayout'
import { loadWorkflowFromNormalizedTables } from '@/lib/workflows/db-helpers'
import {
loadWorkflowFromNormalizedTables,
type NormalizedWorkflowData,
} from '@/lib/workflows/db-helpers'
export const dynamic = 'force-dynamic'
@@ -36,10 +39,14 @@ const AutoLayoutRequestSchema = z.object({
})
.optional()
.default({}),
// Optional: if provided, use these blocks instead of loading from DB
// This allows using blocks with live measurements from the UI
blocks: z.record(z.any()).optional(),
edges: z.array(z.any()).optional(),
loops: z.record(z.any()).optional(),
parallels: z.record(z.any()).optional(),
})
type AutoLayoutRequest = z.infer<typeof AutoLayoutRequestSchema>
/**
* POST /api/workflows/[id]/autolayout
* Apply autolayout to an existing workflow
@@ -108,8 +115,23 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
return NextResponse.json({ error: 'Access denied' }, { status: 403 })
}
// Load current workflow state
const currentWorkflowData = await loadWorkflowFromNormalizedTables(workflowId)
// Use provided blocks/edges if available (with live measurements from UI),
// otherwise load from database
let currentWorkflowData: NormalizedWorkflowData | null
if (layoutOptions.blocks && layoutOptions.edges) {
logger.info(`[${requestId}] Using provided blocks with live measurements`)
currentWorkflowData = {
blocks: layoutOptions.blocks,
edges: layoutOptions.edges,
loops: layoutOptions.loops || {},
parallels: layoutOptions.parallels || {},
isFromNormalizedTables: false,
}
} else {
logger.info(`[${requestId}] Loading blocks from database`)
currentWorkflowData = await loadWorkflowFromNormalizedTables(workflowId)
}
if (!currentWorkflowData) {
logger.error(`[${requestId}] Could not load workflow ${workflowId} for autolayout`)

View File

@@ -1,6 +1,7 @@
import { db, workflowDeploymentVersion } from '@sim/db'
import { and, desc, eq } from 'drizzle-orm'
import type { NextRequest, NextResponse } from 'next/server'
import { verifyInternalToken } from '@/lib/auth/internal'
import { createLogger } from '@/lib/logs/console/logger'
import { generateRequestId } from '@/lib/utils'
import { validateWorkflowPermissions } from '@/lib/workflows/utils'
@@ -23,10 +24,22 @@ export async function GET(request: NextRequest, { params }: { params: Promise<{
try {
logger.debug(`[${requestId}] Fetching deployed state for workflow: ${id}`)
const { error } = await validateWorkflowPermissions(id, requestId, 'read')
if (error) {
const response = createErrorResponse(error.message, error.status)
return addNoCacheHeaders(response)
const authHeader = request.headers.get('authorization')
let isInternalCall = false
if (authHeader?.startsWith('Bearer ')) {
const token = authHeader.split(' ')[1]
isInternalCall = await verifyInternalToken(token)
}
if (!isInternalCall) {
const { error } = await validateWorkflowPermissions(id, requestId, 'read')
if (error) {
const response = createErrorResponse(error.message, error.status)
return addNoCacheHeaders(response)
}
} else {
logger.debug(`[${requestId}] Internal API call for deployed workflow: ${id}`)
}
const [active] = await db

View File

@@ -76,7 +76,6 @@ export async function POST(
isDeployed: true,
deployedAt: new Date(),
deploymentStatuses: deployedState.deploymentStatuses || {},
hasActiveWebhook: deployedState.hasActiveWebhook || false,
})
if (!saveResult.success) {

View File

@@ -133,7 +133,6 @@ export async function GET(request: NextRequest, { params }: { params: Promise<{
state: {
// Default values for expected properties
deploymentStatuses: {},
hasActiveWebhook: false,
// Data from normalized tables
blocks: normalizedData.blocks,
edges: normalizedData.edges,

View File

@@ -89,13 +89,6 @@ const ParallelSchema = z.object({
parallelType: z.enum(['count', 'collection']).optional(),
})
const DeploymentStatusSchema = z.object({
id: z.string(),
status: z.enum(['deploying', 'deployed', 'failed', 'stopping', 'stopped']),
deployedAt: z.date().optional(),
error: z.string().optional(),
})
const WorkflowStateSchema = z.object({
blocks: z.record(BlockStateSchema),
edges: z.array(EdgeSchema),
@@ -103,9 +96,7 @@ const WorkflowStateSchema = z.object({
parallels: z.record(ParallelSchema).optional(),
lastSaved: z.number().optional(),
isDeployed: z.boolean().optional(),
deployedAt: z.date().optional(),
deploymentStatuses: z.record(DeploymentStatusSchema).optional(),
hasActiveWebhook: z.boolean().optional(),
deployedAt: z.coerce.date().optional(),
})
/**
@@ -204,8 +195,6 @@ export async function PUT(request: NextRequest, { params }: { params: Promise<{
lastSaved: state.lastSaved || Date.now(),
isDeployed: state.isDeployed || false,
deployedAt: state.deployedAt,
deploymentStatuses: state.deploymentStatuses || {},
hasActiveWebhook: state.hasActiveWebhook || false,
}
const saveResult = await saveWorkflowToNormalizedTables(workflowId, workflowState as any)

View File

@@ -89,7 +89,6 @@ export async function GET(request: NextRequest) {
// Use normalized table data - construct state from normalized tables
workflowState = {
deploymentStatuses: {},
hasActiveWebhook: false,
blocks: normalizedData.blocks,
edges: normalizedData.edges,
loops: normalizedData.loops,

View File

@@ -89,7 +89,6 @@ const UPLOAD_CONFIG = {
RETRY_DELAY: 2000, // Initial retry delay in ms (2 seconds)
RETRY_MULTIPLIER: 2, // Standard exponential backoff (2s, 4s, 8s)
CHUNK_SIZE: 5 * 1024 * 1024,
VERCEL_MAX_BODY_SIZE: 4.5 * 1024 * 1024, // Vercel's 4.5MB limit
DIRECT_UPLOAD_THRESHOLD: 4 * 1024 * 1024, // Files > 4MB must use presigned URLs
LARGE_FILE_THRESHOLD: 50 * 1024 * 1024, // Files > 50MB need multipart upload
UPLOAD_TIMEOUT: 60000, // 60 second timeout per upload

View File

@@ -86,7 +86,6 @@ export function DiffControls() {
lastSaved: rawState.lastSaved || Date.now(),
isDeployed: rawState.isDeployed || false,
deploymentStatuses: rawState.deploymentStatuses || {},
hasActiveWebhook: rawState.hasActiveWebhook || false,
// Only include deployedAt if it's a valid date, never include null/undefined
...(rawState.deployedAt && rawState.deployedAt instanceof Date
? { deployedAt: rawState.deployedAt }

View File

@@ -30,6 +30,7 @@ import { Textarea } from '@/components/ui/textarea'
import { cn } from '@/lib/utils'
import { sanitizeForCopilot } from '@/lib/workflows/json-sanitizer'
import { formatEditSequence } from '@/lib/workflows/training/compute-edit-sequence'
import { useCurrentWorkflow } from '@/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-current-workflow'
import { useCopilotTrainingStore } from '@/stores/copilot-training/store'
/**
@@ -52,6 +53,8 @@ export function TrainingModal() {
markDatasetSent,
} = useCopilotTrainingStore()
const currentWorkflow = useCurrentWorkflow()
const [localPrompt, setLocalPrompt] = useState(currentPrompt)
const [localTitle, setLocalTitle] = useState(currentTitle)
const [copiedId, setCopiedId] = useState<string | null>(null)
@@ -63,6 +66,11 @@ export function TrainingModal() {
const [sendingSelected, setSendingSelected] = useState(false)
const [sentDatasets, setSentDatasets] = useState<Set<string>>(new Set())
const [failedDatasets, setFailedDatasets] = useState<Set<string>>(new Set())
const [sendingLiveWorkflow, setSendingLiveWorkflow] = useState(false)
const [liveWorkflowSent, setLiveWorkflowSent] = useState(false)
const [liveWorkflowFailed, setLiveWorkflowFailed] = useState(false)
const [liveWorkflowTitle, setLiveWorkflowTitle] = useState('')
const [liveWorkflowDescription, setLiveWorkflowDescription] = useState('')
const handleStart = () => {
if (localTitle.trim() && localPrompt.trim()) {
@@ -285,6 +293,46 @@ export function TrainingModal() {
}
}
const handleSendLiveWorkflow = async () => {
if (!liveWorkflowTitle.trim() || !liveWorkflowDescription.trim()) {
return
}
setLiveWorkflowSent(false)
setLiveWorkflowFailed(false)
setSendingLiveWorkflow(true)
try {
const sanitizedWorkflow = sanitizeForCopilot(currentWorkflow.workflowState)
const response = await fetch('/api/copilot/training/examples', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
json: JSON.stringify(sanitizedWorkflow),
source_path: liveWorkflowTitle,
summary: liveWorkflowDescription,
}),
})
if (!response.ok) {
const error = await response.json()
throw new Error(error.error || 'Failed to send live workflow')
}
setLiveWorkflowSent(true)
setLiveWorkflowTitle('')
setLiveWorkflowDescription('')
setTimeout(() => setLiveWorkflowSent(false), 5000)
} catch (error) {
console.error('Failed to send live workflow:', error)
setLiveWorkflowFailed(true)
setTimeout(() => setLiveWorkflowFailed(false), 5000)
} finally {
setSendingLiveWorkflow(false)
}
}
return (
<Dialog open={showModal} onOpenChange={toggleModal}>
<DialogContent className='max-w-3xl'>
@@ -335,24 +383,24 @@ export function TrainingModal() {
)}
<Tabs defaultValue={isTraining ? 'datasets' : 'new'} className='mt-4'>
<TabsList className='grid w-full grid-cols-2'>
<TabsList className='grid w-full grid-cols-3'>
<TabsTrigger value='new' disabled={isTraining}>
New Session
</TabsTrigger>
<TabsTrigger value='datasets'>Datasets ({datasets.length})</TabsTrigger>
<TabsTrigger value='live'>Send Live State</TabsTrigger>
</TabsList>
{/* New Training Session Tab */}
<TabsContent value='new' className='space-y-4'>
{startSnapshot && (
<div className='rounded-lg border bg-muted/50 p-3'>
<p className='font-medium text-muted-foreground text-sm'>Current Workflow State</p>
<p className='text-sm'>
{Object.keys(startSnapshot.blocks).length} blocks, {startSnapshot.edges.length}{' '}
edges
</p>
</div>
)}
<div className='rounded-lg border bg-muted/50 p-3'>
<p className='mb-2 font-medium text-muted-foreground text-sm'>
Current Workflow State
</p>
<p className='text-sm'>
{currentWorkflow.getBlockCount()} blocks, {currentWorkflow.getEdgeCount()} edges
</p>
</div>
<div className='space-y-2'>
<Label htmlFor='title'>Title</Label>
@@ -628,6 +676,94 @@ export function TrainingModal() {
</>
)}
</TabsContent>
{/* Send Live State Tab */}
<TabsContent value='live' className='space-y-4'>
<div className='rounded-lg border bg-muted/50 p-3'>
<p className='mb-2 font-medium text-muted-foreground text-sm'>
Current Workflow State
</p>
<p className='text-sm'>
{currentWorkflow.getBlockCount()} blocks, {currentWorkflow.getEdgeCount()} edges
</p>
</div>
<div className='space-y-2'>
<Label htmlFor='live-title'>Title</Label>
<Input
id='live-title'
placeholder='e.g., Customer Onboarding Workflow'
value={liveWorkflowTitle}
onChange={(e) => setLiveWorkflowTitle(e.target.value)}
/>
<p className='text-muted-foreground text-xs'>
A short title identifying this workflow
</p>
</div>
<div className='space-y-2'>
<Label htmlFor='live-description'>Description</Label>
<Textarea
id='live-description'
placeholder='Describe what this workflow does...'
value={liveWorkflowDescription}
onChange={(e) => setLiveWorkflowDescription(e.target.value)}
rows={3}
/>
<p className='text-muted-foreground text-xs'>
Explain the purpose and functionality of this workflow
</p>
</div>
<Button
onClick={handleSendLiveWorkflow}
disabled={
!liveWorkflowTitle.trim() ||
!liveWorkflowDescription.trim() ||
sendingLiveWorkflow ||
currentWorkflow.getBlockCount() === 0
}
className='w-full'
>
{sendingLiveWorkflow ? (
<>
<div className='mr-2 h-4 w-4 animate-spin rounded-full border-2 border-current border-t-transparent' />
Sending...
</>
) : liveWorkflowSent ? (
<>
<CheckCircle2 className='mr-2 h-4 w-4' />
Sent Successfully
</>
) : liveWorkflowFailed ? (
<>
<XCircle className='mr-2 h-4 w-4' />
Failed - Try Again
</>
) : (
<>
<Send className='mr-2 h-4 w-4' />
Send Live Workflow State
</>
)}
</Button>
{liveWorkflowSent && (
<div className='rounded-lg border bg-green-50 p-3 dark:bg-green-950/30'>
<p className='text-green-700 text-sm dark:text-green-300'>
Workflow state sent successfully!
</p>
</div>
)}
{liveWorkflowFailed && (
<div className='rounded-lg border bg-red-50 p-3 dark:bg-red-950/30'>
<p className='text-red-700 text-sm dark:text-red-300'>
Failed to send workflow state. Please try again.
</p>
</div>
)}
</TabsContent>
</Tabs>
</DialogContent>
</Dialog>

View File

@@ -10,6 +10,7 @@ import { checkTagTrigger, TagDropdown } from '@/components/ui/tag-dropdown'
import { createLogger } from '@/lib/logs/console/logger'
import { cn } from '@/lib/utils'
import { useSubBlockValue } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/workflow-block/components/sub-block/hooks/use-sub-block-value'
import { useAccessibleReferencePrefixes } from '@/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-accessible-reference-prefixes'
import type { SubBlockConfig } from '@/blocks/types'
import { useTagSelection } from '@/hooks/use-tag-selection'
@@ -60,6 +61,7 @@ export function ComboBox({
const [highlightedIndex, setHighlightedIndex] = useState(-1)
const emitTagSelection = useTagSelection(blockId, subBlockId)
const accessiblePrefixes = useAccessibleReferencePrefixes(blockId)
const inputRef = useRef<HTMLInputElement>(null)
const overlayRef = useRef<HTMLDivElement>(null)
@@ -432,7 +434,10 @@ export function ComboBox({
style={{ right: '42px' }}
>
<div className='w-full truncate text-foreground' style={{ scrollbarWidth: 'none' }}>
{formatDisplayText(displayValue)}
{formatDisplayText(displayValue, {
accessiblePrefixes,
highlightAll: !accessiblePrefixes,
})}
</div>
</div>
{/* Chevron button */}

View File

@@ -5,6 +5,7 @@ import { Label } from '@/components/ui/label'
import { checkTagTrigger, TagDropdown } from '@/components/ui/tag-dropdown'
import { cn } from '@/lib/utils'
import { useSubBlockValue } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/workflow-block/components/sub-block/hooks/use-sub-block-value'
import { useAccessibleReferencePrefixes } from '@/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-accessible-reference-prefixes'
import { useWorkflowRegistry } from '@/stores/workflows/registry/store'
interface InputFormatField {
@@ -152,6 +153,8 @@ export function InputMapping({
setMapping(updated)
}
const accessiblePrefixes = useAccessibleReferencePrefixes(blockId)
if (!selectedWorkflowId) {
return (
<div className='flex flex-col items-center justify-center rounded-lg border border-border/50 bg-muted/30 p-8 text-center'>
@@ -213,6 +216,7 @@ export function InputMapping({
blockId={blockId}
subBlockId={subBlockId}
disabled={isPreview || disabled}
accessiblePrefixes={accessiblePrefixes}
/>
)
})}
@@ -229,6 +233,7 @@ function InputMappingField({
blockId,
subBlockId,
disabled,
accessiblePrefixes,
}: {
fieldName: string
fieldType?: string
@@ -237,6 +242,7 @@ function InputMappingField({
blockId: string
subBlockId: string
disabled: boolean
accessiblePrefixes: Set<string> | undefined
}) {
const [showTags, setShowTags] = useState(false)
const [cursorPosition, setCursorPosition] = useState(0)
@@ -318,7 +324,10 @@ function InputMappingField({
className='w-full whitespace-pre'
style={{ scrollbarWidth: 'none', minWidth: 'fit-content' }}
>
{formatDisplayText(value)}
{formatDisplayText(value, {
accessiblePrefixes,
highlightAll: !accessiblePrefixes,
})}
</div>
</div>

View File

@@ -7,6 +7,7 @@ import { formatDisplayText } from '@/components/ui/formatted-text'
import { Input } from '@/components/ui/input'
import { Label } from '@/components/ui/label'
import { checkTagTrigger, TagDropdown } from '@/components/ui/tag-dropdown'
import { useAccessibleReferencePrefixes } from '@/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-accessible-reference-prefixes'
import type { SubBlockConfig } from '@/blocks/types'
import { useKnowledgeBaseTagDefinitions } from '@/hooks/use-knowledge-base-tag-definitions'
import { useTagSelection } from '@/hooks/use-tag-selection'
@@ -55,6 +56,9 @@ export function KnowledgeTagFilters({
// Use KB tag definitions hook to get available tags
const { tagDefinitions, isLoading } = useKnowledgeBaseTagDefinitions(knowledgeBaseId)
// Get accessible prefixes for variable highlighting
const accessiblePrefixes = useAccessibleReferencePrefixes(blockId)
// State for managing tag dropdown
const [activeTagDropdown, setActiveTagDropdown] = useState<{
rowIndex: number
@@ -314,7 +318,12 @@ export function KnowledgeTagFilters({
className='w-full border-0 text-transparent caret-foreground placeholder:text-muted-foreground/50 focus-visible:ring-0 focus-visible:ring-offset-0'
/>
<div className='pointer-events-none absolute inset-0 flex items-center overflow-hidden bg-transparent px-3 text-sm'>
<div className='whitespace-pre'>{formatDisplayText(cellValue)}</div>
<div className='whitespace-pre'>
{formatDisplayText(cellValue, {
accessiblePrefixes,
highlightAll: !accessiblePrefixes,
})}
</div>
</div>
</div>
</td>

View File

@@ -11,6 +11,7 @@ import { createLogger } from '@/lib/logs/console/logger'
import { cn } from '@/lib/utils'
import { WandPromptBar } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/wand-prompt-bar/wand-prompt-bar'
import { useSubBlockValue } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/workflow-block/components/sub-block/hooks/use-sub-block-value'
import { useAccessibleReferencePrefixes } from '@/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-accessible-reference-prefixes'
import { useWand } from '@/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-wand'
import type { SubBlockConfig } from '@/blocks/types'
import { useTagSelection } from '@/hooks/use-tag-selection'
@@ -92,6 +93,7 @@ export function LongInput({
const overlayRef = useRef<HTMLDivElement>(null)
const [activeSourceBlockId, setActiveSourceBlockId] = useState<string | null>(null)
const containerRef = useRef<HTMLDivElement>(null)
const accessiblePrefixes = useAccessibleReferencePrefixes(blockId)
// Use preview value when in preview mode, otherwise use store value or prop value
const baseValue = isPreview ? previewValue : propValue !== undefined ? propValue : storeValue
@@ -405,7 +407,10 @@ export function LongInput({
height: `${height}px`,
}}
>
{formatDisplayText(value?.toString() ?? '')}
{formatDisplayText(value?.toString() ?? '', {
accessiblePrefixes,
highlightAll: !accessiblePrefixes,
})}
</div>
{/* Wand Button */}

View File

@@ -11,6 +11,7 @@ import { createLogger } from '@/lib/logs/console/logger'
import { cn } from '@/lib/utils'
import { WandPromptBar } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/wand-prompt-bar/wand-prompt-bar'
import { useSubBlockValue } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/workflow-block/components/sub-block/hooks/use-sub-block-value'
import { useAccessibleReferencePrefixes } from '@/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-accessible-reference-prefixes'
import { useWand } from '@/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-wand'
import type { SubBlockConfig } from '@/blocks/types'
import { useTagSelection } from '@/hooks/use-tag-selection'
@@ -345,6 +346,8 @@ export function ShortInput({
}
}
const accessiblePrefixes = useAccessibleReferencePrefixes(blockId)
return (
<>
<WandPromptBar
@@ -417,7 +420,10 @@ export function ShortInput({
>
{password && !isFocused
? '•'.repeat(value?.toString().length ?? 0)
: formatDisplayText(value?.toString() ?? '')}
: formatDisplayText(value?.toString() ?? '', {
accessiblePrefixes,
highlightAll: !accessiblePrefixes,
})}
</div>
</div>

View File

@@ -22,6 +22,7 @@ import { checkTagTrigger, TagDropdown } from '@/components/ui/tag-dropdown'
import { Textarea } from '@/components/ui/textarea'
import { cn } from '@/lib/utils'
import { useSubBlockValue } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/workflow-block/components/sub-block/hooks/use-sub-block-value'
import { useAccessibleReferencePrefixes } from '@/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-accessible-reference-prefixes'
interface Field {
id: string
@@ -80,6 +81,7 @@ export function FieldFormat({
const [cursorPosition, setCursorPosition] = useState(0)
const [activeFieldId, setActiveFieldId] = useState<string | null>(null)
const [activeSourceBlockId, setActiveSourceBlockId] = useState<string | null>(null)
const accessiblePrefixes = useAccessibleReferencePrefixes(blockId)
// Use preview value when in preview mode, otherwise use store value
const value = isPreview ? previewValue : storeValue
@@ -471,7 +473,10 @@ export function FieldFormat({
style={{ scrollbarWidth: 'none', minWidth: 'fit-content' }}
>
{formatDisplayText(
(localValues[field.id] ?? field.value ?? '')?.toString()
(localValues[field.id] ?? field.value ?? '')?.toString(),
accessiblePrefixes
? { accessiblePrefixes }
: { highlightAll: true }
)}
</div>
</div>

View File

@@ -24,6 +24,7 @@ import {
} from '@/components/ui/select'
import { createLogger } from '@/lib/logs/console/logger'
import type { McpTransport } from '@/lib/mcp/types'
import { useAccessibleReferencePrefixes } from '@/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-accessible-reference-prefixes'
import { useMcpServerTest } from '@/hooks/use-mcp-server-test'
import { useMcpServersStore } from '@/stores/mcp-servers/store'
@@ -33,6 +34,7 @@ interface McpServerModalProps {
open: boolean
onOpenChange: (open: boolean) => void
onServerCreated?: () => void
blockId: string
}
interface McpServerFormData {
@@ -42,7 +44,12 @@ interface McpServerFormData {
headers?: Record<string, string>
}
export function McpServerModal({ open, onOpenChange, onServerCreated }: McpServerModalProps) {
export function McpServerModal({
open,
onOpenChange,
onServerCreated,
blockId,
}: McpServerModalProps) {
const params = useParams()
const workspaceId = params.workspaceId as string
const [formData, setFormData] = useState<McpServerFormData>({
@@ -262,6 +269,8 @@ export function McpServerModal({ open, onOpenChange, onServerCreated }: McpServe
workspaceId,
])
const accessiblePrefixes = useAccessibleReferencePrefixes(blockId)
return (
<Dialog open={open} onOpenChange={onOpenChange}>
<DialogContent className='sm:max-w-[600px]'>
@@ -337,7 +346,10 @@ export function McpServerModal({ open, onOpenChange, onServerCreated }: McpServe
className='whitespace-nowrap'
style={{ transform: `translateX(-${urlScrollLeft}px)` }}
>
{formatDisplayText(formData.url || '')}
{formatDisplayText(formData.url || '', {
accessiblePrefixes,
highlightAll: !accessiblePrefixes,
})}
</div>
</div>
</div>
@@ -389,7 +401,10 @@ export function McpServerModal({ open, onOpenChange, onServerCreated }: McpServe
transform: `translateX(-${headerScrollLeft[`key-${index}`] || 0}px)`,
}}
>
{formatDisplayText(key || '')}
{formatDisplayText(key || '', {
accessiblePrefixes,
highlightAll: !accessiblePrefixes,
})}
</div>
</div>
</div>
@@ -417,7 +432,10 @@ export function McpServerModal({ open, onOpenChange, onServerCreated }: McpServe
transform: `translateX(-${headerScrollLeft[`value-${index}`] || 0}px)`,
}}
>
{formatDisplayText(value || '')}
{formatDisplayText(value || '', {
accessiblePrefixes,
highlightAll: !accessiblePrefixes,
})}
</div>
</div>
</div>

View File

@@ -1977,6 +1977,7 @@ export function ToolInput({
// Refresh MCP tools when a new server is created
refreshTools(true)
}}
blockId={blockId}
/>
</div>
)

View File

@@ -148,6 +148,7 @@ export function WorkflowBlock({ id, data }: NodeProps<WorkflowBlockProps>) {
)
const storeIsWide = useWorkflowStore((state) => state.blocks[id]?.isWide ?? false)
const storeBlockHeight = useWorkflowStore((state) => state.blocks[id]?.height ?? 0)
const storeBlockLayout = useWorkflowStore((state) => state.blocks[id]?.layout)
const storeBlockAdvancedMode = useWorkflowStore(
(state) => state.blocks[id]?.advancedMode ?? false
)
@@ -168,6 +169,10 @@ export function WorkflowBlock({ id, data }: NodeProps<WorkflowBlockProps>) {
? (currentWorkflow.blocks[id]?.height ?? 0)
: storeBlockHeight
const blockWidth = currentWorkflow.isDiffMode
? (currentWorkflow.blocks[id]?.layout?.measuredWidth ?? 0)
: (storeBlockLayout?.measuredWidth ?? 0)
// Get per-block webhook status by checking if webhook is configured
const activeWorkflowId = useWorkflowRegistry((state) => state.activeWorkflowId)
@@ -240,7 +245,7 @@ export function WorkflowBlock({ id, data }: NodeProps<WorkflowBlockProps>) {
}, [id, collaborativeSetSubblockValue])
// Workflow store actions
const updateBlockHeight = useWorkflowStore((state) => state.updateBlockHeight)
const updateBlockLayoutMetrics = useWorkflowStore((state) => state.updateBlockLayoutMetrics)
// Execution store
const isActiveBlock = useExecutionStore((state) => state.activeBlockIds.has(id))
@@ -419,9 +424,9 @@ export function WorkflowBlock({ id, data }: NodeProps<WorkflowBlockProps>) {
if (!contentRef.current) return
let rafId: number
const debouncedUpdate = debounce((height: number) => {
if (height !== blockHeight) {
updateBlockHeight(id, height)
const debouncedUpdate = debounce((dimensions: { width: number; height: number }) => {
if (dimensions.height !== blockHeight || dimensions.width !== blockWidth) {
updateBlockLayoutMetrics(id, dimensions)
updateNodeInternals(id)
}
}, 100)
@@ -435,9 +440,10 @@ export function WorkflowBlock({ id, data }: NodeProps<WorkflowBlockProps>) {
// Schedule the update on the next animation frame
rafId = requestAnimationFrame(() => {
for (const entry of entries) {
const height =
entry.borderBoxSize[0]?.blockSize ?? entry.target.getBoundingClientRect().height
debouncedUpdate(height)
const rect = entry.target.getBoundingClientRect()
const height = entry.borderBoxSize[0]?.blockSize ?? rect.height
const width = entry.borderBoxSize[0]?.inlineSize ?? rect.width
debouncedUpdate({ width, height })
}
})
})
@@ -450,7 +456,7 @@ export function WorkflowBlock({ id, data }: NodeProps<WorkflowBlockProps>) {
cancelAnimationFrame(rafId)
}
}
}, [id, blockHeight, updateBlockHeight, updateNodeInternals, lastUpdate])
}, [id, blockHeight, blockWidth, updateBlockLayoutMetrics, updateNodeInternals, lastUpdate])
// SubBlock layout management
function groupSubBlocks(subBlocks: SubBlockConfig[], blockId: string) {

View File

@@ -0,0 +1,64 @@
import { useMemo } from 'react'
import { shallow } from 'zustand/shallow'
import { BlockPathCalculator } from '@/lib/block-path-calculator'
import { SYSTEM_REFERENCE_PREFIXES } from '@/lib/workflows/references'
import { normalizeBlockName } from '@/stores/workflows/utils'
import { useWorkflowStore } from '@/stores/workflows/workflow/store'
import type { Loop, Parallel } from '@/stores/workflows/workflow/types'
/**
 * Computes the set of normalized reference prefixes the given block may
 * reference in tag expressions: its own id/name, every block on a path
 * leading into it, the starter block, any sibling inside a loop/parallel
 * group it belongs to, plus the system-wide prefixes.
 *
 * Returns `undefined` when no block id is supplied, which callers use to
 * mean "no restriction information available".
 */
export function useAccessibleReferencePrefixes(blockId?: string | null): Set<string> | undefined {
  const { blocks, edges, loops, parallels } = useWorkflowStore(
    (state) => ({
      blocks: state.blocks,
      edges: state.edges,
      loops: state.loops || {},
      parallels: state.parallels || {},
    }),
    shallow
  )

  return useMemo(() => {
    if (!blockId) return undefined

    // Every node lying on some path into this block is referenceable.
    const pathEdges = edges.map(({ source, target }) => ({ source, target }))
    const reachableIds = new Set<string>(BlockPathCalculator.findAllPathNodes(pathEdges, blockId))
    reachableIds.add(blockId)

    // The starter block is always referenceable regardless of graph position.
    const starter = Object.values(blocks).find((block) => block.type === 'starter')
    if (starter) {
      reachableIds.add(starter.id)
    }

    // Blocks sharing a loop/parallel group with this block can see each other.
    const addGroupMembers = (groups: Record<string, { nodes?: string[] } | undefined>) => {
      for (const group of Object.values(groups)) {
        const nodes = group?.nodes
        if (nodes?.includes(blockId)) {
          for (const nodeId of nodes) {
            reachableIds.add(nodeId)
          }
        }
      }
    }
    addGroupMembers(loops as Record<string, Loop>)
    addGroupMembers(parallels as Record<string, Parallel>)

    // Emit a normalized prefix for each reachable block's id and display name.
    const prefixes = new Set<string>()
    for (const id of reachableIds) {
      prefixes.add(normalizeBlockName(id))
      const blockName = blocks[id]?.name
      if (blockName) {
        prefixes.add(normalizeBlockName(blockName))
      }
    }
    SYSTEM_REFERENCE_PREFIXES.forEach((prefix) => prefixes.add(prefix))

    return prefixes
  }, [blockId, blocks, edges, loops, parallels])
}

View File

@@ -19,7 +19,6 @@ export interface CurrentWorkflow {
deployedAt?: Date
deploymentStatuses?: Record<string, DeploymentStatus>
needsRedeployment?: boolean
hasActiveWebhook?: boolean
// Mode information
isDiffMode: boolean
@@ -66,7 +65,6 @@ export function useCurrentWorkflow(): CurrentWorkflow {
deployedAt: activeWorkflow.deployedAt,
deploymentStatuses: activeWorkflow.deploymentStatuses,
needsRedeployment: activeWorkflow.needsRedeployment,
hasActiveWebhook: activeWorkflow.hasActiveWebhook,
// Mode information - update to reflect ready state
isDiffMode: shouldUseDiff,

View File

@@ -98,18 +98,12 @@ const getBlockDimensions = (
}
}
if (block.type === 'workflowBlock') {
const nodeWidth = block.data?.width || block.width
const nodeHeight = block.data?.height || block.height
if (nodeWidth && nodeHeight) {
return { width: nodeWidth, height: nodeHeight }
}
}
return {
width: block.isWide ? 450 : block.data?.width || block.width || 350,
height: Math.max(block.height || block.data?.height || 150, 100),
width: block.layout?.measuredWidth || (block.isWide ? 450 : block.data?.width || 350),
height: Math.max(
block.layout?.measuredHeight || block.height || block.data?.height || 150,
100
),
}
}

View File

@@ -78,13 +78,19 @@ export async function applyAutoLayoutToWorkflow(
},
}
// Call the autolayout API route which has access to the server-side API key
// Call the autolayout API route, sending blocks with live measurements
const response = await fetch(`/api/workflows/${workflowId}/autolayout`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify(layoutOptions),
body: JSON.stringify({
...layoutOptions,
blocks,
edges,
loops,
parallels,
}),
})
if (!response.ok) {
@@ -198,16 +204,19 @@ export async function applyAutoLayoutAndUpdateStore(
useWorkflowStore.getState().updateLastSaved()
// Clean up the workflow state for API validation
// Destructure out UI-only fields that shouldn't be persisted
const { deploymentStatuses, needsRedeployment, dragStartPosition, ...stateToSave } =
newWorkflowState
const cleanedWorkflowState = {
...newWorkflowState,
...stateToSave,
// Convert null dates to undefined (since they're optional)
deployedAt: newWorkflowState.deployedAt ? new Date(newWorkflowState.deployedAt) : undefined,
deployedAt: stateToSave.deployedAt ? new Date(stateToSave.deployedAt) : undefined,
// Ensure other optional fields are properly handled
loops: newWorkflowState.loops || {},
parallels: newWorkflowState.parallels || {},
deploymentStatuses: newWorkflowState.deploymentStatuses || {},
loops: stateToSave.loops || {},
parallels: stateToSave.parallels || {},
// Sanitize edges: remove null/empty handle fields to satisfy schema (optional strings)
edges: (newWorkflowState.edges || []).map((edge: any) => {
edges: (stateToSave.edges || []).map((edge: any) => {
const { sourceHandle, targetHandle, ...rest } = edge || {}
const sanitized: any = { ...rest }
if (typeof sourceHandle === 'string' && sourceHandle.length > 0) {

View File

@@ -781,7 +781,7 @@ const WorkflowContent = React.memo(() => {
// Create the trigger block at the center of the viewport
const centerPosition = project({ x: window.innerWidth / 2, y: window.innerHeight / 2 })
const id = `${triggerId}_${Date.now()}`
const id = crypto.randomUUID()
// Add the trigger block with trigger mode if specified
addBlock(

View File

@@ -19,10 +19,6 @@ import { Dialog, DialogOverlay, DialogPortal, DialogTitle } from '@/components/u
import { Input } from '@/components/ui/input'
import { useBrandConfig } from '@/lib/branding/branding'
import { cn } from '@/lib/utils'
import {
TemplateCard,
TemplateCardSkeleton,
} from '@/app/workspace/[workspaceId]/templates/components/template-card'
import { getKeyboardShortcutText } from '@/app/workspace/[workspaceId]/w/hooks/use-keyboard-shortcuts'
import { getAllBlocks } from '@/blocks'
import { type NavigationSection, useSearchNavigation } from './hooks/use-search-navigation'
@@ -30,28 +26,12 @@ import { type NavigationSection, useSearchNavigation } from './hooks/use-search-
interface SearchModalProps {
open: boolean
onOpenChange: (open: boolean) => void
templates?: TemplateData[]
workflows?: WorkflowItem[]
workspaces?: WorkspaceItem[]
loading?: boolean
knowledgeBases?: KnowledgeBaseItem[]
isOnWorkflowPage?: boolean
}
interface TemplateData {
id: string
title: string
description: string
author: string
usageCount: string
stars: number
icon: string
iconColor: string
state?: {
blocks?: Record<string, { type: string; name?: string }>
}
isStarred?: boolean
}
interface WorkflowItem {
id: string
name: string
@@ -93,6 +73,14 @@ interface PageItem {
shortcut?: string
}
interface KnowledgeBaseItem {
id: string
name: string
description?: string
href: string
isCurrent?: boolean
}
interface DocItem {
id: string
name: string
@@ -104,10 +92,9 @@ interface DocItem {
export function SearchModal({
open,
onOpenChange,
templates = [],
workflows = [],
workspaces = [],
loading = false,
knowledgeBases = [],
isOnWorkflowPage = false,
}: SearchModalProps) {
const [searchQuery, setSearchQuery] = useState('')
@@ -116,14 +103,6 @@ export function SearchModal({
const workspaceId = params.workspaceId as string
const brand = useBrandConfig()
// Local state for templates to handle star changes
const [localTemplates, setLocalTemplates] = useState<TemplateData[]>(templates)
// Update local templates when props change
useEffect(() => {
setLocalTemplates(templates)
}, [templates])
// Get all available blocks - only when on workflow page
const blocks = useMemo(() => {
if (!isOnWorkflowPage) return []
@@ -131,10 +110,7 @@ export function SearchModal({
const allBlocks = getAllBlocks()
const regularBlocks = allBlocks
.filter(
(block) =>
block.type !== 'starter' &&
!block.hideFromToolbar &&
(block.category === 'blocks' || block.category === 'triggers')
(block) => block.type !== 'starter' && !block.hideFromToolbar && block.category === 'blocks'
)
.map(
(block): BlockItem => ({
@@ -171,6 +147,30 @@ export function SearchModal({
return [...regularBlocks, ...specialBlocks].sort((a, b) => a.name.localeCompare(b.name))
}, [isOnWorkflowPage])
// Get all available triggers - only when on workflow page
const triggers = useMemo(() => {
if (!isOnWorkflowPage) return []
const allBlocks = getAllBlocks()
return allBlocks
.filter(
(block) =>
block.type !== 'starter' && !block.hideFromToolbar && block.category === 'triggers'
)
.map(
(block): BlockItem => ({
id: block.type,
name: block.name,
description: block.description || '',
longDescription: block.longDescription,
icon: block.icon,
bgColor: block.bgColor || '#6B7280',
type: block.type,
})
)
.sort((a, b) => a.name.localeCompare(b.name))
}, [isOnWorkflowPage])
// Get all available tools - only when on workflow page
const tools = useMemo(() => {
if (!isOnWorkflowPage) return []
@@ -252,24 +252,18 @@ export function SearchModal({
return blocks.filter((block) => block.name.toLowerCase().includes(query))
}, [blocks, searchQuery])
const filteredTriggers = useMemo(() => {
if (!searchQuery.trim()) return triggers
const query = searchQuery.toLowerCase()
return triggers.filter((trigger) => trigger.name.toLowerCase().includes(query))
}, [triggers, searchQuery])
const filteredTools = useMemo(() => {
if (!searchQuery.trim()) return tools
const query = searchQuery.toLowerCase()
return tools.filter((tool) => tool.name.toLowerCase().includes(query))
}, [tools, searchQuery])
const filteredTemplates = useMemo(() => {
if (!searchQuery.trim()) return localTemplates.slice(0, 8)
const query = searchQuery.toLowerCase()
return localTemplates
.filter(
(template) =>
template.title.toLowerCase().includes(query) ||
template.description.toLowerCase().includes(query)
)
.slice(0, 8)
}, [localTemplates, searchQuery])
const filteredWorkflows = useMemo(() => {
if (!searchQuery.trim()) return workflows
const query = searchQuery.toLowerCase()
@@ -282,6 +276,14 @@ export function SearchModal({
return workspaces.filter((workspace) => workspace.name.toLowerCase().includes(query))
}, [workspaces, searchQuery])
const filteredKnowledgeBases = useMemo(() => {
if (!searchQuery.trim()) return knowledgeBases
const query = searchQuery.toLowerCase()
return knowledgeBases.filter(
(kb) => kb.name.toLowerCase().includes(query) || kb.description?.toLowerCase().includes(query)
)
}, [knowledgeBases, searchQuery])
const filteredPages = useMemo(() => {
if (!searchQuery.trim()) return pages
const query = searchQuery.toLowerCase()
@@ -308,6 +310,16 @@ export function SearchModal({
})
}
if (filteredTriggers.length > 0) {
sections.push({
id: 'triggers',
name: 'Triggers',
type: 'grid',
items: filteredTriggers,
gridCols: filteredTriggers.length, // Single row - all items in one row
})
}
if (filteredTools.length > 0) {
sections.push({
id: 'tools',
@@ -318,20 +330,11 @@ export function SearchModal({
})
}
if (filteredTemplates.length > 0) {
sections.push({
id: 'templates',
name: 'Templates',
type: 'grid',
items: filteredTemplates,
gridCols: filteredTemplates.length, // Single row - all templates in one row
})
}
// Combine all list items into one section
const listItems = [
...filteredWorkspaces.map((item) => ({ type: 'workspace', data: item })),
...filteredWorkflows.map((item) => ({ type: 'workflow', data: item })),
...filteredKnowledgeBases.map((item) => ({ type: 'knowledgebase', data: item })),
...filteredPages.map((item) => ({ type: 'page', data: item })),
...filteredDocs.map((item) => ({ type: 'doc', data: item })),
]
@@ -348,10 +351,11 @@ export function SearchModal({
return sections
}, [
filteredBlocks,
filteredTriggers,
filteredTools,
filteredTemplates,
filteredWorkspaces,
filteredWorkflows,
filteredKnowledgeBases,
filteredPages,
filteredDocs,
])
@@ -463,23 +467,6 @@ export function SearchModal({
return () => window.removeEventListener('keydown', handleKeyDown)
}, [open, handlePageClick, workspaceId])
// Handle template usage callback (closes modal after template is used)
const handleTemplateUsed = useCallback(() => {
onOpenChange(false)
}, [onOpenChange])
// Handle star change callback from template card
const handleStarChange = useCallback(
(templateId: string, isStarred: boolean, newStarCount: number) => {
setLocalTemplates((prevTemplates) =>
prevTemplates.map((template) =>
template.id === templateId ? { ...template, isStarred, stars: newStarCount } : template
)
)
},
[]
)
// Handle item selection based on current item
const handleItemSelection = useCallback(() => {
const current = getCurrentItem()
@@ -487,11 +474,8 @@ export function SearchModal({
const { section, item } = current
if (section.id === 'blocks' || section.id === 'tools') {
if (section.id === 'blocks' || section.id === 'triggers' || section.id === 'tools') {
handleBlockClick(item.type)
} else if (section.id === 'templates') {
// Templates don't have direct selection, but we close the modal
onOpenChange(false)
} else if (section.id === 'list') {
switch (item.type) {
case 'workspace':
@@ -508,6 +492,13 @@ export function SearchModal({
handleNavigationClick(item.data.href)
}
break
case 'knowledgebase':
if (item.data.isCurrent) {
onOpenChange(false)
} else {
handleNavigationClick(item.data.href)
}
break
case 'page':
handlePageClick(item.data.href)
break
@@ -570,15 +561,6 @@ export function SearchModal({
[getCurrentItem]
)
// Render skeleton cards for loading state
const renderSkeletonCards = () => {
return Array.from({ length: 8 }).map((_, index) => (
<div key={`skeleton-${index}`} className='w-80 flex-shrink-0'>
<TemplateCardSkeleton />
</div>
))
}
return (
<Dialog open={open} onOpenChange={onOpenChange}>
<DialogPortal>
@@ -654,6 +636,52 @@ export function SearchModal({
</div>
)}
{/* Triggers Section */}
{filteredTriggers.length > 0 && (
<div>
<h3 className='mb-3 ml-6 font-normal font-sans text-muted-foreground text-sm leading-none tracking-normal'>
Triggers
</h3>
<div
ref={(el) => {
if (el) scrollRefs.current.set('triggers', el)
}}
className='scrollbar-none flex gap-2 overflow-x-auto px-6 pb-1'
style={{ scrollbarWidth: 'none', msOverflowStyle: 'none' }}
>
{filteredTriggers.map((trigger, index) => (
<button
key={trigger.id}
onClick={() => handleBlockClick(trigger.type)}
data-nav-item={`triggers-${index}`}
className={`flex h-auto w-[180px] flex-shrink-0 cursor-pointer flex-col items-start gap-2 rounded-[8px] border p-3 transition-all duration-200 ${
isItemSelected('triggers', index)
? 'border-border bg-secondary/80'
: 'border-border/40 bg-background/60 hover:border-border hover:bg-secondary/80'
}`}
>
<div className='flex items-center gap-2'>
<div
className='flex h-5 w-5 items-center justify-center rounded-[4px]'
style={{ backgroundColor: trigger.bgColor }}
>
<trigger.icon className='!h-3.5 !w-3.5 text-white' />
</div>
<span className='font-medium font-sans text-foreground text-sm leading-none tracking-normal'>
{trigger.name}
</span>
</div>
{(trigger.longDescription || trigger.description) && (
<p className='line-clamp-2 text-left text-muted-foreground text-xs'>
{trigger.longDescription || trigger.description}
</p>
)}
</button>
))}
</div>
</div>
)}
{/* Tools Section */}
{filteredTools.length > 0 && (
<div>
@@ -700,49 +728,6 @@ export function SearchModal({
</div>
)}
{/* Templates Section */}
{(loading || filteredTemplates.length > 0) && (
<div>
<h3 className='mb-3 ml-6 font-normal font-sans text-muted-foreground text-sm leading-none tracking-normal'>
Templates
</h3>
<div
ref={(el) => {
if (el) scrollRefs.current.set('templates', el)
}}
className='scrollbar-none flex gap-4 overflow-x-auto pr-6 pb-1 pl-6'
style={{ scrollbarWidth: 'none', msOverflowStyle: 'none' }}
>
{loading
? renderSkeletonCards()
: filteredTemplates.map((template, index) => (
<div
key={template.id}
data-nav-item={`templates-${index}`}
className={`w-80 flex-shrink-0 rounded-lg transition-all duration-200 ${
isItemSelected('templates', index) ? 'opacity-75' : 'opacity-100'
}`}
>
<TemplateCard
id={template.id}
title={template.title}
description={template.description}
author={template.author}
usageCount={template.usageCount}
stars={template.stars}
icon={template.icon}
iconColor={template.iconColor}
state={template.state}
isStarred={template.isStarred}
onTemplateUsed={handleTemplateUsed}
onStarChange={handleStarChange}
/>
</div>
))}
</div>
</div>
)}
{/* List sections (Workspaces, Workflows, Pages, Docs) */}
{navigationSections.find((s) => s.id === 'list') && (
<div
@@ -826,6 +811,43 @@ export function SearchModal({
</div>
)}
{/* Knowledge Bases */}
{filteredKnowledgeBases.length > 0 && (
<div className='mb-6'>
<h3 className='mb-3 ml-6 font-normal font-sans text-muted-foreground text-sm leading-none tracking-normal'>
Knowledge Bases
</h3>
<div className='space-y-1 px-6'>
{filteredKnowledgeBases.map((kb, kbIndex) => {
const globalIndex =
filteredWorkspaces.length + filteredWorkflows.length + kbIndex
return (
<button
key={kb.id}
onClick={() =>
kb.isCurrent ? onOpenChange(false) : handleNavigationClick(kb.href)
}
data-nav-item={`list-${globalIndex}`}
className={`flex h-10 w-full items-center gap-3 rounded-[8px] px-3 py-2 transition-colors focus:outline-none ${
isItemSelected('list', globalIndex)
? 'bg-accent text-accent-foreground'
: 'hover:bg-accent/60 focus:bg-accent/60'
}`}
>
<div className='flex h-5 w-5 items-center justify-center'>
<LibraryBig className='h-4 w-4 text-muted-foreground' />
</div>
<span className='flex-1 text-left font-normal font-sans text-muted-foreground text-sm leading-none tracking-normal'>
{kb.name}
{kb.isCurrent && ' (current)'}
</span>
</button>
)
})}
</div>
</div>
)}
{/* Pages */}
{filteredPages.length > 0 && (
<div className='mb-6'>
@@ -835,7 +857,10 @@ export function SearchModal({
<div className='space-y-1 px-6'>
{filteredPages.map((page, pageIndex) => {
const globalIndex =
filteredWorkspaces.length + filteredWorkflows.length + pageIndex
filteredWorkspaces.length +
filteredWorkflows.length +
filteredKnowledgeBases.length +
pageIndex
return (
<button
key={page.id}
@@ -872,6 +897,7 @@ export function SearchModal({
const globalIndex =
filteredWorkspaces.length +
filteredWorkflows.length +
filteredKnowledgeBases.length +
filteredPages.length +
docIndex
return (
@@ -902,14 +928,14 @@ export function SearchModal({
{/* Empty state */}
{searchQuery &&
!loading &&
filteredWorkflows.length === 0 &&
filteredWorkspaces.length === 0 &&
filteredKnowledgeBases.length === 0 &&
filteredPages.length === 0 &&
filteredDocs.length === 0 &&
filteredBlocks.length === 0 &&
filteredTools.length === 0 &&
filteredTemplates.length === 0 && (
filteredTriggers.length === 0 &&
filteredTools.length === 0 && (
<div className='ml-6 py-12 text-center'>
<p className='text-muted-foreground'>No results found for "{searchQuery}"</p>
</div>

View File

@@ -18,6 +18,8 @@ import { getEnv, isTruthy } from '@/lib/env'
import { isHosted } from '@/lib/environment'
import { cn } from '@/lib/utils'
import { useOrganizationStore } from '@/stores/organization'
import { useGeneralStore } from '@/stores/settings/general/store'
import { useSubscriptionStore } from '@/stores/subscription/store'
const isBillingEnabled = isTruthy(getEnv('NEXT_PUBLIC_BILLING_ENABLED'))
@@ -200,6 +202,21 @@ export function SettingsNavigation({
{navigationItems.map((item) => (
<div key={item.id} className='mb-1'>
<button
onMouseEnter={() => {
switch (item.id) {
case 'general':
useGeneralStore.getState().loadSettings()
break
case 'subscription':
useSubscriptionStore.getState().loadData()
break
case 'team':
useOrganizationStore.getState().loadData()
break
default:
break
}
}}
onClick={() => onSectionChange(item.id)}
className={cn(
'group flex h-9 w-full cursor-pointer items-center rounded-[8px] px-2 py-2 font-medium font-sans text-sm transition-colors',

View File

@@ -21,6 +21,7 @@ import {
getVisiblePlans,
} from '@/app/workspace/[workspaceId]/w/components/sidebar/components/settings-modal/components/subscription/subscription-permissions'
import { useOrganizationStore } from '@/stores/organization'
import { useGeneralStore } from '@/stores/settings/general/store'
import { useSubscriptionStore } from '@/stores/subscription/store'
const CONSTANTS = {
@@ -531,32 +532,14 @@ export function Subscription({ onOpenChange }: SubscriptionProps) {
}
function BillingUsageNotificationsToggle() {
const [enabled, setEnabled] = useState<boolean | null>(null)
const isLoading = useGeneralStore((s) => s.isBillingUsageNotificationsLoading)
const enabled = useGeneralStore((s) => s.isBillingUsageNotificationsEnabled)
const setEnabled = useGeneralStore((s) => s.setBillingUsageNotificationsEnabled)
const loadSettings = useGeneralStore((s) => s.loadSettings)
useEffect(() => {
let isMounted = true
const load = async () => {
const res = await fetch('/api/users/me/settings')
const json = await res.json()
const current = json?.data?.billingUsageNotificationsEnabled
if (isMounted) setEnabled(current !== false)
}
load()
return () => {
isMounted = false
}
}, [])
const update = async (next: boolean) => {
setEnabled(next)
await fetch('/api/users/me/settings', {
method: 'PATCH',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ billingUsageNotificationsEnabled: next }),
})
}
if (enabled === null) return null
void loadSettings()
}, [loadSettings])
return (
<div className='mt-4 flex items-center justify-between'>
@@ -564,7 +547,13 @@ function BillingUsageNotificationsToggle() {
<span className='font-medium text-sm'>Usage notifications</span>
<span className='text-muted-foreground text-xs'>Email me when I reach 80% usage</span>
</div>
<Switch checked={enabled} onCheckedChange={(v: boolean) => update(v)} />
<Switch
checked={!!enabled}
disabled={isLoading}
onCheckedChange={(v: boolean) => {
void setEnabled(v)
}}
/>
</div>
)
}

View File

@@ -32,6 +32,7 @@ import {
getKeyboardShortcutText,
useGlobalShortcuts,
} from '@/app/workspace/[workspaceId]/w/hooks/use-keyboard-shortcuts'
import { useKnowledgeBasesList } from '@/hooks/use-knowledge'
import { useSubscriptionStore } from '@/stores/subscription/store'
import { useWorkflowDiffStore } from '@/stores/workflow-diff/store'
import { useWorkflowRegistry } from '@/stores/workflows/registry/store'
@@ -115,6 +116,9 @@ export function Sidebar() {
const [templates, setTemplates] = useState<TemplateData[]>([])
const [isTemplatesLoading, setIsTemplatesLoading] = useState(false)
// Knowledge bases for search modal
const { knowledgeBases } = useKnowledgeBasesList(workspaceId)
// Refs
const workflowScrollAreaRef = useRef<HTMLDivElement | null>(null)
const workspaceIdRef = useRef<string>(workspaceId)
@@ -726,6 +730,17 @@ export function Sidebar() {
}))
}, [workspaces, workspaceId])
// Prepare knowledge bases for search modal
const searchKnowledgeBases = useMemo(() => {
return knowledgeBases.map((kb) => ({
id: kb.id,
name: kb.name,
description: kb.description,
href: `/workspace/${workspaceId}/knowledge/${kb.id}`,
isCurrent: knowledgeBaseId === kb.id,
}))
}, [knowledgeBases, workspaceId, knowledgeBaseId])
// Create workflow handler
const handleCreateWorkflow = async (folderId?: string): Promise<string> => {
if (isCreatingWorkflow) {
@@ -1035,10 +1050,9 @@ export function Sidebar() {
<SearchModal
open={showSearchModal}
onOpenChange={setShowSearchModal}
templates={templates}
workflows={searchWorkflows}
workspaces={searchWorkspaces}
loading={isTemplatesLoading}
knowledgeBases={searchKnowledgeBases}
isOnWorkflowPage={isOnWorkflowPage}
/>
</>

View File

@@ -3,6 +3,7 @@ import type { BlockConfig } from '@/blocks/types'
export const ApiTriggerBlock: BlockConfig = {
type: 'api_trigger',
triggerAllowed: true,
name: 'API',
description: 'Expose as HTTP API endpoint',
longDescription:

View File

@@ -7,6 +7,7 @@ const ChatTriggerIcon = (props: SVGProps<SVGSVGElement>) => createElement(Messag
export const ChatTriggerBlock: BlockConfig = {
type: 'chat_trigger',
triggerAllowed: true,
name: 'Chat',
description: 'Start workflow from a chat deployment',
longDescription: 'Chat trigger to run the workflow via deployed chat interfaces.',

View File

@@ -7,6 +7,7 @@ const InputTriggerIcon = (props: SVGProps<SVGSVGElement>) => createElement(FormI
export const InputTriggerBlock: BlockConfig = {
type: 'input_trigger',
triggerAllowed: true,
name: 'Input Form',
description: 'Start workflow manually with a defined input schema',
longDescription:

View File

@@ -7,6 +7,7 @@ const ManualTriggerIcon = (props: SVGProps<SVGSVGElement>) => createElement(Play
export const ManualTriggerBlock: BlockConfig = {
type: 'manual_trigger',
triggerAllowed: true,
name: 'Manual',
description: 'Start workflow manually from the editor',
longDescription:

View File

@@ -7,6 +7,7 @@ const ScheduleIcon = (props: SVGProps<SVGSVGElement>) => createElement(Clock, pr
export const ScheduleBlock: BlockConfig = {
type: 'schedule',
triggerAllowed: true,
name: 'Schedule',
description: 'Trigger workflow execution on a schedule',
longDescription:

View File

@@ -1,28 +1,50 @@
'use client'
import type { ReactNode } from 'react'
import { normalizeBlockName } from '@/stores/workflows/utils'
export interface HighlightContext {
accessiblePrefixes?: Set<string>
highlightAll?: boolean
}
const SYSTEM_PREFIXES = new Set(['start', 'loop', 'parallel', 'variable'])
/**
* Formats text by highlighting block references (<...>) and environment variables ({{...}})
* Used in code editor, long inputs, and short inputs for consistent syntax highlighting
*
* @param text The text to format
*/
export function formatDisplayText(text: string): ReactNode[] {
export function formatDisplayText(text: string, context?: HighlightContext): ReactNode[] {
if (!text) return []
const shouldHighlightPart = (part: string): boolean => {
if (!part.startsWith('<') || !part.endsWith('>')) {
return false
}
if (context?.highlightAll) {
return true
}
const inner = part.slice(1, -1)
const [prefix] = inner.split('.')
const normalizedPrefix = normalizeBlockName(prefix)
if (SYSTEM_PREFIXES.has(normalizedPrefix)) {
return true
}
if (context?.accessiblePrefixes?.has(normalizedPrefix)) {
return true
}
return false
}
const parts = text.split(/(<[^>]+>|\{\{[^}]+\}\})/g)
return parts.map((part, index) => {
if (part.startsWith('<') && part.endsWith('>')) {
return (
<span key={index} className='text-blue-500'>
{part}
</span>
)
}
if (part.match(/^\{\{[^}]+\}\}$/)) {
if (shouldHighlightPart(part) || part.match(/^\{\{[^}]+\}\}$/)) {
return (
<span key={index} className='text-blue-500'>
{part}

View File

@@ -1,13 +1,12 @@
import type React from 'react'
import { useCallback, useEffect, useMemo, useRef, useState } from 'react'
import { ChevronRight } from 'lucide-react'
import { BlockPathCalculator } from '@/lib/block-path-calculator'
import { shallow } from 'zustand/shallow'
import { extractFieldsFromSchema, parseResponseFormatSafely } from '@/lib/response-format'
import { cn } from '@/lib/utils'
import { getBlockOutputPaths, getBlockOutputType } from '@/lib/workflows/block-outputs'
import { useAccessibleReferencePrefixes } from '@/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-accessible-reference-prefixes'
import { getBlock } from '@/blocks'
import type { BlockConfig } from '@/blocks/types'
import { Serializer } from '@/serializer'
import { useVariablesStore } from '@/stores/panel/variables/store'
import type { Variable } from '@/stores/panel/variables/types'
import { useWorkflowRegistry } from '@/stores/workflows/registry/store'
@@ -25,6 +24,15 @@ interface BlockTagGroup {
distance: number
}
interface NestedBlockTagGroup extends BlockTagGroup {
nestedTags: Array<{
key: string
display: string
fullTag?: string
children?: Array<{ key: string; display: string; fullTag: string }>
}>
}
interface TagDropdownProps {
visible: boolean
onSelect: (newValue: string) => void
@@ -70,6 +78,18 @@ const normalizeVariableName = (variableName: string): string => {
return variableName.replace(/\s+/g, '')
}
const ensureRootTag = (tags: string[], rootTag: string): string[] => {
if (!rootTag) {
return tags
}
if (tags.includes(rootTag)) {
return tags
}
return [rootTag, ...tags]
}
const getSubBlockValue = (blockId: string, property: string): any => {
return useSubBlockStore.getState().getValue(blockId, property)
}
@@ -300,12 +320,27 @@ export const TagDropdown: React.FC<TagDropdownProps> = ({
const [parentHovered, setParentHovered] = useState<string | null>(null)
const [submenuHovered, setSubmenuHovered] = useState(false)
const blocks = useWorkflowStore((state) => state.blocks)
const loops = useWorkflowStore((state) => state.loops)
const parallels = useWorkflowStore((state) => state.parallels)
const edges = useWorkflowStore((state) => state.edges)
const { blocks, edges, loops, parallels } = useWorkflowStore(
(state) => ({
blocks: state.blocks,
edges: state.edges,
loops: state.loops || {},
parallels: state.parallels || {},
}),
shallow
)
const workflowId = useWorkflowRegistry((state) => state.activeWorkflowId)
const rawAccessiblePrefixes = useAccessibleReferencePrefixes(blockId)
const combinedAccessiblePrefixes = useMemo(() => {
if (!rawAccessiblePrefixes) return new Set<string>()
const normalized = new Set<string>(rawAccessiblePrefixes)
normalized.add(normalizeBlockName(blockId))
return normalized
}, [rawAccessiblePrefixes, blockId])
// Subscribe to live subblock values for the active workflow to react to input format changes
const workflowSubBlockValues = useSubBlockStore((state) =>
workflowId ? (state.workflowValues[workflowId] ?? {}) : {}
@@ -325,7 +360,6 @@ export const TagDropdown: React.FC<TagDropdownProps> = ({
)
const getVariablesByWorkflowId = useVariablesStore((state) => state.getVariablesByWorkflowId)
const variables = useVariablesStore((state) => state.variables)
const workflowVariables = workflowId ? getVariablesByWorkflowId(workflowId) : []
const searchTerm = useMemo(() => {
@@ -336,8 +370,12 @@ export const TagDropdown: React.FC<TagDropdownProps> = ({
const {
tags,
variableInfoMap = {},
blockTagGroups = [],
variableInfoMap,
blockTagGroups: computedBlockTagGroups,
}: {
tags: string[]
variableInfoMap: Record<string, { type: string; id: string }>
blockTagGroups: BlockTagGroup[]
} = useMemo(() => {
if (activeSourceBlockId) {
const sourceBlock = blocks[activeSourceBlockId]
@@ -481,6 +519,12 @@ export const TagDropdown: React.FC<TagDropdownProps> = ({
}
}
blockTags = ensureRootTag(blockTags, normalizedBlockName)
const shouldShowRootTag = sourceBlock.type === 'generic_webhook'
if (!shouldShowRootTag) {
blockTags = blockTags.filter((tag) => tag !== normalizedBlockName)
}
const blockTagGroups: BlockTagGroup[] = [
{
blockName,
@@ -507,18 +551,7 @@ export const TagDropdown: React.FC<TagDropdownProps> = ({
}
}
const serializer = new Serializer()
const serializedWorkflow = serializer.serializeWorkflow(blocks, edges, loops, parallels)
const accessibleBlockIds = BlockPathCalculator.findAllPathNodes(
serializedWorkflow.connections,
blockId
)
const starterBlock = Object.values(blocks).find((block) => block.type === 'starter')
if (starterBlock && !accessibleBlockIds.includes(starterBlock.id)) {
accessibleBlockIds.push(starterBlock.id)
}
const blockDistances: Record<string, number> = {}
if (starterBlock) {
@@ -623,6 +656,10 @@ export const TagDropdown: React.FC<TagDropdownProps> = ({
const blockTagGroups: BlockTagGroup[] = []
const allBlockTags: string[] = []
// Use the combinedAccessiblePrefixes to iterate through accessible blocks
const accessibleBlockIds = combinedAccessiblePrefixes
? Array.from(combinedAccessiblePrefixes)
: []
for (const accessibleBlockId of accessibleBlockIds) {
const accessibleBlock = blocks[accessibleBlockId]
if (!accessibleBlock) continue
@@ -648,7 +685,8 @@ export const TagDropdown: React.FC<TagDropdownProps> = ({
const normalizedBlockName = normalizeBlockName(blockName)
const outputPaths = generateOutputPaths(mockConfig.outputs)
const blockTags = outputPaths.map((path) => `${normalizedBlockName}.${path}`)
let blockTags = outputPaths.map((path) => `${normalizedBlockName}.${path}`)
blockTags = ensureRootTag(blockTags, normalizedBlockName)
blockTagGroups.push({
blockName,
@@ -750,6 +788,12 @@ export const TagDropdown: React.FC<TagDropdownProps> = ({
}
}
blockTags = ensureRootTag(blockTags, normalizedBlockName)
const shouldShowRootTag = accessibleBlock.type === 'generic_webhook'
if (!shouldShowRootTag) {
blockTags = blockTags.filter((tag) => tag !== normalizedBlockName)
}
blockTagGroups.push({
blockName,
blockId: accessibleBlockId,
@@ -781,51 +825,54 @@ export const TagDropdown: React.FC<TagDropdownProps> = ({
}
return {
tags: [...variableTags, ...contextualTags, ...allBlockTags],
tags: [...allBlockTags, ...variableTags, ...contextualTags],
variableInfoMap,
blockTagGroups: finalBlockTagGroups,
}
}, [
activeSourceBlockId,
combinedAccessiblePrefixes,
blockId,
blocks,
edges,
getMergedSubBlocks,
loops,
parallels,
blockId,
activeSourceBlockId,
workflowVariables,
workflowSubBlockValues,
getMergedSubBlocks,
workflowId,
])
const filteredTags = useMemo(() => {
if (!searchTerm) return tags
return tags.filter((tag: string) => tag.toLowerCase().includes(searchTerm))
return tags.filter((tag) => tag.toLowerCase().includes(searchTerm))
}, [tags, searchTerm])
const { variableTags, filteredBlockTagGroups } = useMemo(() => {
const varTags: string[] = []
filteredTags.forEach((tag) => {
filteredTags.forEach((tag: string) => {
if (tag.startsWith(TAG_PREFIXES.VARIABLE)) {
varTags.push(tag)
}
})
const filteredBlockTagGroups = blockTagGroups
.map((group) => ({
const filteredBlockTagGroups = computedBlockTagGroups
.map((group: BlockTagGroup) => ({
...group,
tags: group.tags.filter((tag) => !searchTerm || tag.toLowerCase().includes(searchTerm)),
tags: group.tags.filter(
(tag: string) => !searchTerm || tag.toLowerCase().includes(searchTerm)
),
}))
.filter((group) => group.tags.length > 0)
.filter((group: BlockTagGroup) => group.tags.length > 0)
return {
variableTags: varTags,
filteredBlockTagGroups,
}
}, [filteredTags, blockTagGroups, searchTerm])
}, [filteredTags, computedBlockTagGroups, searchTerm])
const nestedBlockTagGroups = useMemo(() => {
return filteredBlockTagGroups.map((group) => {
const nestedBlockTagGroups: NestedBlockTagGroup[] = useMemo(() => {
return filteredBlockTagGroups.map((group: BlockTagGroup) => {
const nestedTags: Array<{
key: string
display: string
@@ -839,7 +886,7 @@ export const TagDropdown: React.FC<TagDropdownProps> = ({
> = {}
const directTags: Array<{ key: string; display: string; fullTag: string }> = []
group.tags.forEach((tag) => {
group.tags.forEach((tag: string) => {
const tagParts = tag.split('.')
if (tagParts.length >= 3) {
const parent = tagParts[1]
@@ -899,8 +946,8 @@ export const TagDropdown: React.FC<TagDropdownProps> = ({
visualTags.push(...variableTags)
nestedBlockTagGroups.forEach((group) => {
group.nestedTags.forEach((nestedTag) => {
nestedBlockTagGroups.forEach((group: NestedBlockTagGroup) => {
group.nestedTags.forEach((nestedTag: any) => {
if (nestedTag.children && nestedTag.children.length > 0) {
const firstChild = nestedTag.children[0]
if (firstChild.fullTag) {
@@ -952,8 +999,8 @@ export const TagDropdown: React.FC<TagDropdownProps> = ({
if (tag.startsWith(TAG_PREFIXES.VARIABLE)) {
const variableName = tag.substring(TAG_PREFIXES.VARIABLE.length)
const variableObj = Object.values(variables).find(
(v) => v.name.replace(/\s+/g, '') === variableName
const variableObj = workflowVariables.find(
(v: Variable) => v.name.replace(/\s+/g, '') === variableName
)
if (variableObj) {
@@ -985,7 +1032,7 @@ export const TagDropdown: React.FC<TagDropdownProps> = ({
onSelect(newValue)
onClose?.()
},
[inputValue, cursorPosition, variables, onSelect, onClose]
[inputValue, cursorPosition, workflowVariables, onSelect, onClose]
)
useEffect(() => setSelectedIndex(0), [searchTerm])
@@ -1030,7 +1077,7 @@ export const TagDropdown: React.FC<TagDropdownProps> = ({
if (selectedIndex < 0 || selectedIndex >= orderedTags.length) return null
const selectedTag = orderedTags[selectedIndex]
for (let gi = 0; gi < nestedBlockTagGroups.length; gi++) {
const group = nestedBlockTagGroups[gi]
const group = nestedBlockTagGroups[gi]!
for (let ni = 0; ni < group.nestedTags.length; ni++) {
const nestedTag = group.nestedTags[ni]
if (nestedTag.children && nestedTag.children.length > 0) {
@@ -1051,16 +1098,16 @@ export const TagDropdown: React.FC<TagDropdownProps> = ({
return
}
const currentGroup = nestedBlockTagGroups.find((group) => {
const currentGroup = nestedBlockTagGroups.find((group: NestedBlockTagGroup) => {
return group.nestedTags.some(
(tag, index) =>
(tag: any, index: number) =>
`${group.blockId}-${tag.key}` === currentHovered.tag &&
index === currentHovered.index
)
})
const currentNestedTag = currentGroup?.nestedTags.find(
(tag, index) =>
(tag: any, index: number) =>
`${currentGroup.blockId}-${tag.key}` === currentHovered.tag &&
index === currentHovered.index
)
@@ -1089,8 +1136,8 @@ export const TagDropdown: React.FC<TagDropdownProps> = ({
e.preventDefault()
e.stopPropagation()
if (submenuIndex >= 0 && submenuIndex < children.length) {
const selectedChild = children[submenuIndex]
handleTagSelect(selectedChild.fullTag, currentGroup)
const selectedChild = children[submenuIndex] as any
handleTagSelect(selectedChild.fullTag, currentGroup as BlockTagGroup | undefined)
}
break
case 'Escape':
@@ -1324,7 +1371,7 @@ export const TagDropdown: React.FC<TagDropdownProps> = ({
{nestedBlockTagGroups.length > 0 && (
<>
{variableTags.length > 0 && <div className='my-0' />}
{nestedBlockTagGroups.map((group) => {
{nestedBlockTagGroups.map((group: NestedBlockTagGroup) => {
const blockConfig = getBlock(group.blockType)
let blockColor = blockConfig?.bgColor || BLOCK_COLORS.DEFAULT
@@ -1340,7 +1387,7 @@ export const TagDropdown: React.FC<TagDropdownProps> = ({
{group.blockName}
</div>
<div>
{group.nestedTags.map((nestedTag, index) => {
{group.nestedTags.map((nestedTag: any, index: number) => {
const tagIndex = nestedTag.fullTag
? (tagIndexMap.get(nestedTag.fullTag) ?? -1)
: -1
@@ -1505,7 +1552,7 @@ export const TagDropdown: React.FC<TagDropdownProps> = ({
}}
>
<div className='py-1'>
{nestedTag.children!.map((child, childIndex) => {
{nestedTag.children!.map((child: any, childIndex: number) => {
const isKeyboardSelected =
inSubmenu && submenuIndex === childIndex
const isSelected = isKeyboardSelected

View File

@@ -382,7 +382,6 @@ export function SocketProvider({ children, user }: SocketProviderProps) {
isDeployed: workflowState.isDeployed ?? false,
deployedAt: workflowState.deployedAt,
deploymentStatuses: workflowState.deploymentStatuses || {},
hasActiveWebhook: workflowState.hasActiveWebhook ?? false,
})
// Replace subblock store values for this workflow

View File

@@ -1,6 +1,6 @@
import { getEnv } from '@/lib/env'
import { createLogger } from '@/lib/logs/console/logger'
import { createMcpToolId } from '@/lib/mcp/utils'
import { getBaseUrl } from '@/lib/urls/utils'
import { getAllBlocks } from '@/blocks'
import type { BlockOutput } from '@/blocks/types'
import { BlockType } from '@/executor/consts'
@@ -261,8 +261,7 @@ export class AgentBlockHandler implements BlockHandler {
}
}
const appUrl = getEnv('NEXT_PUBLIC_APP_URL')
const url = new URL(`${appUrl}/api/mcp/tools/discover`)
const url = new URL('/api/mcp/tools/discover', getBaseUrl())
url.searchParams.set('serverId', serverId)
if (context.workspaceId) {
url.searchParams.set('workspaceId', context.workspaceId)
@@ -316,7 +315,7 @@ export class AgentBlockHandler implements BlockHandler {
}
}
const execResponse = await fetch(`${appUrl}/api/mcp/tools/execute`, {
const execResponse = await fetch(`${getBaseUrl()}/api/mcp/tools/execute`, {
method: 'POST',
headers,
body: JSON.stringify({
@@ -640,7 +639,7 @@ export class AgentBlockHandler implements BlockHandler {
) {
logger.info('Using HTTP provider request (browser environment)')
const url = new URL('/api/providers', getEnv('NEXT_PUBLIC_APP_URL') || '')
const url = new URL('/api/providers', getBaseUrl())
const response = await fetch(url.toString(), {
method: 'POST',
headers: { 'Content-Type': 'application/json' },

View File

@@ -1,5 +1,5 @@
import { env } from '@/lib/env'
import { createLogger } from '@/lib/logs/console/logger'
import { getBaseUrl } from '@/lib/urls/utils'
import { generateRouterPrompt } from '@/blocks/blocks/router'
import type { BlockOutput } from '@/blocks/types'
import { BlockType } from '@/executor/consts'
@@ -40,8 +40,7 @@ export class RouterBlockHandler implements BlockHandler {
const providerId = getProviderFromModel(routerConfig.model)
try {
const baseUrl = env.NEXT_PUBLIC_APP_URL || ''
const url = new URL('/api/providers', baseUrl)
const url = new URL('/api/providers', getBaseUrl())
// Create the provider request with proper message formatting
const messages = [{ role: 'user', content: routerConfig.prompt }]

View File

@@ -1356,7 +1356,7 @@ describe('InputResolver', () => {
expect(result.code).toBe('return "Agent response"')
})
it('should reject references to unconnected blocks', () => {
it('should leave references to unconnected blocks as strings', () => {
// Create a new block that is added to the workflow but not connected to isolated-block
workflowWithConnections.blocks.push({
id: 'test-block',
@@ -1402,9 +1402,9 @@ describe('InputResolver', () => {
enabled: true,
}
expect(() => connectionResolver.resolveInputs(testBlock, contextWithConnections)).toThrow(
/Block "isolated-block" is not connected to this block/
)
// Should not throw - inaccessible references remain as strings
const result = connectionResolver.resolveInputs(testBlock, contextWithConnections)
expect(result.code).toBe('return <isolated-block.content>') // Reference remains as-is
})
it('should always allow references to starter block', () => {
@@ -1546,7 +1546,7 @@ describe('InputResolver', () => {
expect(otherResult).toBe('content: Hello World')
})
it('should provide helpful error messages for unconnected blocks', () => {
it('should not throw for unconnected blocks and leave references as strings', () => {
// Create a test block in the workflow first
workflowWithConnections.blocks.push({
id: 'test-block-2',
@@ -1592,9 +1592,9 @@ describe('InputResolver', () => {
enabled: true,
}
expect(() => connectionResolver.resolveInputs(testBlock, contextWithConnections)).toThrow(
/Available connected blocks:.*Agent Block.*Start/
)
// Should not throw - references to nonexistent blocks remain as strings
const result = connectionResolver.resolveInputs(testBlock, contextWithConnections)
expect(result.code).toBe('return <nonexistent.value>') // Reference remains as-is
})
it('should work with block names and normalized names', () => {
@@ -1725,7 +1725,7 @@ describe('InputResolver', () => {
extendedResolver.resolveInputs(block1, extendedContext)
}).not.toThrow()
// Should fail for indirect connection
// Should not fail for indirect connection - reference remains as string
expect(() => {
// Add the response block to the workflow so it can be validated properly
extendedWorkflow.blocks.push({
@@ -1748,8 +1748,9 @@ describe('InputResolver', () => {
outputs: {},
enabled: true,
}
extendedResolver.resolveInputs(block2, extendedContext)
}).toThrow(/Block "agent-1" is not connected to this block/)
const result = extendedResolver.resolveInputs(block2, extendedContext)
expect(result.test).toBe('<agent-1.content>') // Reference remains as-is since agent-1 is not accessible
}).not.toThrow()
})
it('should handle blocks in same loop referencing each other', () => {

View File

@@ -1,11 +1,13 @@
import { BlockPathCalculator } from '@/lib/block-path-calculator'
import { createLogger } from '@/lib/logs/console/logger'
import { VariableManager } from '@/lib/variables/variable-manager'
import { extractReferencePrefixes, SYSTEM_REFERENCE_PREFIXES } from '@/lib/workflows/references'
import { TRIGGER_REFERENCE_ALIAS_MAP } from '@/lib/workflows/triggers'
import { getBlock } from '@/blocks/index'
import type { LoopManager } from '@/executor/loops/loops'
import type { ExecutionContext } from '@/executor/types'
import type { SerializedBlock, SerializedWorkflow } from '@/serializer/types'
import { normalizeBlockName } from '@/stores/workflows/utils'
const logger = createLogger('InputResolver')
@@ -461,64 +463,40 @@ export class InputResolver {
return value
}
const blockMatches = value.match(/<([^>]+)>/g)
if (!blockMatches) return value
const blockMatches = extractReferencePrefixes(value)
if (blockMatches.length === 0) return value
// Filter out patterns that are clearly not variable references (e.g., comparison operators)
const validBlockMatches = blockMatches.filter((match) => this.isValidVariableReference(match))
// If no valid matches found after filtering, return original value
if (validBlockMatches.length === 0) {
return value
}
// If we're in an API block body, check each valid match to see if it looks like XML rather than a reference
if (
currentBlock.metadata?.id === 'api' &&
validBlockMatches.some((match) => {
const innerContent = match.slice(1, -1)
// Patterns that suggest this is XML, not a block reference:
return (
innerContent.includes(':') || // namespaces like soap:Envelope
innerContent.includes('=') || // attributes like xmlns="http://..."
innerContent.includes(' ') || // any space indicates attributes
innerContent.includes('/') || // self-closing tags
!innerContent.includes('.')
) // block refs always have dots
})
) {
return value // Likely XML content, return unchanged
}
const accessiblePrefixes = this.getAccessiblePrefixes(currentBlock)
let resolvedValue = value
// Check if we're in a template literal for function blocks
const isInTemplateLiteral =
currentBlock.metadata?.id === 'function' &&
value.includes('${') &&
value.includes('}') &&
value.includes('`')
for (const match of validBlockMatches) {
// Skip variables - they've already been processed
if (match.startsWith('<variable.')) {
for (const match of blockMatches) {
const { raw, prefix } = match
if (!accessiblePrefixes.has(prefix)) {
continue
}
const path = match.slice(1, -1)
const [blockRef, ...pathParts] = path.split('.')
if (raw.startsWith('<variable.')) {
continue
}
const path = raw.slice(1, -1)
const [blockRefToken, ...pathParts] = path.split('.')
const blockRef = blockRefToken.trim()
// Skip XML-like tags (but allow block names with spaces)
if (blockRef.includes(':')) {
continue
}
// System references (start, loop, parallel, variable) are handled as special cases
const isSystemReference = ['start', 'loop', 'parallel', 'variable'].includes(
blockRef.toLowerCase()
)
// Check if we're in a template literal context
const isInTemplateLiteral =
currentBlock.metadata?.id === 'function' &&
value.includes('${') &&
value.includes('}') &&
value.includes('`')
// System references and regular block references are both processed
// System references (start, loop, parallel, variable) and regular block references are both processed
// Accessibility validation happens later in validateBlockReference
// Special case for trigger block references (start, api, chat, manual)
@@ -657,7 +635,7 @@ export class InputResolver {
}
}
resolvedValue = resolvedValue.replace(match, formattedValue)
resolvedValue = resolvedValue.replace(raw, formattedValue)
continue
}
}
@@ -678,7 +656,7 @@ export class InputResolver {
)
if (formattedValue !== null) {
resolvedValue = resolvedValue.replace(match, formattedValue)
resolvedValue = resolvedValue.replace(raw, formattedValue)
continue
}
}
@@ -699,7 +677,7 @@ export class InputResolver {
)
if (formattedValue !== null) {
resolvedValue = resolvedValue.replace(match, formattedValue)
resolvedValue = resolvedValue.replace(raw, formattedValue)
continue
}
}
@@ -723,7 +701,7 @@ export class InputResolver {
const isInActivePath = context.activeExecutionPath.has(sourceBlock.id)
if (!isInActivePath) {
resolvedValue = resolvedValue.replace(match, '')
resolvedValue = resolvedValue.replace(raw, '')
continue
}
@@ -753,14 +731,14 @@ export class InputResolver {
const isInLoop = this.loopsByBlockId.has(sourceBlock.id)
if (isInLoop) {
resolvedValue = resolvedValue.replace(match, '')
resolvedValue = resolvedValue.replace(raw, '')
continue
}
// If the block hasn't been executed and isn't in the active path,
// it means it's in an inactive branch - return empty string
if (!context.activeExecutionPath.has(sourceBlock.id)) {
resolvedValue = resolvedValue.replace(match, '')
resolvedValue = resolvedValue.replace(raw, '')
continue
}
@@ -861,7 +839,7 @@ export class InputResolver {
: String(replacementValue)
}
resolvedValue = resolvedValue.replace(match, formattedValue)
resolvedValue = resolvedValue.replace(raw, formattedValue)
}
return resolvedValue
@@ -1362,6 +1340,18 @@ export class InputResolver {
if (!sourceBlock) {
const normalizedRef = this.normalizeBlockName(blockRef)
sourceBlock = this.blockByNormalizedName.get(normalizedRef)
if (!sourceBlock) {
for (const candidate of this.workflow.blocks) {
const candidateName = candidate.metadata?.name
if (!candidateName) continue
const normalizedName = this.normalizeBlockName(candidateName)
if (normalizedName === normalizedRef) {
sourceBlock = candidate
break
}
}
}
}
if (!sourceBlock) {
@@ -2035,4 +2025,21 @@ export class InputResolver {
getContainingParallelId(blockId: string): string | undefined {
return this.parallelsByBlockId.get(blockId)
}
private getAccessiblePrefixes(block: SerializedBlock): Set<string> {
const prefixes = new Set<string>()
const accessibleBlocks = this.getAccessibleBlocks(block.id)
accessibleBlocks.forEach((blockId) => {
prefixes.add(normalizeBlockName(blockId))
const sourceBlock = this.blockById.get(blockId)
if (sourceBlock?.metadata?.name) {
prefixes.add(normalizeBlockName(sourceBlock.metadata.name))
}
})
SYSTEM_REFERENCE_PREFIXES.forEach((prefix) => prefixes.add(prefix))
return prefixes
}
}

View File

@@ -479,7 +479,6 @@ export function useCollaborativeWorkflow() {
isDeployed: workflowData.state.isDeployed || false,
deployedAt: workflowData.state.deployedAt,
lastSaved: workflowData.state.lastSaved || Date.now(),
hasActiveWebhook: workflowData.state.hasActiveWebhook || false,
deploymentStatuses: workflowData.state.deploymentStatuses || {},
})

View File

@@ -5,7 +5,34 @@
* It respects the user's telemetry preferences stored in localStorage.
*
*/
import { env } from './lib/env'
import posthog from 'posthog-js'
import { env, getEnv, isTruthy } from './lib/env'
// Initialize PostHog only if explicitly enabled
if (isTruthy(getEnv('NEXT_PUBLIC_POSTHOG_ENABLED')) && getEnv('NEXT_PUBLIC_POSTHOG_KEY')) {
posthog.init(getEnv('NEXT_PUBLIC_POSTHOG_KEY')!, {
api_host: '/ingest',
ui_host: 'https://us.posthog.com',
person_profiles: 'identified_only',
capture_pageview: true,
capture_pageleave: true,
capture_performance: true,
session_recording: {
maskAllInputs: false,
maskInputOptions: {
password: true,
email: false,
},
recordCrossOriginIframes: false,
recordHeaders: true,
recordBody: true,
},
autocapture: true,
capture_dead_clicks: true,
persistence: 'localStorage+cookie',
enable_heatmaps: true,
})
}
if (typeof window !== 'undefined') {
const TELEMETRY_STATUS_KEY = 'simstudio-telemetry-status'

View File

@@ -14,19 +14,7 @@ import { isBillingEnabled } from '@/lib/environment'
import { SessionContext, type SessionHookResult } from '@/lib/session/session-context'
export function getBaseURL() {
let baseURL
if (env.VERCEL_ENV === 'preview') {
baseURL = `https://${getEnv('NEXT_PUBLIC_VERCEL_URL')}`
} else if (env.VERCEL_ENV === 'development') {
baseURL = `https://${getEnv('NEXT_PUBLIC_VERCEL_URL')}`
} else if (env.VERCEL_ENV === 'production') {
baseURL = env.BETTER_AUTH_URL || getEnv('NEXT_PUBLIC_APP_URL')
} else if (env.NODE_ENV === 'development') {
baseURL = getEnv('NEXT_PUBLIC_APP_URL') || env.BETTER_AUTH_URL || 'http://localhost:3000'
}
return baseURL
return getEnv('NEXT_PUBLIC_APP_URL') || 'http://localhost:3000'
}
export const client = createAuthClient({

View File

@@ -63,7 +63,6 @@ export const auth = betterAuth({
baseURL: getBaseURL(),
trustedOrigins: [
env.NEXT_PUBLIC_APP_URL,
...(env.NEXT_PUBLIC_VERCEL_URL ? [`https://${env.NEXT_PUBLIC_VERCEL_URL}`] : []),
...(env.NEXT_PUBLIC_SOCKET_URL ? [env.NEXT_PUBLIC_SOCKET_URL] : []),
].filter(Boolean),
database: drizzleAdapter(db, {

View File

@@ -268,7 +268,6 @@ async function processWorkflowFromDb(
logger.info('Processed sanitized workflow context', {
workflowId,
blocks: Object.keys(sanitizedState.blocks || {}).length,
edges: sanitizedState.edges.length,
})
// Use the provided kind for the type
return { type: kind, tag, content }

View File

@@ -262,6 +262,14 @@ const ExecutionEntry = z.object({
totalTokens: z.number().nullable(),
blockExecutions: z.array(z.any()), // can be detailed per need
output: z.any().optional(),
errorMessage: z.string().optional(),
errorBlock: z
.object({
blockId: z.string().optional(),
blockName: z.string().optional(),
blockType: z.string().optional(),
})
.optional(),
})
export const ToolResultSchemas = {

View File

@@ -98,7 +98,35 @@ export class EditWorkflowClientTool extends BaseClientTool {
// Prepare currentUserWorkflow JSON from stores to preserve block IDs
let currentUserWorkflow = args?.currentUserWorkflow
if (!currentUserWorkflow) {
const diffStoreState = useWorkflowDiffStore.getState()
let usedDiffWorkflow = false
if (!currentUserWorkflow && diffStoreState.isDiffReady && diffStoreState.diffWorkflow) {
try {
const diffWorkflow = diffStoreState.diffWorkflow
const normalizedDiffWorkflow = {
...diffWorkflow,
blocks: diffWorkflow.blocks || {},
edges: diffWorkflow.edges || [],
loops: diffWorkflow.loops || {},
parallels: diffWorkflow.parallels || {},
}
currentUserWorkflow = JSON.stringify(normalizedDiffWorkflow)
usedDiffWorkflow = true
logger.info('Using diff workflow state as base for edit_workflow operations', {
toolCallId: this.toolCallId,
blocksCount: Object.keys(normalizedDiffWorkflow.blocks).length,
edgesCount: normalizedDiffWorkflow.edges.length,
})
} catch (e) {
logger.warn(
'Failed to serialize diff workflow state; falling back to active workflow',
e as any
)
}
}
if (!currentUserWorkflow && !usedDiffWorkflow) {
try {
const workflowStore = useWorkflowStore.getState()
const fullState = workflowStore.getWorkflowState()

View File

@@ -77,13 +77,13 @@ export interface CopilotBlockMetadata {
name: string
description: string
bestPractices?: string
commonParameters: CopilotSubblockMetadata[]
inputs?: Record<string, any>
inputSchema: CopilotSubblockMetadata[]
inputDefinitions?: Record<string, any>
triggerAllowed?: boolean
authType?: 'OAuth' | 'API Key' | 'Bot Token'
tools: CopilotToolMetadata[]
triggers: CopilotTriggerMetadata[]
operationParameters: Record<string, CopilotSubblockMetadata[]>
operationInputSchema: Record<string, CopilotSubblockMetadata[]>
operations?: Record<
string,
{
@@ -92,7 +92,7 @@ export interface CopilotBlockMetadata {
description?: string
inputs?: Record<string, any>
outputs?: Record<string, any>
parameters?: CopilotSubblockMetadata[]
inputSchema?: CopilotSubblockMetadata[]
}
>
yamlDocumentation?: string
@@ -125,11 +125,11 @@ export const getBlocksMetadataServerTool: BaseServerTool<
id: specialBlock.id,
name: specialBlock.name,
description: specialBlock.description || '',
commonParameters: commonParameters,
inputs: specialBlock.inputs || {},
inputSchema: commonParameters,
inputDefinitions: specialBlock.inputs || {},
tools: [],
triggers: [],
operationParameters,
operationInputSchema: operationParameters,
}
;(metadata as any).subBlocks = undefined
} else {
@@ -192,7 +192,7 @@ export const getBlocksMetadataServerTool: BaseServerTool<
description: toolCfg?.description || undefined,
inputs: { ...filteredToolParams, ...(operationInputs[opId] || {}) },
outputs: toolOutputs,
parameters: operationParameters[opId] || [],
inputSchema: operationParameters[opId] || [],
}
}
@@ -201,13 +201,13 @@ export const getBlocksMetadataServerTool: BaseServerTool<
name: blockConfig.name || blockId,
description: blockConfig.longDescription || blockConfig.description || '',
bestPractices: blockConfig.bestPractices,
commonParameters: commonParameters,
inputs: blockInputs,
inputSchema: commonParameters,
inputDefinitions: blockInputs,
triggerAllowed: !!blockConfig.triggerAllowed,
authType: resolveAuthType(blockConfig.authMode),
tools,
triggers,
operationParameters,
operationInputSchema: operationParameters,
operations,
}
}
@@ -420,7 +420,7 @@ function splitParametersByOperation(
operationParameters[key].push(processed)
}
} else {
// Override description from blockInputs if available (by id or canonicalParamId)
// Override description from inputDefinitions if available (by id or canonicalParamId)
if (blockInputsForDescriptions) {
const candidates = [sb.id, sb.canonicalParamId].filter(Boolean)
for (const key of candidates) {

View File

@@ -11,7 +11,7 @@ import { resolveOutputType } from '@/blocks/utils'
import { generateLoopBlocks, generateParallelBlocks } from '@/stores/workflows/workflow/utils'
interface EditWorkflowOperation {
operation_type: 'add' | 'edit' | 'delete'
operation_type: 'add' | 'edit' | 'delete' | 'insert_into_subflow' | 'extract_from_subflow'
block_id: string
params?: Record<string, any>
}
@@ -22,6 +22,78 @@ interface EditWorkflowParams {
currentUserWorkflow?: string
}
/**
* Helper to create a block state from operation params
*/
function createBlockFromParams(blockId: string, params: any, parentId?: string): any {
const blockConfig = getAllBlocks().find((b) => b.type === params.type)
const blockState: any = {
id: blockId,
type: params.type,
name: params.name,
position: { x: 0, y: 0 },
enabled: params.enabled !== undefined ? params.enabled : true,
horizontalHandles: true,
isWide: false,
advancedMode: params.advancedMode || false,
height: 0,
triggerMode: params.triggerMode || false,
subBlocks: {},
outputs: params.outputs || (blockConfig ? resolveOutputType(blockConfig.outputs) : {}),
data: parentId ? { parentId, extent: 'parent' as const } : {},
}
// Add inputs as subBlocks
if (params.inputs) {
Object.entries(params.inputs).forEach(([key, value]) => {
blockState.subBlocks[key] = {
id: key,
type: 'short-input',
value: value,
}
})
}
// Set up subBlocks from block configuration
if (blockConfig) {
blockConfig.subBlocks.forEach((subBlock) => {
if (!blockState.subBlocks[subBlock.id]) {
blockState.subBlocks[subBlock.id] = {
id: subBlock.id,
type: subBlock.type,
value: null,
}
}
})
}
return blockState
}
/**
 * Append edges to `modifiedState.edges` for every connection declared on a block.
 *
 * `connections` maps a source handle to its target(s). Each target may be:
 *   - a block id string, or
 *   - an object `{ block, handle }` naming the target block and target handle
 *     (the shape the previous inline edge-building code in the `add` case
 *     accepted; without this, such targets were pushed raw as `edge.target`).
 * Unrecognized target shapes are skipped rather than producing broken edges.
 */
function addConnectionsAsEdges(
  modifiedState: any,
  blockId: string,
  connections: Record<string, any>
): void {
  Object.entries(connections).forEach(([sourceHandle, targets]) => {
    const targetArray = Array.isArray(targets) ? targets : [targets]
    targetArray.forEach((target: any) => {
      // Normalize both supported target shapes to a block id + optional handle.
      const targetBlock = typeof target === 'string' ? target : target?.block
      if (!targetBlock) return
      modifiedState.edges.push({
        id: crypto.randomUUID(),
        source: blockId,
        sourceHandle,
        target: targetBlock,
        targetHandle: (typeof target === 'object' && target?.handle) || 'target',
        type: 'default',
      })
    })
  })
}
/**
* Apply operations directly to the workflow JSON state
*/
@@ -43,11 +115,19 @@ function applyOperationsToWorkflowState(
})),
})
// Reorder operations: delete -> add -> edit to ensure consistent application semantics
// Reorder operations: delete -> extract -> add -> insert -> edit
const deletes = operations.filter((op) => op.operation_type === 'delete')
const extracts = operations.filter((op) => op.operation_type === 'extract_from_subflow')
const adds = operations.filter((op) => op.operation_type === 'add')
const inserts = operations.filter((op) => op.operation_type === 'insert_into_subflow')
const edits = operations.filter((op) => op.operation_type === 'edit')
const orderedOperations: EditWorkflowOperation[] = [...deletes, ...adds, ...edits]
const orderedOperations: EditWorkflowOperation[] = [
...deletes,
...extracts,
...adds,
...inserts,
...edits,
]
for (const operation of orderedOperations) {
const { operation_type, block_id, params } = operation
@@ -105,6 +185,23 @@ function applyOperationsToWorkflowState(
block.subBlocks[key].value = value
}
})
// Update loop/parallel configuration in block.data
if (block.type === 'loop') {
block.data = block.data || {}
if (params.inputs.loopType !== undefined) block.data.loopType = params.inputs.loopType
if (params.inputs.iterations !== undefined)
block.data.count = params.inputs.iterations
if (params.inputs.collection !== undefined)
block.data.collection = params.inputs.collection
} else if (block.type === 'parallel') {
block.data = block.data || {}
if (params.inputs.parallelType !== undefined)
block.data.parallelType = params.inputs.parallelType
if (params.inputs.count !== undefined) block.data.count = params.inputs.count
if (params.inputs.collection !== undefined)
block.data.collection = params.inputs.collection
}
}
// Update basic properties
@@ -123,6 +220,50 @@ function applyOperationsToWorkflowState(
}
}
// Handle advanced mode toggle
if (typeof params?.advancedMode === 'boolean') {
block.advancedMode = params.advancedMode
}
// Handle nested nodes update (for loops/parallels)
if (params?.nestedNodes) {
// Remove all existing child blocks
const existingChildren = Object.keys(modifiedState.blocks).filter(
(id) => modifiedState.blocks[id].data?.parentId === block_id
)
existingChildren.forEach((childId) => delete modifiedState.blocks[childId])
// Remove edges to/from removed children
modifiedState.edges = modifiedState.edges.filter(
(edge: any) =>
!existingChildren.includes(edge.source) && !existingChildren.includes(edge.target)
)
// Add new nested blocks
Object.entries(params.nestedNodes).forEach(([childId, childBlock]: [string, any]) => {
const childBlockState = createBlockFromParams(childId, childBlock, block_id)
modifiedState.blocks[childId] = childBlockState
// Add connections for child block
if (childBlock.connections) {
addConnectionsAsEdges(modifiedState, childId, childBlock.connections)
}
})
// Update loop/parallel configuration based on type
if (block.type === 'loop') {
block.data = block.data || {}
if (params.inputs?.loopType) block.data.loopType = params.inputs.loopType
if (params.inputs?.iterations) block.data.count = params.inputs.iterations
if (params.inputs?.collection) block.data.collection = params.inputs.collection
} else if (block.type === 'parallel') {
block.data = block.data || {}
if (params.inputs?.parallelType) block.data.parallelType = params.inputs.parallelType
if (params.inputs?.count) block.data.count = params.inputs.count
if (params.inputs?.collection) block.data.collection = params.inputs.collection
}
}
// Handle connections update (convert to edges)
if (params?.connections) {
// Remove existing edges from this block
@@ -191,82 +332,135 @@ function applyOperationsToWorkflowState(
case 'add': {
if (params?.type && params?.name) {
// Get block configuration
const blockConfig = getAllBlocks().find((block) => block.type === params.type)
// Create new block with proper structure
const newBlock: any = {
id: block_id,
type: params.type,
name: params.name,
position: { x: 0, y: 0 }, // Default position
enabled: true,
horizontalHandles: true,
isWide: false,
advancedMode: false,
height: 0,
triggerMode: false,
subBlocks: {},
outputs: blockConfig ? resolveOutputType(blockConfig.outputs) : {},
data: {},
}
const newBlock = createBlockFromParams(block_id, params)
// Add inputs as subBlocks
if (params.inputs) {
Object.entries(params.inputs).forEach(([key, value]) => {
newBlock.subBlocks[key] = {
id: key,
type: 'short-input',
value: value,
// Handle nested nodes (for loops/parallels created from scratch)
if (params.nestedNodes) {
Object.entries(params.nestedNodes).forEach(([childId, childBlock]: [string, any]) => {
const childBlockState = createBlockFromParams(childId, childBlock, block_id)
modifiedState.blocks[childId] = childBlockState
if (childBlock.connections) {
addConnectionsAsEdges(modifiedState, childId, childBlock.connections)
}
})
}
// Set up subBlocks from block configuration
if (blockConfig) {
blockConfig.subBlocks.forEach((subBlock) => {
if (!newBlock.subBlocks[subBlock.id]) {
newBlock.subBlocks[subBlock.id] = {
id: subBlock.id,
type: subBlock.type,
value: null,
}
// Set loop/parallel data on parent block
if (params.type === 'loop') {
newBlock.data = {
...newBlock.data,
loopType: params.inputs?.loopType || 'for',
...(params.inputs?.collection && { collection: params.inputs.collection }),
...(params.inputs?.iterations && { count: params.inputs.iterations }),
}
})
} else if (params.type === 'parallel') {
newBlock.data = {
...newBlock.data,
parallelType: params.inputs?.parallelType || 'count',
...(params.inputs?.collection && { collection: params.inputs.collection }),
...(params.inputs?.count && { count: params.inputs.count }),
}
}
}
modifiedState.blocks[block_id] = newBlock
// Add connections as edges
if (params.connections) {
Object.entries(params.connections).forEach(([sourceHandle, targets]) => {
const addEdge = (targetBlock: string, targetHandle?: string) => {
modifiedState.edges.push({
id: crypto.randomUUID(),
source: block_id,
sourceHandle: sourceHandle,
target: targetBlock,
targetHandle: targetHandle || 'target',
type: 'default',
})
}
addConnectionsAsEdges(modifiedState, block_id, params.connections)
}
}
break
}
if (typeof targets === 'string') {
addEdge(targets)
} else if (Array.isArray(targets)) {
targets.forEach((target: any) => {
if (typeof target === 'string') {
addEdge(target)
} else if (target?.block) {
addEdge(target.block, target.handle)
}
})
} else if (typeof targets === 'object' && (targets as any)?.block) {
addEdge((targets as any).block, (targets as any).handle)
case 'insert_into_subflow': {
const subflowId = params?.subflowId
if (!subflowId || !params?.type || !params?.name) {
logger.warn('Missing required params for insert_into_subflow', { block_id, params })
break
}
const subflowBlock = modifiedState.blocks[subflowId]
if (!subflowBlock || (subflowBlock.type !== 'loop' && subflowBlock.type !== 'parallel')) {
logger.warn('Subflow block not found or invalid type', {
subflowId,
type: subflowBlock?.type,
})
break
}
// Get block configuration
const blockConfig = getAllBlocks().find((block) => block.type === params.type)
// Check if block already exists (moving into subflow) or is new
const existingBlock = modifiedState.blocks[block_id]
if (existingBlock) {
// Moving existing block into subflow - just update parent
existingBlock.data = {
...existingBlock.data,
parentId: subflowId,
extent: 'parent' as const,
}
// Update inputs if provided
if (params.inputs) {
Object.entries(params.inputs).forEach(([key, value]) => {
if (!existingBlock.subBlocks[key]) {
existingBlock.subBlocks[key] = { id: key, type: 'short-input', value }
} else {
existingBlock.subBlocks[key].value = value
}
})
}
} else {
// Create new block as child of subflow
const newBlock = createBlockFromParams(block_id, params, subflowId)
modifiedState.blocks[block_id] = newBlock
}
// Add/update connections as edges
if (params.connections) {
// Remove existing edges from this block
modifiedState.edges = modifiedState.edges.filter((edge: any) => edge.source !== block_id)
// Add new connections
addConnectionsAsEdges(modifiedState, block_id, params.connections)
}
break
}
case 'extract_from_subflow': {
const subflowId = params?.subflowId
if (!subflowId) {
logger.warn('Missing subflowId for extract_from_subflow', { block_id })
break
}
const block = modifiedState.blocks[block_id]
if (!block) {
logger.warn('Block not found for extraction', { block_id })
break
}
// Verify it's actually a child of this subflow
if (block.data?.parentId !== subflowId) {
logger.warn('Block is not a child of specified subflow', {
block_id,
actualParent: block.data?.parentId,
specifiedParent: subflowId,
})
}
// Remove parent relationship
if (block.data) {
block.data.parentId = undefined
block.data.extent = undefined
}
// Note: We keep the block and its edges, just remove parent relationship
// The block becomes a root-level block
break
}
}

View File

@@ -43,6 +43,12 @@ interface ExecutionEntry {
totalTokens: number | null
blockExecutions: BlockExecution[]
output?: any
errorMessage?: string
errorBlock?: {
blockId?: string
blockName?: string
blockType?: string
}
}
function extractBlockExecutionsFromTraceSpans(traceSpans: any[]): BlockExecution[] {
@@ -74,6 +80,140 @@ function extractBlockExecutionsFromTraceSpans(traceSpans: any[]): BlockExecution
return blockExecutions
}
/**
 * Coerce an arbitrary error value into a human-readable string.
 *
 * Falsy values (null, undefined, '', 0, false) yield undefined. Errors yield
 * their message, strings pass through, plain objects are JSON-serialized, and
 * anything else (including objects that fail to serialize, e.g. circular
 * structures) falls back to String() conversion.
 */
function normalizeErrorMessage(errorValue: unknown): string | undefined {
  if (!errorValue) {
    return undefined
  }
  if (errorValue instanceof Error) {
    return errorValue.message
  }
  switch (typeof errorValue) {
    case 'string':
      return errorValue
    case 'object':
      try {
        return JSON.stringify(errorValue)
      } catch {
        // Circular references etc. — fall through to String() below.
      }
      break
  }
  try {
    return String(errorValue)
  } catch {
    return undefined
  }
}
/**
 * Pull error information out of a persisted execution record.
 *
 * Checks, in priority order: structured `errorDetails` (which can pinpoint the
 * failing block), `finalOutput.error`, then a top-level `error` field. The two
 * fallbacks are attributed to the 'Workflow' as a whole. Returns {} when no
 * error can be found.
 */
function extractErrorFromExecutionData(executionData: any): ExecutionEntry['errorBlock'] & {
  message?: string
} {
  if (!executionData) return {}

  // Structured error details take priority: they identify the failing block.
  const details = executionData.errorDetails
  if (details) {
    const detailMessage = normalizeErrorMessage(details.error || details.message)
    if (detailMessage) {
      return {
        message: detailMessage,
        blockId: details.blockId,
        blockName: details.blockName,
        blockType: details.blockType,
      }
    }
  }

  // Workflow-level fallbacks: final output error first, then a generic error.
  const fallbackMessage =
    normalizeErrorMessage(executionData.finalOutput?.error) ||
    normalizeErrorMessage(executionData.error)
  if (fallbackMessage) {
    return {
      message: fallbackMessage,
      blockName: 'Workflow',
    }
  }

  return {}
}
/**
 * Breadth-first scan of a trace-span tree for the first failed span.
 *
 * A span counts as failed when its status is 'error' or when any of its known
 * error/message fields normalizes to a non-empty string. Returns the span's
 * block identification (falling back to 'Workflow' when no blockId exists), or
 * {} when no failure is found.
 */
function extractErrorFromTraceSpans(traceSpans: any[]): ExecutionEntry['errorBlock'] & {
  message?: string
} {
  if (!Array.isArray(traceSpans) || traceSpans.length === 0) return {}

  // Walk breadth-first; children are appended so parents are inspected first.
  const pending: any[] = [...traceSpans]
  for (let i = 0; i < pending.length; i++) {
    const current = pending[i]
    if (!current || typeof current !== 'object') continue

    const errorText =
      normalizeErrorMessage(current.output?.error) ||
      normalizeErrorMessage(current.error) ||
      normalizeErrorMessage(current.output?.message) ||
      normalizeErrorMessage(current.message)

    if (current.status === 'error' || errorText) {
      return {
        message: errorText,
        blockId: current.blockId,
        blockName: current.blockName || current.name || (current.blockId ? undefined : 'Workflow'),
        blockType: current.blockType || current.type,
      }
    }

    if (Array.isArray(current.children)) {
      pending.push(...current.children)
    }
  }

  return {}
}
/**
 * Derive a single error summary for an execution from its available sources.
 *
 * Priority: (1) a block execution already marked as errored, (2) structured
 * error data on the execution record, (3) a scan of the raw trace spans.
 * Returns {} when none of the sources yields an error message.
 */
function deriveExecutionErrorSummary(params: {
  blockExecutions: BlockExecution[]
  traceSpans: any[]
  executionData: any
}): { message?: string; block?: ExecutionEntry['errorBlock'] } {
  const { blockExecutions, traceSpans, executionData } = params

  // Wrap an extracted error in the summary shape, defaulting the block name to
  // 'Workflow' when no specific block could be identified.
  const toSummary = (found: {
    message?: string
    blockId?: string
    blockName?: string
    blockType?: string
  }) => ({
    message: found.message,
    block: {
      blockId: found.blockId,
      blockName: found.blockName || (found.blockId ? undefined : 'Workflow'),
      blockType: found.blockType,
    },
  })

  // 1) A block-level failure recorded in blockExecutions is the most precise.
  const failedBlock = blockExecutions.find(
    (candidate) => candidate.status === 'error' && candidate.errorMessage
  )
  if (failedBlock) {
    return {
      message: failedBlock.errorMessage,
      block: {
        blockId: failedBlock.blockId,
        blockName: failedBlock.blockName,
        blockType: failedBlock.blockType,
      },
    }
  }

  // 2) Structured error details persisted on the execution record.
  const fromExecutionData = extractErrorFromExecutionData(executionData)
  if (fromExecutionData.message) {
    return toSummary(fromExecutionData)
  }

  // 3) Fall back to scanning the raw trace span tree.
  const fromSpans = extractErrorFromTraceSpans(traceSpans)
  if (fromSpans.message) {
    return toSummary(fromSpans)
  }

  return {}
}
export const getWorkflowConsoleServerTool: BaseServerTool<GetWorkflowConsoleArgs, any> = {
name: 'get_workflow_console',
async execute(rawArgs: GetWorkflowConsoleArgs): Promise<any> {
@@ -108,7 +248,8 @@ export const getWorkflowConsoleServerTool: BaseServerTool<GetWorkflowConsoleArgs
.limit(limit)
const formattedEntries: ExecutionEntry[] = executionLogs.map((log) => {
const traceSpans = (log.executionData as any)?.traceSpans || []
const executionData = log.executionData as any
const traceSpans = executionData?.traceSpans || []
const blockExecutions = includeDetails ? extractBlockExecutionsFromTraceSpans(traceSpans) : []
let finalOutput: any
@@ -125,6 +266,12 @@ export const getWorkflowConsoleServerTool: BaseServerTool<GetWorkflowConsoleArgs
if (outputBlock) finalOutput = outputBlock.outputData
}
const { message: errorMessage, block: errorBlock } = deriveExecutionErrorSummary({
blockExecutions,
traceSpans,
executionData,
})
return {
id: log.id,
executionId: log.executionId,
@@ -137,6 +284,8 @@ export const getWorkflowConsoleServerTool: BaseServerTool<GetWorkflowConsoleArgs
totalTokens: (log.cost as any)?.tokens?.total ?? null,
blockExecutions,
output: finalOutput,
errorMessage: errorMessage,
errorBlock: errorBlock,
}
})

View File

@@ -17,6 +17,8 @@ export const env = createEnv({
server: {
// Core Database & Authentication
DATABASE_URL: z.string().url(), // Primary database connection string
DATABASE_SSL: z.enum(['disable', 'prefer', 'require', 'verify-ca', 'verify-full']).optional(), // PostgreSQL SSL mode
DATABASE_SSL_CA: z.string().optional(), // Base64-encoded CA certificate for SSL verification
BETTER_AUTH_URL: z.string().url(), // Base URL for Better Auth service
BETTER_AUTH_SECRET: z.string().min(32), // Secret key for Better Auth JWT signing
DISABLE_REGISTRATION: z.boolean().optional(), // Flag to disable new user registration
@@ -36,7 +38,6 @@ export const env = createEnv({
// Database & Storage
POSTGRES_URL: z.string().url().optional(), // Alternative PostgreSQL connection string
REDIS_URL: z.string().url().optional(), // Redis connection string for caching/sessions
// Payment & Billing
@@ -91,6 +92,7 @@ export const env = createEnv({
TELEMETRY_ENDPOINT: z.string().url().optional(), // Custom telemetry/analytics endpoint
COST_MULTIPLIER: z.number().optional(), // Multiplier for cost calculations
LOG_LEVEL: z.enum(['DEBUG', 'INFO', 'WARN', 'ERROR']).optional(), // Minimum log level to display (defaults to ERROR in production, DEBUG in development)
POSTHOG_ENABLED: z.boolean().optional(), // Enable PostHog analytics and session recording
// External Services
BROWSERBASE_API_KEY: z.string().min(1).optional(), // Browserbase API key for browser automation
@@ -99,10 +101,10 @@ export const env = createEnv({
// Infrastructure & Deployment
NEXT_RUNTIME: z.string().optional(), // Next.js runtime environment
VERCEL_ENV: z.string().optional(), // Vercel deployment environment
DOCKER_BUILD: z.boolean().optional(), // Flag indicating Docker build environment
// Background Jobs & Scheduling
TRIGGER_PROJECT_ID: z.string().optional(), // Trigger.dev project ID
TRIGGER_SECRET_KEY: z.string().min(1).optional(), // Trigger.dev secret key for background jobs
TRIGGER_DEV_ENABLED: z.boolean().optional(), // Toggle to enable/disable Trigger.dev for async jobs
CRON_SECRET: z.string().optional(), // Secret for authenticating cron job requests
@@ -243,7 +245,6 @@ export const env = createEnv({
client: {
// Core Application URLs - Required for frontend functionality
NEXT_PUBLIC_APP_URL: z.string().url(), // Base URL of the application (e.g., https://app.sim.ai)
NEXT_PUBLIC_VERCEL_URL: z.string().optional(), // Vercel deployment URL for preview/production
// Client-side Services
NEXT_PUBLIC_SOCKET_URL: z.string().url().optional(), // WebSocket server URL for real-time features
@@ -260,6 +261,8 @@ export const env = createEnv({
// Analytics & Tracking
NEXT_PUBLIC_GOOGLE_API_KEY: z.string().optional(), // Google API key for client-side API calls
NEXT_PUBLIC_GOOGLE_PROJECT_NUMBER: z.string().optional(), // Google project number for Drive picker
NEXT_PUBLIC_POSTHOG_ENABLED: z.boolean().optional(), // Enable PostHog analytics (client-side)
NEXT_PUBLIC_POSTHOG_KEY: z.string().optional(), // PostHog project API key
// UI Branding & Whitelabeling
NEXT_PUBLIC_BRAND_NAME: z.string().optional(), // Custom brand name (defaults to "Sim")
@@ -295,7 +298,6 @@ export const env = createEnv({
experimental__runtimeEnv: {
NEXT_PUBLIC_APP_URL: process.env.NEXT_PUBLIC_APP_URL,
NEXT_PUBLIC_VERCEL_URL: process.env.NEXT_PUBLIC_VERCEL_URL,
NEXT_PUBLIC_BLOB_BASE_URL: process.env.NEXT_PUBLIC_BLOB_BASE_URL,
NEXT_PUBLIC_BILLING_ENABLED: process.env.NEXT_PUBLIC_BILLING_ENABLED,
NEXT_PUBLIC_GOOGLE_CLIENT_ID: process.env.NEXT_PUBLIC_GOOGLE_CLIENT_ID,
@@ -320,6 +322,8 @@ export const env = createEnv({
NEXT_PUBLIC_EMAIL_PASSWORD_SIGNUP_ENABLED: process.env.NEXT_PUBLIC_EMAIL_PASSWORD_SIGNUP_ENABLED,
NEXT_PUBLIC_E2B_ENABLED: process.env.NEXT_PUBLIC_E2B_ENABLED,
NEXT_PUBLIC_COPILOT_TRAINING_ENABLED: process.env.NEXT_PUBLIC_COPILOT_TRAINING_ENABLED,
NEXT_PUBLIC_POSTHOG_ENABLED: process.env.NEXT_PUBLIC_POSTHOG_ENABLED,
NEXT_PUBLIC_POSTHOG_KEY: process.env.NEXT_PUBLIC_POSTHOG_KEY,
NODE_ENV: process.env.NODE_ENV,
NEXT_TELEMETRY_DISABLED: process.env.NEXT_TELEMETRY_DISABLED,
},

View File

@@ -38,14 +38,6 @@ export const buildTimeCSPDirectives: CSPDirectives = {
"'unsafe-eval'",
'https://*.google.com',
'https://apis.google.com',
'https://*.vercel-scripts.com',
'https://*.vercel-insights.com',
'https://vercel.live',
'https://*.vercel.live',
'https://vercel.com',
'https://*.vercel.app',
'https://vitals.vercel-insights.com',
'https://b2bjsstore.s3.us-west-2.amazonaws.com',
],
'style-src': ["'self'", "'unsafe-inline'", 'https://fonts.googleapis.com'],
@@ -90,8 +82,6 @@ export const buildTimeCSPDirectives: CSPDirectives = {
env.NEXT_PUBLIC_SOCKET_URL || 'http://localhost:3002',
env.NEXT_PUBLIC_SOCKET_URL?.replace('http://', 'ws://').replace('https://', 'wss://') ||
'ws://localhost:3002',
'https://*.up.railway.app',
'wss://*.up.railway.app',
'https://api.browser-use.com',
'https://api.exa.ai',
'https://api.firecrawl.dev',
@@ -99,16 +89,8 @@ export const buildTimeCSPDirectives: CSPDirectives = {
'https://*.amazonaws.com',
'https://*.s3.amazonaws.com',
'https://*.blob.core.windows.net',
'https://*.vercel-insights.com',
'https://vitals.vercel-insights.com',
'https://*.atlassian.com',
'https://*.supabase.co',
'https://vercel.live',
'https://*.vercel.live',
'https://vercel.com',
'https://*.vercel.app',
'wss://*.vercel.app',
'https://pro.ip-api.com',
'https://api.github.com',
'https://github.com/*',
...getHostnameFromUrl(env.NEXT_PUBLIC_BRAND_LOGO_URL),
@@ -168,12 +150,12 @@ export function generateRuntimeCSP(): string {
return `
default-src 'self';
script-src 'self' 'unsafe-inline' 'unsafe-eval' https://*.google.com https://apis.google.com https://*.vercel-scripts.com https://*.vercel-insights.com https://vercel.live https://*.vercel.live https://vercel.com https://*.vercel.app https://vitals.vercel-insights.com https://b2bjsstore.s3.us-west-2.amazonaws.com;
script-src 'self' 'unsafe-inline' 'unsafe-eval' https://*.google.com https://apis.google.com;
style-src 'self' 'unsafe-inline' https://fonts.googleapis.com;
img-src 'self' data: blob: https://*.googleusercontent.com https://*.google.com https://*.atlassian.com https://cdn.discordapp.com https://*.githubusercontent.com https://*.public.blob.vercel-storage.com ${brandLogoDomain} ${brandFaviconDomain};
media-src 'self' blob:;
font-src 'self' https://fonts.gstatic.com;
connect-src 'self' ${appUrl} ${ollamaUrl} ${socketUrl} ${socketWsUrl} https://*.up.railway.app wss://*.up.railway.app https://api.browser-use.com https://api.exa.ai https://api.firecrawl.dev https://*.googleapis.com https://*.amazonaws.com https://*.s3.amazonaws.com https://*.blob.core.windows.net https://api.github.com https://github.com/* https://*.vercel-insights.com https://vitals.vercel-insights.com https://*.atlassian.com https://*.supabase.co https://vercel.live https://*.vercel.live https://vercel.com https://*.vercel.app wss://*.vercel.app https://pro.ip-api.com ${dynamicDomainsStr};
connect-src 'self' ${appUrl} ${ollamaUrl} ${socketUrl} ${socketWsUrl} https://api.browser-use.com https://api.exa.ai https://api.firecrawl.dev https://*.googleapis.com https://*.amazonaws.com https://*.s3.amazonaws.com https://*.blob.core.windows.net https://api.github.com https://github.com/* https://*.atlassian.com https://*.supabase.co ${dynamicDomainsStr};
frame-src https://drive.google.com https://docs.google.com https://*.google.com;
frame-ancestors 'self';
form-action 'self';

View File

@@ -2,6 +2,7 @@
import type React from 'react'
import { createContext, useCallback, useEffect, useMemo, useState } from 'react'
import posthog from 'posthog-js'
import { client } from '@/lib/auth-client'
export type AppSession = {
@@ -52,6 +53,25 @@ export function SessionProvider({ children }: { children: React.ReactNode }) {
loadSession()
}, [loadSession])
useEffect(() => {
if (isPending || typeof posthog.identify !== 'function') {
return
}
try {
if (data?.user) {
posthog.identify(data.user.id, {
email: data.user.email,
name: data.user.name,
email_verified: data.user.emailVerified,
created_at: data.user.createdAt,
})
} else {
posthog.reset()
}
} catch {}
}, [data, isPending])
const value = useMemo<SessionHookResult>(
() => ({ data, isPending, error, refetch: loadSession }),
[data, isPending, error, loadSession]

View File

@@ -6,7 +6,7 @@ import { isProd } from '@/lib/environment'
* @returns The base URL string (e.g., 'http://localhost:3000' or 'https://example.com')
*/
export function getBaseUrl(): string {
if (typeof window !== 'undefined') {
if (typeof window !== 'undefined' && window.location?.origin) {
return window.location.origin
}

View File

@@ -3,7 +3,12 @@ import type { BlockState } from '@/stores/workflows/workflow/types'
import { assignLayers, groupByLayer } from './layering'
import { calculatePositions } from './positioning'
import type { Edge, LayoutOptions } from './types'
import { DEFAULT_CONTAINER_HEIGHT, DEFAULT_CONTAINER_WIDTH, getBlocksByParent } from './utils'
import {
DEFAULT_CONTAINER_HEIGHT,
DEFAULT_CONTAINER_WIDTH,
getBlocksByParent,
prepareBlockMetrics,
} from './utils'
const logger = createLogger('AutoLayout:Containers')
@@ -45,6 +50,7 @@ export function layoutContainers(
}
const childNodes = assignLayers(childBlocks, childEdges)
prepareBlockMetrics(childNodes)
const childLayers = groupByLayer(childNodes)
calculatePositions(childLayers, containerOptions)
@@ -57,8 +63,8 @@ export function layoutContainers(
for (const node of childNodes.values()) {
minX = Math.min(minX, node.position.x)
minY = Math.min(minY, node.position.y)
maxX = Math.max(maxX, node.position.x + node.dimensions.width)
maxY = Math.max(maxY, node.position.y + node.dimensions.height)
maxX = Math.max(maxX, node.position.x + node.metrics.width)
maxY = Math.max(maxY, node.position.y + node.metrics.height)
}
// Adjust all child positions to start at proper padding from container edges

View File

@@ -1,7 +1,7 @@
import { createLogger } from '@/lib/logs/console/logger'
import type { BlockState } from '@/stores/workflows/workflow/types'
import type { AdjustmentOptions, Edge } from './types'
import { boxesOverlap, createBoundingBox, getBlockDimensions } from './utils'
import { boxesOverlap, createBoundingBox, getBlockMetrics } from './utils'
const logger = createLogger('AutoLayout:Incremental')
@@ -70,8 +70,8 @@ export function adjustForNewBlock(
})
}
const newBlockDims = getBlockDimensions(newBlock)
const newBlockBox = createBoundingBox(newBlock.position, newBlockDims)
const newBlockMetrics = getBlockMetrics(newBlock)
const newBlockBox = createBoundingBox(newBlock.position, newBlockMetrics)
const blocksToShift: Array<{ block: BlockState; shiftAmount: number }> = []
@@ -80,11 +80,11 @@ export function adjustForNewBlock(
if (block.data?.parentId) continue
if (block.position.x >= newBlock.position.x) {
const blockDims = getBlockDimensions(block)
const blockBox = createBoundingBox(block.position, blockDims)
const blockMetrics = getBlockMetrics(block)
const blockBox = createBoundingBox(block.position, blockMetrics)
if (boxesOverlap(newBlockBox, blockBox, 50)) {
const requiredShift = newBlock.position.x + newBlockDims.width + 50 - block.position.x
const requiredShift = newBlock.position.x + newBlockMetrics.width + 50 - block.position.x
if (requiredShift > 0) {
blocksToShift.push({ block, shiftAmount: requiredShift })
}
@@ -115,8 +115,8 @@ export function compactHorizontally(blocks: Record<string, BlockState>, edges: E
const prevBlock = blockArray[i - 1]
const currentBlock = blockArray[i]
const prevDims = getBlockDimensions(prevBlock)
const expectedX = prevBlock.position.x + prevDims.width + MIN_SPACING
const prevMetrics = getBlockMetrics(prevBlock)
const expectedX = prevBlock.position.x + prevMetrics.width + MIN_SPACING
if (currentBlock.position.x > expectedX + 150) {
const shift = currentBlock.position.x - expectedX

View File

@@ -5,7 +5,7 @@ import { adjustForNewBlock as adjustForNewBlockInternal, compactHorizontally } f
import { assignLayers, groupByLayer } from './layering'
import { calculatePositions } from './positioning'
import type { AdjustmentOptions, Edge, LayoutOptions, LayoutResult, Loop, Parallel } from './types'
import { getBlocksByParent } from './utils'
import { getBlocksByParent, prepareBlockMetrics } from './utils'
const logger = createLogger('AutoLayout')
@@ -39,6 +39,7 @@ export function applyAutoLayout(
if (Object.keys(rootBlocks).length > 0) {
const nodes = assignLayers(rootBlocks, rootEdges)
prepareBlockMetrics(nodes)
const layers = groupByLayer(nodes)
calculatePositions(layers, options)
@@ -99,4 +100,4 @@ export function adjustForNewBlock(
}
export type { LayoutOptions, LayoutResult, AdjustmentOptions, Edge, Loop, Parallel }
export { getBlockDimensions, isContainerType } from './utils'
export { getBlockMetrics, isContainerType } from './utils'

View File

@@ -1,7 +1,7 @@
import { createLogger } from '@/lib/logs/console/logger'
import type { BlockState } from '@/stores/workflows/workflow/types'
import type { Edge, GraphNode } from './types'
import { getBlockDimensions, isStarterBlock } from './utils'
import { getBlockMetrics } from './utils'
const logger = createLogger('AutoLayout:Layering')
@@ -15,7 +15,7 @@ export function assignLayers(
nodes.set(id, {
id,
block,
dimensions: getBlockDimensions(block),
metrics: getBlockMetrics(block),
incoming: new Set(),
outgoing: new Set(),
layer: 0,
@@ -33,9 +33,9 @@ export function assignLayers(
}
}
const starterNodes = Array.from(nodes.values()).filter(
(node) => node.incoming.size === 0 || isStarterBlock(node.block)
)
// Only treat blocks as starters if they have no incoming edges
// This prevents triggers that are mid-flow from being forced to layer 0
const starterNodes = Array.from(nodes.values()).filter((node) => node.incoming.size === 0)
if (starterNodes.length === 0 && nodes.size > 0) {
const firstNode = Array.from(nodes.values())[0]
@@ -43,35 +43,50 @@ export function assignLayers(
logger.warn('No starter blocks found, using first block as starter', { blockId: firstNode.id })
}
const visited = new Set<string>()
const queue: Array<{ nodeId: string; layer: number }> = []
// Use topological sort to ensure proper layering based on dependencies
// Each node's layer = max(all incoming nodes' layers) + 1
const inDegreeCount = new Map<string, number>()
for (const starter of starterNodes) {
starter.layer = 0
queue.push({ nodeId: starter.id, layer: 0 })
for (const node of nodes.values()) {
inDegreeCount.set(node.id, node.incoming.size)
if (starterNodes.includes(node)) {
node.layer = 0
}
}
while (queue.length > 0) {
const { nodeId, layer } = queue.shift()!
const queue: string[] = starterNodes.map((n) => n.id)
const processed = new Set<string>()
if (visited.has(nodeId)) {
continue
while (queue.length > 0) {
const nodeId = queue.shift()!
const node = nodes.get(nodeId)!
processed.add(nodeId)
// Calculate this node's layer based on all incoming edges
if (node.incoming.size > 0) {
let maxIncomingLayer = -1
for (const incomingId of node.incoming) {
const incomingNode = nodes.get(incomingId)
if (incomingNode) {
maxIncomingLayer = Math.max(maxIncomingLayer, incomingNode.layer)
}
}
node.layer = maxIncomingLayer + 1
}
visited.add(nodeId)
const node = nodes.get(nodeId)!
node.layer = Math.max(node.layer, layer)
// Add outgoing nodes to queue when all their dependencies are processed
for (const targetId of node.outgoing) {
const targetNode = nodes.get(targetId)
if (targetNode) {
queue.push({ nodeId: targetId, layer: layer + 1 })
const currentCount = inDegreeCount.get(targetId) || 0
inDegreeCount.set(targetId, currentCount - 1)
if (inDegreeCount.get(targetId) === 0 && !processed.has(targetId)) {
queue.push(targetId)
}
}
}
for (const node of nodes.values()) {
if (!visited.has(node.id)) {
if (!processed.has(node.id)) {
logger.debug('Isolated node detected, assigning to layer 0', { blockId: node.id })
node.layer = 0
}

View File

@@ -26,7 +26,7 @@ export function calculatePositions(
// Calculate total height needed for this layer
const totalHeight = nodesInLayer.reduce(
(sum, node, idx) => sum + node.dimensions.height + (idx > 0 ? verticalSpacing : 0),
(sum, node, idx) => sum + node.metrics.height + (idx > 0 ? verticalSpacing : 0),
0
)
@@ -55,7 +55,7 @@ export function calculatePositions(
y: yOffset,
}
yOffset += node.dimensions.height + verticalSpacing
yOffset += node.metrics.height + verticalSpacing
}
}
@@ -83,8 +83,8 @@ function resolveOverlaps(nodes: GraphNode[], verticalSpacing: number): void {
const node1 = sortedNodes[i]
const node2 = sortedNodes[j]
const box1 = createBoundingBox(node1.position, node1.dimensions)
const box2 = createBoundingBox(node2.position, node2.dimensions)
const box1 = createBoundingBox(node1.position, node1.metrics)
const box2 = createBoundingBox(node2.position, node2.metrics)
// Check for overlap with margin
if (boxesOverlap(box1, box2, 30)) {
@@ -92,11 +92,11 @@ function resolveOverlaps(nodes: GraphNode[], verticalSpacing: number): void {
// If in same layer, shift vertically
if (node1.layer === node2.layer) {
const totalHeight = node1.dimensions.height + node2.dimensions.height + verticalSpacing
const totalHeight = node1.metrics.height + node2.metrics.height + verticalSpacing
const midpoint = (node1.position.y + node2.position.y) / 2
node1.position.y = midpoint - node1.dimensions.height / 2 - verticalSpacing / 2
node2.position.y = midpoint + node2.dimensions.height / 2 + verticalSpacing / 2
node1.position.y = midpoint - node1.metrics.height / 2 - verticalSpacing / 2
node2.position.y = midpoint + node2.metrics.height / 2 + verticalSpacing / 2
} else {
// Different layers - shift the later one down
const requiredSpace = box1.y + box1.height + verticalSpacing

View File

@@ -35,9 +35,15 @@ export interface Parallel {
parallelType?: 'count' | 'collection'
}
export interface BlockDimensions {
export interface BlockMetrics {
width: number
height: number
minWidth: number
minHeight: number
paddingTop: number
paddingBottom: number
paddingLeft: number
paddingRight: number
}
export interface BoundingBox {
@@ -55,7 +61,7 @@ export interface LayerInfo {
export interface GraphNode {
id: string
block: BlockState
dimensions: BlockDimensions
metrics: BlockMetrics
incoming: Set<string>
outgoing: Set<string>
layer: number

View File

@@ -1,33 +1,85 @@
import { TriggerUtils } from '@/lib/workflows/triggers'
import type { BlockState } from '@/stores/workflows/workflow/types'
import type { BlockDimensions, BoundingBox } from './types'
import type { BlockMetrics, BoundingBox, GraphNode } from './types'
export const DEFAULT_BLOCK_WIDTH = 350
export const DEFAULT_BLOCK_WIDTH_WIDE = 480
export const DEFAULT_BLOCK_HEIGHT = 100
export const DEFAULT_CONTAINER_WIDTH = 500
export const DEFAULT_CONTAINER_HEIGHT = 300
const DEFAULT_PADDING = 40
function resolveNumeric(value: number | undefined, fallback: number): number {
return typeof value === 'number' && Number.isFinite(value) ? value : fallback
}
export function isContainerType(blockType: string): boolean {
return blockType === 'loop' || blockType === 'parallel'
}
export function getBlockDimensions(block: BlockState): BlockDimensions {
if (isContainerType(block.type)) {
return {
width: block.data?.width ? Math.max(block.data.width, 400) : DEFAULT_CONTAINER_WIDTH,
height: block.data?.height ? Math.max(block.data.height, 200) : DEFAULT_CONTAINER_HEIGHT,
}
}
function getContainerMetrics(block: BlockState): BlockMetrics {
const measuredWidth = block.layout?.measuredWidth
const measuredHeight = block.layout?.measuredHeight
const containerWidth = Math.max(
measuredWidth ?? 0,
resolveNumeric(block.data?.width, DEFAULT_CONTAINER_WIDTH)
)
const containerHeight = Math.max(
measuredHeight ?? 0,
resolveNumeric(block.data?.height, DEFAULT_CONTAINER_HEIGHT)
)
return {
width: block.isWide ? DEFAULT_BLOCK_WIDTH_WIDE : DEFAULT_BLOCK_WIDTH,
height: Math.max(block.height || DEFAULT_BLOCK_HEIGHT, DEFAULT_BLOCK_HEIGHT),
width: containerWidth,
height: containerHeight,
minWidth: DEFAULT_CONTAINER_WIDTH,
minHeight: DEFAULT_CONTAINER_HEIGHT,
paddingTop: DEFAULT_PADDING,
paddingBottom: DEFAULT_PADDING,
paddingLeft: DEFAULT_PADDING,
paddingRight: DEFAULT_PADDING,
}
}
function getRegularBlockMetrics(block: BlockState): BlockMetrics {
const minWidth = block.isWide ? DEFAULT_BLOCK_WIDTH_WIDE : DEFAULT_BLOCK_WIDTH
const minHeight = DEFAULT_BLOCK_HEIGHT
const measuredH = block.layout?.measuredHeight ?? block.height
const measuredW = block.layout?.measuredWidth
const width = Math.max(measuredW ?? minWidth, minWidth)
const height = Math.max(measuredH ?? minHeight, minHeight)
return {
width,
height,
minWidth,
minHeight,
paddingTop: DEFAULT_PADDING,
paddingBottom: DEFAULT_PADDING,
paddingLeft: DEFAULT_PADDING,
paddingRight: DEFAULT_PADDING,
}
}
export function getBlockMetrics(block: BlockState): BlockMetrics {
if (isContainerType(block.type)) {
return getContainerMetrics(block)
}
return getRegularBlockMetrics(block)
}
export function prepareBlockMetrics(nodes: Map<string, GraphNode>): void {
for (const node of nodes.values()) {
node.metrics = getBlockMetrics(node.block)
}
}
export function createBoundingBox(
position: { x: number; y: number },
dimensions: BlockDimensions
dimensions: Pick<BlockMetrics, 'width' | 'height'>
): BoundingBox {
return {
x: position.x,
@@ -70,5 +122,9 @@ export function getBlocksByParent(blocks: Record<string, BlockState>): {
}
export function isStarterBlock(block: BlockState): boolean {
return block.type === 'starter' || block.type === 'webhook' || block.type === 'schedule'
if (TriggerUtils.isTriggerBlock({ type: block.type, triggerMode: block.triggerMode })) {
return true
}
return false
}

View File

@@ -214,7 +214,6 @@ const mockWorkflowState: WorkflowState = {
lastSaved: Date.now(),
isDeployed: false,
deploymentStatuses: {},
hasActiveWebhook: false,
}
describe('Database Helpers', () => {
@@ -452,11 +451,6 @@ describe('Database Helpers', () => {
)
expect(result.success).toBe(true)
expect(result.jsonBlob).toBeDefined()
expect(result.jsonBlob.blocks).toEqual(mockWorkflowState.blocks)
expect(result.jsonBlob.edges).toEqual(mockWorkflowState.edges)
expect(result.jsonBlob.loops).toEqual(mockWorkflowState.loops)
expect(result.jsonBlob.parallels).toEqual(mockWorkflowState.parallels)
// Verify transaction was called
expect(mockTransaction).toHaveBeenCalledTimes(1)
@@ -471,7 +465,6 @@ describe('Database Helpers', () => {
lastSaved: Date.now(),
isDeployed: false,
deploymentStatuses: {},
hasActiveWebhook: false,
}
const mockTransaction = vi.fn().mockImplementation(async (callback) => {
@@ -494,10 +487,6 @@ describe('Database Helpers', () => {
)
expect(result.success).toBe(true)
expect(result.jsonBlob.blocks).toEqual({})
expect(result.jsonBlob.edges).toEqual([])
expect(result.jsonBlob.loops).toEqual({})
expect(result.jsonBlob.parallels).toEqual({})
})
it('should return error when transaction fails', async () => {
@@ -650,7 +639,6 @@ describe('Database Helpers', () => {
lastSaved: Date.now(),
isDeployed: false,
deploymentStatuses: {},
hasActiveWebhook: false,
}
it('should successfully migrate workflow from JSON to normalized tables', async () => {
@@ -737,7 +725,6 @@ describe('Database Helpers', () => {
lastSaved: Date.now(),
isDeployed: false,
deploymentStatuses: {},
hasActiveWebhook: false,
}
// Create 1000 blocks
@@ -782,8 +769,6 @@ describe('Database Helpers', () => {
)
expect(result.success).toBe(true)
expect(Object.keys(result.jsonBlob.blocks)).toHaveLength(1000)
expect(result.jsonBlob.edges).toHaveLength(999)
})
})
@@ -1020,7 +1005,6 @@ describe('Database Helpers', () => {
loops: {},
parallels: {},
deploymentStatuses: {},
hasActiveWebhook: false,
}
// Mock the transaction for save operation
@@ -1058,10 +1042,6 @@ describe('Database Helpers', () => {
)
expect(saveResult.success).toBe(true)
// Step 6: Verify the JSON blob also preserves advancedMode
expect(saveResult.jsonBlob?.blocks['agent-original'].advancedMode).toBe(true)
expect(saveResult.jsonBlob?.blocks['agent-duplicate'].advancedMode).toBe(true)
// Verify the database insert was called with the correct values
expect(mockTransaction).toHaveBeenCalled()
})
@@ -1161,7 +1141,6 @@ describe('Database Helpers', () => {
loops: {},
parallels: {},
deploymentStatuses: {},
hasActiveWebhook: false,
}
// Mock successful save

View File

@@ -36,10 +36,34 @@ export interface NormalizedWorkflowData {
isFromNormalizedTables: boolean // Flag to indicate source (true = normalized tables, false = deployed state)
}
/**
* Load deployed workflow state for execution
* Returns deployed state if available, otherwise throws error
*/
export async function blockExistsInDeployment(
workflowId: string,
blockId: string
): Promise<boolean> {
try {
const [result] = await db
.select({ state: workflowDeploymentVersion.state })
.from(workflowDeploymentVersion)
.where(
and(
eq(workflowDeploymentVersion.workflowId, workflowId),
eq(workflowDeploymentVersion.isActive, true)
)
)
.limit(1)
if (!result?.state) {
return false
}
const state = result.state as WorkflowState
return !!state.blocks?.[blockId]
} catch (error) {
logger.error(`Error checking block ${blockId} in deployment for workflow ${workflowId}:`, error)
return false
}
}
export async function loadDeployedWorkflowState(
workflowId: string
): Promise<NormalizedWorkflowData> {
@@ -126,12 +150,7 @@ export async function loadWorkflowFromNormalizedTables(
})
// Sanitize any invalid custom tools in agent blocks to prevent client crashes
const { blocks: sanitizedBlocks, warnings } = sanitizeAgentToolsInBlocks(blocksMap)
if (warnings.length > 0) {
logger.warn(`Sanitized workflow ${workflowId} tools with ${warnings.length} warning(s)`, {
warnings,
})
}
const { blocks: sanitizedBlocks } = sanitizeAgentToolsInBlocks(blocksMap)
// Convert edges to the expected format
const edgesArray: Edge[] = edges.map((edge) => ({
@@ -197,12 +216,11 @@ export async function loadWorkflowFromNormalizedTables(
/**
* Save workflow state to normalized tables
* Also returns the JSON blob for backward compatibility
*/
export async function saveWorkflowToNormalizedTables(
workflowId: string,
state: WorkflowState
): Promise<{ success: boolean; jsonBlob?: any; error?: string }> {
): Promise<{ success: boolean; error?: string }> {
try {
// Start a transaction
await db.transaction(async (tx) => {
@@ -278,27 +296,9 @@ export async function saveWorkflowToNormalizedTables(
if (subflowInserts.length > 0) {
await tx.insert(workflowSubflows).values(subflowInserts)
}
return { success: true }
})
// Create JSON blob for backward compatibility
const jsonBlob = {
blocks: state.blocks,
edges: state.edges,
loops: state.loops || {},
parallels: state.parallels || {},
lastSaved: Date.now(),
isDeployed: state.isDeployed,
deployedAt: state.deployedAt,
deploymentStatuses: state.deploymentStatuses,
hasActiveWebhook: state.hasActiveWebhook,
}
return {
success: true,
jsonBlob,
}
return { success: true }
} catch (error) {
logger.error(`Error saving workflow ${workflowId} to normalized tables:`, error)
return {
@@ -335,6 +335,7 @@ export async function migrateWorkflowToNormalizedTables(
): Promise<{ success: boolean; error?: string }> {
try {
// Convert JSON state to WorkflowState format
// Only include fields that are actually persisted to normalized tables
const workflowState: WorkflowState = {
blocks: jsonState.blocks || {},
edges: jsonState.edges || [],
@@ -343,16 +344,9 @@ export async function migrateWorkflowToNormalizedTables(
lastSaved: jsonState.lastSaved,
isDeployed: jsonState.isDeployed,
deployedAt: jsonState.deployedAt,
deploymentStatuses: jsonState.deploymentStatuses || {},
hasActiveWebhook: jsonState.hasActiveWebhook,
}
const result = await saveWorkflowToNormalizedTables(workflowId, workflowState)
if (result.success) {
return { success: true }
}
return { success: false, error: result.error }
return await saveWorkflowToNormalizedTables(workflowId, workflowState)
} catch (error) {
logger.error(`Error migrating workflow ${workflowId} to normalized tables:`, error)
return {

View File

@@ -68,7 +68,6 @@ export function useWorkflowDiff(): UseWorkflowDiffReturn {
isDeployed: currentState.isDeployed,
deployedAt: currentState.deployedAt,
deploymentStatuses: { ...currentState.deploymentStatuses },
hasActiveWebhook: currentState.hasActiveWebhook,
},
subblockValues: JSON.parse(JSON.stringify(currentSubblockValues)), // Deep copy
timestamp: Date.now(),
@@ -107,7 +106,6 @@ export function useWorkflowDiff(): UseWorkflowDiffReturn {
isDeployed: backup.workflowState.isDeployed,
deployedAt: backup.workflowState.deployedAt,
deploymentStatuses: backup.workflowState.deploymentStatuses,
hasActiveWebhook: backup.workflowState.hasActiveWebhook,
})
// Restore subblock values

View File

@@ -1,43 +1,30 @@
import type { Edge } from 'reactflow'
import type {
BlockState,
Loop,
Parallel,
Position,
WorkflowState,
} from '@/stores/workflows/workflow/types'
import type { BlockState, Loop, Parallel, WorkflowState } from '@/stores/workflows/workflow/types'
/**
* Sanitized workflow state for copilot (removes all UI-specific data)
* Connections are embedded in blocks for consistency with operations format
* Loops and parallels use nested structure - no separate loops/parallels objects
*/
export interface CopilotWorkflowState {
blocks: Record<string, CopilotBlockState>
edges: CopilotEdge[]
loops: Record<string, Loop>
parallels: Record<string, Parallel>
}
/**
* Block state for copilot (no positions, no UI dimensions)
* Block state for copilot (no positions, no UI dimensions, no redundant IDs)
* Connections are embedded here instead of separate edges array
* Loops and parallels have nested structure for clarity
*/
export interface CopilotBlockState {
id: string
type: string
name: string
subBlocks: BlockState['subBlocks']
inputs?: Record<string, string | number | string[][]>
outputs: BlockState['outputs']
connections?: Record<string, string | string[]>
nestedNodes?: Record<string, CopilotBlockState>
enabled: boolean
advancedMode?: boolean
triggerMode?: boolean
// Keep semantic data only (no width/height)
data?: {
parentId?: string
extent?: 'parent'
loopType?: 'for' | 'forEach'
parallelType?: 'collection' | 'count'
collection?: any
count?: number
}
}
/**
@@ -66,55 +53,208 @@ export interface ExportWorkflowState {
}
/**
* Sanitize workflow state for copilot by removing all UI-specific data
* Copilot doesn't need to see positions, dimensions, or visual styling
* Check if a subblock contains sensitive/secret data
*/
export function sanitizeForCopilot(state: WorkflowState): CopilotWorkflowState {
const sanitizedBlocks: Record<string, CopilotBlockState> = {}
function isSensitiveSubBlock(key: string, subBlock: BlockState['subBlocks'][string]): boolean {
// Check if it's an OAuth input type
if (subBlock.type === 'oauth-input') {
return true
}
// Sanitize blocks - remove position and UI-only fields
Object.entries(state.blocks).forEach(([blockId, block]) => {
const sanitizedData: CopilotBlockState['data'] = block.data
? {
// Keep semantic fields only
...(block.data.parentId !== undefined && { parentId: block.data.parentId }),
...(block.data.extent !== undefined && { extent: block.data.extent }),
...(block.data.loopType !== undefined && { loopType: block.data.loopType }),
...(block.data.parallelType !== undefined && { parallelType: block.data.parallelType }),
...(block.data.collection !== undefined && { collection: block.data.collection }),
...(block.data.count !== undefined && { count: block.data.count }),
}
: undefined
// Check if the field name suggests it contains sensitive data
const sensitivePattern = /credential|oauth|api[_-]?key|token|secret|auth|password|bearer/i
if (sensitivePattern.test(key)) {
return true
}
sanitizedBlocks[blockId] = {
id: block.id,
type: block.type,
name: block.name,
subBlocks: block.subBlocks,
outputs: block.outputs,
enabled: block.enabled,
...(block.advancedMode !== undefined && { advancedMode: block.advancedMode }),
...(block.triggerMode !== undefined && { triggerMode: block.triggerMode }),
...(sanitizedData && Object.keys(sanitizedData).length > 0 && { data: sanitizedData }),
// Check if the value itself looks like a secret (but not environment variable references)
if (typeof subBlock.value === 'string' && subBlock.value.length > 0) {
// Don't sanitize environment variable references like {{VAR_NAME}}
if (subBlock.value.startsWith('{{') && subBlock.value.endsWith('}}')) {
return false
}
// If it matches sensitive patterns in the value, it's likely a hardcoded secret
if (sensitivePattern.test(subBlock.value)) {
return true
}
}
return false
}
/**
* Sanitize subblocks by removing null values, secrets, and simplifying structure
* Maps each subblock key directly to its value instead of the full object
*/
function sanitizeSubBlocks(
subBlocks: BlockState['subBlocks']
): Record<string, string | number | string[][]> {
const sanitized: Record<string, string | number | string[][]> = {}
Object.entries(subBlocks).forEach(([key, subBlock]) => {
// Skip null/undefined values
if (subBlock.value === null || subBlock.value === undefined) {
return
}
// For sensitive fields, either omit or replace with placeholder
if (isSensitiveSubBlock(key, subBlock)) {
// If it's an environment variable reference, keep it
if (
typeof subBlock.value === 'string' &&
subBlock.value.startsWith('{{') &&
subBlock.value.endsWith('}}')
) {
sanitized[key] = subBlock.value
}
// Otherwise omit the sensitive value entirely
return
}
// For non-sensitive, non-null values, include them
sanitized[key] = subBlock.value
})
return sanitized
}
/**
* Reconstruct full subBlock structure from simplified copilot format
* Uses existing block structure as template for id and type fields
*/
function reconstructSubBlocks(
simplifiedSubBlocks: Record<string, string | number | string[][]>,
existingSubBlocks?: BlockState['subBlocks']
): BlockState['subBlocks'] {
const reconstructed: BlockState['subBlocks'] = {}
Object.entries(simplifiedSubBlocks).forEach(([key, value]) => {
const existingSubBlock = existingSubBlocks?.[key]
reconstructed[key] = {
id: existingSubBlock?.id || key,
type: existingSubBlock?.type || 'short-input',
value,
}
})
// Sanitize edges - keep only semantic connection data
const sanitizedEdges: CopilotEdge[] = state.edges.map((edge) => ({
id: edge.id,
source: edge.source,
target: edge.target,
...(edge.sourceHandle !== undefined &&
edge.sourceHandle !== null && { sourceHandle: edge.sourceHandle }),
...(edge.targetHandle !== undefined &&
edge.targetHandle !== null && { targetHandle: edge.targetHandle }),
}))
return reconstructed
}
/**
* Extract connections for a block from edges and format as operations-style connections
*/
function extractConnectionsForBlock(
blockId: string,
edges: WorkflowState['edges']
): Record<string, string | string[]> | undefined {
const connections: Record<string, string[]> = {}
// Find all outgoing edges from this block
const outgoingEdges = edges.filter((edge) => edge.source === blockId)
if (outgoingEdges.length === 0) {
return undefined
}
// Group by source handle
for (const edge of outgoingEdges) {
const handle = edge.sourceHandle || 'source'
if (!connections[handle]) {
connections[handle] = []
}
connections[handle].push(edge.target)
}
// Simplify single-element arrays to just the string
const simplified: Record<string, string | string[]> = {}
for (const [handle, targets] of Object.entries(connections)) {
simplified[handle] = targets.length === 1 ? targets[0] : targets
}
return simplified
}
/**
* Sanitize workflow state for copilot by removing all UI-specific data
* Creates nested structure for loops/parallels with their child blocks inside
*/
export function sanitizeForCopilot(state: WorkflowState): CopilotWorkflowState {
const sanitizedBlocks: Record<string, CopilotBlockState> = {}
const processedBlocks = new Set<string>()
// Helper to find child blocks of a parent (loop/parallel container)
const findChildBlocks = (parentId: string): string[] => {
return Object.keys(state.blocks).filter(
(blockId) => state.blocks[blockId].data?.parentId === parentId
)
}
// Helper to recursively sanitize a block and its children
const sanitizeBlock = (blockId: string, block: BlockState): CopilotBlockState => {
const connections = extractConnectionsForBlock(blockId, state.edges)
// For loop/parallel blocks, extract config from block.data instead of subBlocks
let inputs: Record<string, string | number | string[][]> = {}
if (block.type === 'loop' || block.type === 'parallel') {
// Extract configuration from block.data
if (block.data?.loopType) inputs.loopType = block.data.loopType
if (block.data?.count !== undefined) inputs.iterations = block.data.count
if (block.data?.collection !== undefined) inputs.collection = block.data.collection
if (block.data?.parallelType) inputs.parallelType = block.data.parallelType
} else {
// For regular blocks, sanitize subBlocks
inputs = sanitizeSubBlocks(block.subBlocks)
}
// Check if this is a loop or parallel (has children)
const childBlockIds = findChildBlocks(blockId)
const nestedNodes: Record<string, CopilotBlockState> = {}
if (childBlockIds.length > 0) {
// Recursively sanitize child blocks
childBlockIds.forEach((childId) => {
const childBlock = state.blocks[childId]
if (childBlock) {
nestedNodes[childId] = sanitizeBlock(childId, childBlock)
processedBlocks.add(childId)
}
})
}
const result: CopilotBlockState = {
type: block.type,
name: block.name,
outputs: block.outputs,
enabled: block.enabled,
}
if (Object.keys(inputs).length > 0) result.inputs = inputs
if (connections) result.connections = connections
if (Object.keys(nestedNodes).length > 0) result.nestedNodes = nestedNodes
if (block.advancedMode !== undefined) result.advancedMode = block.advancedMode
if (block.triggerMode !== undefined) result.triggerMode = block.triggerMode
return result
}
// Process only root-level blocks (those without a parent)
Object.entries(state.blocks).forEach(([blockId, block]) => {
// Skip if already processed as a child
if (processedBlocks.has(blockId)) return
// Skip if it has a parent (it will be processed as nested)
if (block.data?.parentId) return
sanitizedBlocks[blockId] = sanitizeBlock(blockId, block)
})
return {
blocks: sanitizedBlocks,
edges: sanitizedEdges,
loops: state.loops || {},
parallels: state.parallels || {},
}
}
@@ -167,204 +307,3 @@ export function sanitizeForExport(state: WorkflowState): ExportWorkflowState {
state: clonedState,
}
}
/**
* Validate that edges reference existing blocks
*/
export function validateEdges(
blocks: Record<string, any>,
edges: CopilotEdge[]
): {
valid: boolean
errors: string[]
} {
const errors: string[] = []
const blockIds = new Set(Object.keys(blocks))
edges.forEach((edge, index) => {
if (!blockIds.has(edge.source)) {
errors.push(`Edge ${index} references non-existent source block: ${edge.source}`)
}
if (!blockIds.has(edge.target)) {
errors.push(`Edge ${index} references non-existent target block: ${edge.target}`)
}
})
return {
valid: errors.length === 0,
errors,
}
}
/**
* Generate position for a new block based on its connections
* Uses compact horizontal spacing and intelligent positioning
*/
export function generatePositionForNewBlock(
blockId: string,
edges: CopilotEdge[],
existingBlocks: Record<string, BlockState>
): Position {
const HORIZONTAL_SPACING = 550
const VERTICAL_SPACING = 200
const incomingEdges = edges.filter((e) => e.target === blockId)
if (incomingEdges.length > 0) {
const sourceBlocks = incomingEdges
.map((e) => existingBlocks[e.source])
.filter((b) => b !== undefined)
if (sourceBlocks.length > 0) {
const rightmostX = Math.max(...sourceBlocks.map((b) => b.position.x))
const avgY = sourceBlocks.reduce((sum, b) => sum + b.position.y, 0) / sourceBlocks.length
return {
x: rightmostX + HORIZONTAL_SPACING,
y: avgY,
}
}
}
const outgoingEdges = edges.filter((e) => e.source === blockId)
if (outgoingEdges.length > 0) {
const targetBlocks = outgoingEdges
.map((e) => existingBlocks[e.target])
.filter((b) => b !== undefined)
if (targetBlocks.length > 0) {
const leftmostX = Math.min(...targetBlocks.map((b) => b.position.x))
const avgY = targetBlocks.reduce((sum, b) => sum + b.position.y, 0) / targetBlocks.length
return {
x: Math.max(150, leftmostX - HORIZONTAL_SPACING),
y: avgY,
}
}
}
const existingPositions = Object.values(existingBlocks).map((b) => b.position)
if (existingPositions.length > 0) {
const maxY = Math.max(...existingPositions.map((p) => p.y))
return {
x: 150,
y: maxY + VERTICAL_SPACING,
}
}
return { x: 150, y: 300 }
}
/**
* Merge sanitized copilot state with full UI state
* Preserves positions for existing blocks, generates positions for new blocks
*/
export function mergeWithUIState(
sanitized: CopilotWorkflowState,
fullState: WorkflowState
): WorkflowState {
const mergedBlocks: Record<string, BlockState> = {}
const existingBlocks = fullState.blocks
// Convert sanitized edges to full edges for position generation
const sanitizedEdges = sanitized.edges
// Process each block from sanitized state
Object.entries(sanitized.blocks).forEach(([blockId, sanitizedBlock]) => {
const existingBlock = existingBlocks[blockId]
if (existingBlock) {
// Existing block - preserve position and UI fields, update semantic fields
mergedBlocks[blockId] = {
...existingBlock,
// Update semantic fields from sanitized
type: sanitizedBlock.type,
name: sanitizedBlock.name,
subBlocks: sanitizedBlock.subBlocks,
outputs: sanitizedBlock.outputs,
enabled: sanitizedBlock.enabled,
advancedMode: sanitizedBlock.advancedMode,
triggerMode: sanitizedBlock.triggerMode,
// Merge data carefully
data: sanitizedBlock.data
? {
...existingBlock.data,
...sanitizedBlock.data,
}
: existingBlock.data,
}
} else {
// New block - generate position
const position = generatePositionForNewBlock(blockId, sanitizedEdges, existingBlocks)
mergedBlocks[blockId] = {
id: sanitizedBlock.id,
type: sanitizedBlock.type,
name: sanitizedBlock.name,
position,
subBlocks: sanitizedBlock.subBlocks,
outputs: sanitizedBlock.outputs,
enabled: sanitizedBlock.enabled,
horizontalHandles: true,
isWide: false,
height: 0,
advancedMode: sanitizedBlock.advancedMode,
triggerMode: sanitizedBlock.triggerMode,
data: sanitizedBlock.data
? {
...sanitizedBlock.data,
// Add UI dimensions if it's a container
...(sanitizedBlock.type === 'loop' || sanitizedBlock.type === 'parallel'
? {
width: 500,
height: 300,
type: 'subflowNode',
}
: {}),
}
: undefined,
}
}
})
// Convert sanitized edges to full edges
const mergedEdges: Edge[] = sanitized.edges.map((edge) => {
// Try to find existing edge to preserve styling
const existingEdge = fullState.edges.find(
(e) =>
e.source === edge.source &&
e.target === edge.target &&
e.sourceHandle === edge.sourceHandle &&
e.targetHandle === edge.targetHandle
)
if (existingEdge) {
return existingEdge
}
// New edge - create with defaults
return {
id: edge.id,
source: edge.source,
target: edge.target,
sourceHandle: edge.sourceHandle,
targetHandle: edge.targetHandle,
type: 'default',
data: {},
} as Edge
})
return {
blocks: mergedBlocks,
edges: mergedEdges,
loops: sanitized.loops,
parallels: sanitized.parallels,
lastSaved: Date.now(),
// Preserve deployment info
isDeployed: fullState.isDeployed,
deployedAt: fullState.deployedAt,
deploymentStatuses: fullState.deploymentStatuses,
hasActiveWebhook: fullState.hasActiveWebhook,
}
}

View File

@@ -0,0 +1,73 @@
import { normalizeBlockName } from '@/stores/workflows/utils'
export const SYSTEM_REFERENCE_PREFIXES = new Set(['start', 'loop', 'parallel', 'variable'])
const INVALID_REFERENCE_CHARS = /[+*/=<>!]/
export function isLikelyReferenceSegment(segment: string): boolean {
if (!segment.startsWith('<') || !segment.endsWith('>')) {
return false
}
const inner = segment.slice(1, -1)
if (inner.startsWith(' ')) {
return false
}
if (inner.match(/^\s*[<>=!]+\s*$/) || inner.match(/\s[<>=!]+\s/)) {
return false
}
if (inner.match(/^[<>=!]+\s/)) {
return false
}
if (inner.includes('.')) {
const dotIndex = inner.indexOf('.')
const beforeDot = inner.substring(0, dotIndex)
const afterDot = inner.substring(dotIndex + 1)
if (afterDot.includes(' ')) {
return false
}
if (INVALID_REFERENCE_CHARS.test(beforeDot) || INVALID_REFERENCE_CHARS.test(afterDot)) {
return false
}
} else if (INVALID_REFERENCE_CHARS.test(inner) || inner.match(/^\d/) || inner.match(/\s\d/)) {
return false
}
return true
}
export function extractReferencePrefixes(value: string): Array<{ raw: string; prefix: string }> {
if (!value || typeof value !== 'string') {
return []
}
const matches = value.match(/<[^>]+>/g)
if (!matches) {
return []
}
const references: Array<{ raw: string; prefix: string }> = []
for (const match of matches) {
if (!isLikelyReferenceSegment(match)) {
continue
}
const inner = match.slice(1, -1)
const [rawPrefix] = inner.split('.')
if (!rawPrefix) {
continue
}
const normalized = normalizeBlockName(rawPrefix)
references.push({ raw: match, prefix: normalized })
}
return references
}

View File

@@ -1,29 +1,19 @@
import type { CopilotWorkflowState } from '@/lib/workflows/json-sanitizer'
export interface EditOperation {
operation_type: 'add' | 'edit' | 'delete'
operation_type: 'add' | 'edit' | 'delete' | 'insert_into_subflow' | 'extract_from_subflow'
block_id: string
params?: {
type?: string
name?: string
outputs?: Record<string, any>
enabled?: boolean
triggerMode?: boolean
advancedMode?: boolean
inputs?: Record<string, any>
connections?: Record<string, any>
removeEdges?: Array<{ targetBlockId: string; sourceHandle?: string }>
loopConfig?: {
nodes?: string[]
iterations?: number
loopType?: 'for' | 'forEach'
forEachItems?: any
}
parallelConfig?: {
nodes?: string[]
distribution?: any
count?: number
parallelType?: 'count' | 'collection'
}
parentId?: string
extent?: 'parent'
nestedNodes?: Record<string, any>
subflowId?: string
}
}
@@ -38,6 +28,79 @@ export interface WorkflowDiff {
}
}
/**
* Flatten nested blocks into a single-level map for comparison
* Returns map of blockId -> {block, parentId}
*/
function flattenBlocks(
blocks: Record<string, any>
): Record<string, { block: any; parentId?: string }> {
const flattened: Record<string, { block: any; parentId?: string }> = {}
const processBlock = (blockId: string, block: any, parentId?: string) => {
flattened[blockId] = { block, parentId }
// Recursively process nested nodes
if (block.nestedNodes) {
Object.entries(block.nestedNodes).forEach(([nestedId, nestedBlock]) => {
processBlock(nestedId, nestedBlock, blockId)
})
}
}
Object.entries(blocks).forEach(([blockId, block]) => {
processBlock(blockId, block)
})
return flattened
}
/**
* Extract all edges from blocks with embedded connections (including nested)
*/
function extractAllEdgesFromBlocks(blocks: Record<string, any>): Array<{
source: string
target: string
sourceHandle?: string | null
targetHandle?: string | null
}> {
const edges: Array<{
source: string
target: string
sourceHandle?: string | null
targetHandle?: string | null
}> = []
const processBlockConnections = (block: any, blockId: string) => {
if (block.connections) {
Object.entries(block.connections).forEach(([sourceHandle, targets]) => {
const targetArray = Array.isArray(targets) ? targets : [targets]
targetArray.forEach((target: string) => {
edges.push({
source: blockId,
target,
sourceHandle,
targetHandle: 'target',
})
})
})
}
// Process nested nodes
if (block.nestedNodes) {
Object.entries(block.nestedNodes).forEach(([nestedId, nestedBlock]) => {
processBlockConnections(nestedBlock, nestedId)
})
}
}
Object.entries(blocks).forEach(([blockId, block]) => {
processBlockConnections(block, blockId)
})
return edges
}
/**
* Compute the edit sequence (operations) needed to transform startState into endState
* This analyzes the differences and generates operations that can recreate the changes
@@ -51,12 +114,14 @@ export function computeEditSequence(
const startBlocks = startState.blocks || {}
const endBlocks = endState.blocks || {}
const startEdges = startState.edges || []
const endEdges = endState.edges || []
const startLoops = startState.loops || {}
const endLoops = endState.loops || {}
const startParallels = startState.parallels || {}
const endParallels = endState.parallels || {}
// Flatten nested blocks for comparison (includes nested nodes at top level)
const startFlattened = flattenBlocks(startBlocks)
const endFlattened = flattenBlocks(endBlocks)
// Extract edges from connections for tracking
const startEdges = extractAllEdgesFromBlocks(startBlocks)
const endEdges = extractAllEdgesFromBlocks(endBlocks)
// Track statistics
let blocksAdded = 0
@@ -65,74 +130,171 @@ export function computeEditSequence(
let edgesChanged = 0
let subflowsChanged = 0
// Track which blocks are being deleted (including subflows)
const deletedBlocks = new Set<string>()
for (const blockId in startFlattened) {
if (!(blockId in endFlattened)) {
deletedBlocks.add(blockId)
}
}
// 1. Find deleted blocks (exist in start but not in end)
for (const blockId in startBlocks) {
if (!(blockId in endBlocks)) {
operations.push({
operation_type: 'delete',
block_id: blockId,
})
blocksDeleted++
for (const blockId in startFlattened) {
if (!(blockId in endFlattened)) {
const { parentId } = startFlattened[blockId]
// Skip if parent is also being deleted (cascade delete is implicit)
if (parentId && deletedBlocks.has(parentId)) {
continue
}
if (parentId) {
// Block was inside a subflow and was removed (but subflow still exists)
operations.push({
operation_type: 'extract_from_subflow',
block_id: blockId,
params: {
subflowId: parentId,
},
})
subflowsChanged++
} else {
// Regular block deletion
operations.push({
operation_type: 'delete',
block_id: blockId,
})
blocksDeleted++
}
}
}
// 2. Find added blocks (exist in end but not in start)
for (const blockId in endBlocks) {
if (!(blockId in startBlocks)) {
const block = endBlocks[blockId]
const addParams: EditOperation['params'] = {
type: block.type,
name: block.name,
inputs: extractInputValues(block),
connections: extractConnections(blockId, endEdges),
triggerMode: Boolean(block?.triggerMode),
}
for (const blockId in endFlattened) {
if (!(blockId in startFlattened)) {
const { block, parentId } = endFlattened[blockId]
if (parentId) {
// Block was added inside a subflow - include full block state
const addParams: EditOperation['params'] = {
subflowId: parentId,
type: block.type,
name: block.name,
outputs: block.outputs,
enabled: block.enabled !== undefined ? block.enabled : true,
...(block?.triggerMode !== undefined && { triggerMode: Boolean(block.triggerMode) }),
...(block?.advancedMode !== undefined && { advancedMode: Boolean(block.advancedMode) }),
}
// Add loop/parallel configuration if this block is in a subflow
const loopConfig = findLoopConfigForBlock(blockId, endLoops)
if (loopConfig) {
;(addParams as any).loopConfig = loopConfig
// Add inputs if present
const inputs = extractInputValues(block)
if (Object.keys(inputs).length > 0) {
addParams.inputs = inputs
}
// Add connections if present
const connections = extractConnections(blockId, endEdges)
if (connections && Object.keys(connections).length > 0) {
addParams.connections = connections
}
operations.push({
operation_type: 'insert_into_subflow',
block_id: blockId,
params: addParams,
})
subflowsChanged++
}
} else {
// Regular block addition at root level
const addParams: EditOperation['params'] = {
type: block.type,
name: block.name,
...(block?.triggerMode !== undefined && { triggerMode: Boolean(block.triggerMode) }),
...(block?.advancedMode !== undefined && { advancedMode: Boolean(block.advancedMode) }),
}
const parallelConfig = findParallelConfigForBlock(blockId, endParallels)
if (parallelConfig) {
;(addParams as any).parallelConfig = parallelConfig
subflowsChanged++
}
// Add inputs if present
const inputs = extractInputValues(block)
if (Object.keys(inputs).length > 0) {
addParams.inputs = inputs
}
// Add parent-child relationship if present
if (block.data?.parentId) {
addParams.parentId = block.data.parentId
addParams.extent = block.data.extent
}
// Add connections if present
const connections = extractConnections(blockId, endEdges)
if (connections && Object.keys(connections).length > 0) {
addParams.connections = connections
}
operations.push({
operation_type: 'add',
block_id: blockId,
params: addParams,
})
blocksAdded++
// Add nested nodes if present (for loops/parallels created from scratch)
if (block.nestedNodes && Object.keys(block.nestedNodes).length > 0) {
addParams.nestedNodes = block.nestedNodes
subflowsChanged++
}
operations.push({
operation_type: 'add',
block_id: blockId,
params: addParams,
})
blocksAdded++
}
}
}
// 3. Find modified blocks (exist in both but have changes)
for (const blockId in endBlocks) {
if (blockId in startBlocks) {
const startBlock = startBlocks[blockId]
const endBlock = endBlocks[blockId]
const changes = computeBlockChanges(
startBlock,
endBlock,
blockId,
startEdges,
endEdges,
startLoops,
endLoops,
startParallels,
endParallels
)
for (const blockId in endFlattened) {
if (blockId in startFlattened) {
const { block: startBlock, parentId: startParentId } = startFlattened[blockId]
const { block: endBlock, parentId: endParentId } = endFlattened[blockId]
// Check if parent changed (moved in/out of subflow)
if (startParentId !== endParentId) {
// Extract from old parent if it had one
if (startParentId) {
operations.push({
operation_type: 'extract_from_subflow',
block_id: blockId,
params: { subflowId: startParentId },
})
subflowsChanged++
}
// Insert into new parent if it has one - include full block state
if (endParentId) {
const addParams: EditOperation['params'] = {
subflowId: endParentId,
type: endBlock.type,
name: endBlock.name,
outputs: endBlock.outputs,
enabled: endBlock.enabled !== undefined ? endBlock.enabled : true,
...(endBlock?.triggerMode !== undefined && {
triggerMode: Boolean(endBlock.triggerMode),
}),
...(endBlock?.advancedMode !== undefined && {
advancedMode: Boolean(endBlock.advancedMode),
}),
}
const inputs = extractInputValues(endBlock)
if (Object.keys(inputs).length > 0) {
addParams.inputs = inputs
}
const connections = extractConnections(blockId, endEdges)
if (connections && Object.keys(connections).length > 0) {
addParams.connections = connections
}
operations.push({
operation_type: 'insert_into_subflow',
block_id: blockId,
params: addParams,
})
subflowsChanged++
}
}
// Check for other changes (only if parent didn't change)
const changes = computeBlockChanges(startBlock, endBlock, blockId, startEdges, endEdges)
if (changes) {
operations.push({
operation_type: 'edit',
@@ -140,24 +302,13 @@ export function computeEditSequence(
params: changes,
})
blocksModified++
if (changes.connections || changes.removeEdges) {
if (changes.connections) {
edgesChanged++
}
if (changes.loopConfig || changes.parallelConfig) {
subflowsChanged++
}
}
}
}
// 4. Check for standalone loop/parallel changes (not tied to specific blocks)
const loopChanges = detectSubflowChanges(startLoops, endLoops, 'loop')
const parallelChanges = detectSubflowChanges(startParallels, endParallels, 'parallel')
if (loopChanges > 0 || parallelChanges > 0) {
subflowsChanged += loopChanges + parallelChanges
}
return {
operations,
summary: {
@@ -171,20 +322,21 @@ export function computeEditSequence(
}
/**
* Extract input values from a block's subBlocks
* Extract input values from a block
* Works with sanitized format where inputs is Record<string, value>
*/
function extractInputValues(block: any): Record<string, any> {
const inputs: Record<string, any> = {}
if (block.subBlocks) {
for (const [subBlockId, subBlock] of Object.entries(block.subBlocks)) {
if ((subBlock as any).value !== undefined && (subBlock as any).value !== null) {
inputs[subBlockId] = (subBlock as any).value
}
}
// New sanitized format uses 'inputs' field
if (block.inputs) {
return { ...block.inputs }
}
return inputs
// Fallback for any legacy data
if (block.subBlocks) {
return { ...block.subBlocks }
}
return {}
}
/**
@@ -233,101 +385,6 @@ function extractConnections(
return connections
}
/**
* Find loop configuration for a block
*/
function findLoopConfigForBlock(
blockId: string,
loops: Record<string, any>
):
| {
nodes?: string[]
iterations?: number
loopType?: 'for' | 'forEach'
forEachItems?: any
}
| undefined {
for (const loop of Object.values(loops)) {
if (loop.id === blockId || loop.nodes?.includes(blockId)) {
return {
nodes: loop.nodes,
iterations: loop.iterations,
loopType: loop.loopType,
forEachItems: loop.forEachItems,
}
}
}
return undefined
}
/**
* Find parallel configuration for a block
*/
function findParallelConfigForBlock(
blockId: string,
parallels: Record<string, any>
):
| {
nodes?: string[]
distribution?: any
count?: number
parallelType?: 'count' | 'collection'
}
| undefined {
for (const parallel of Object.values(parallels)) {
if (parallel.id === blockId || parallel.nodes?.includes(blockId)) {
return {
nodes: parallel.nodes,
distribution: parallel.distribution,
count: parallel.count,
parallelType: parallel.parallelType,
}
}
}
return undefined
}
/**
* Detect changes in subflow configurations
*/
function detectSubflowChanges(
startSubflows: Record<string, any>,
endSubflows: Record<string, any>,
type: 'loop' | 'parallel'
): number {
let changes = 0
// Check for added/removed subflows
const startIds = new Set(Object.keys(startSubflows))
const endIds = new Set(Object.keys(endSubflows))
for (const id of endIds) {
if (!startIds.has(id)) {
changes++ // New subflow
}
}
for (const id of startIds) {
if (!endIds.has(id)) {
changes++ // Removed subflow
}
}
// Check for modified subflows
for (const id of endIds) {
if (startIds.has(id)) {
const startSubflow = startSubflows[id]
const endSubflow = endSubflows[id]
if (JSON.stringify(startSubflow) !== JSON.stringify(endSubflow)) {
changes++ // Modified subflow
}
}
}
return changes
}
/**
* Compute what changed in a block between two states
*/
@@ -346,11 +403,7 @@ function computeBlockChanges(
target: string
sourceHandle?: string | null
targetHandle?: string | null
}>,
startLoops: Record<string, any>,
endLoops: Record<string, any>,
startParallels: Record<string, any>,
endParallels: Record<string, any>
}>
): Record<string, any> | null {
const changes: Record<string, any> = {}
let hasChanges = false
@@ -375,6 +428,14 @@ function computeBlockChanges(
hasChanges = true
}
// Check advanced mode change
const startAdvanced = Boolean(startBlock?.advancedMode)
const endAdvanced = Boolean(endBlock?.advancedMode)
if (startAdvanced !== endAdvanced) {
changes.advancedMode = endAdvanced
hasChanges = true
}
// Check input value changes
const startInputs = extractInputValues(startBlock)
const endInputs = extractInputValues(endBlock)
@@ -389,79 +450,7 @@ function computeBlockChanges(
const endConnections = extractConnections(blockId, endEdges)
if (JSON.stringify(startConnections) !== JSON.stringify(endConnections)) {
// Compute which edges were removed
const removedEdges: Array<{ targetBlockId: string; sourceHandle?: string }> = []
for (const handle in startConnections) {
const startTargets = Array.isArray(startConnections[handle])
? startConnections[handle]
: [startConnections[handle]]
const endTargets = endConnections[handle]
? Array.isArray(endConnections[handle])
? endConnections[handle]
: [endConnections[handle]]
: []
for (const target of startTargets) {
const targetId = typeof target === 'object' ? target.block : target
const isPresent = endTargets.some(
(t: any) => (typeof t === 'object' ? t.block : t) === targetId
)
if (!isPresent) {
removedEdges.push({
targetBlockId: targetId,
sourceHandle: handle !== 'default' ? handle : undefined,
})
}
}
}
if (removedEdges.length > 0) {
changes.removeEdges = removedEdges
}
// Add new connections
if (Object.keys(endConnections).length > 0) {
changes.connections = endConnections
}
hasChanges = true
}
// Check loop membership changes
const startLoopConfig = findLoopConfigForBlock(blockId, startLoops)
const endLoopConfig = findLoopConfigForBlock(blockId, endLoops)
if (JSON.stringify(startLoopConfig) !== JSON.stringify(endLoopConfig)) {
if (endLoopConfig) {
;(changes as any).loopConfig = endLoopConfig
}
hasChanges = true
}
// Check parallel membership changes
const startParallelConfig = findParallelConfigForBlock(blockId, startParallels)
const endParallelConfig = findParallelConfigForBlock(blockId, endParallels)
if (JSON.stringify(startParallelConfig) !== JSON.stringify(endParallelConfig)) {
if (endParallelConfig) {
;(changes as any).parallelConfig = endParallelConfig
}
hasChanges = true
}
// Check parent-child relationship changes
const startParentId = startBlock.data?.parentId
const endParentId = endBlock.data?.parentId
const startExtent = startBlock.data?.extent
const endExtent = endBlock.data?.extent
if (startParentId !== endParentId || startExtent !== endExtent) {
if (endParentId) {
changes.parentId = endParentId
changes.extent = endExtent
}
changes.connections = endConnections
hasChanges = true
}
@@ -478,20 +467,29 @@ export function formatEditSequence(operations: EditOperation[]): string[] {
return `Add block "${op.params?.name || op.block_id}" (${op.params?.type || 'unknown'})`
case 'delete':
return `Delete block "${op.block_id}"`
case 'insert_into_subflow':
return `Insert "${op.params?.name || op.block_id}" into subflow "${op.params?.subflowId}"`
case 'extract_from_subflow':
return `Extract "${op.block_id}" from subflow "${op.params?.subflowId}"`
case 'edit': {
const changes: string[] = []
if (op.params?.type) changes.push(`type to ${op.params.type}`)
if (op.params?.name) changes.push(`name to "${op.params.name}"`)
if (op.params?.inputs) changes.push('inputs')
if (op.params?.triggerMode !== undefined)
changes.push(`trigger mode to ${op.params.triggerMode}`)
if (op.params?.advancedMode !== undefined)
changes.push(`advanced mode to ${op.params.advancedMode}`)
if (op.params?.inputs) {
const inputKeys = Object.keys(op.params.inputs)
if (inputKeys.length > 0) {
changes.push(`inputs (${inputKeys.join(', ')})`)
}
}
if (op.params?.connections) changes.push('connections')
if (op.params?.removeEdges) changes.push(`remove ${op.params.removeEdges.length} edge(s)`)
if ((op.params as any)?.loopConfig) changes.push('loop configuration')
if ((op.params as any)?.parallelConfig) changes.push('parallel configuration')
if (op.params?.parentId) changes.push('parent-child relationship')
return `Edit block "${op.block_id}": ${changes.join(', ')}`
}
default:
return `Unknown operation on block "${op.block_id}"`
return `Unknown operation: ${op.operation_type}`
}
})
}

View File

@@ -147,6 +147,10 @@ export async function middleware(request: NextRequest) {
return NextResponse.next()
}
if (url.pathname.startsWith('/chat/')) {
return NextResponse.next()
}
if (url.pathname.startsWith('/workspace')) {
if (!hasActiveSession) {
return NextResponse.redirect(new URL('/login', request.url))

View File

@@ -238,6 +238,22 @@ const nextConfig: NextConfig = {
return redirects
},
async rewrites() {
if (!isTruthy(env.POSTHOG_ENABLED)) {
return []
}
return [
{
source: '/ingest/static/:path*',
destination: 'https://us-assets.i.posthog.com/static/:path*',
},
{
source: '/ingest/:path*',
destination: 'https://us.i.posthog.com/:path*',
},
]
},
}
export default nextConfig

View File

@@ -28,8 +28,8 @@
"@aws-sdk/s3-request-presigner": "^3.779.0",
"@azure/communication-email": "1.0.0",
"@azure/storage-blob": "12.27.0",
"@better-auth/stripe": "1.3.12",
"@better-auth/sso": "1.3.12",
"@better-auth/stripe": "1.3.12",
"@browserbasehq/stagehand": "^2.0.0",
"@cerebras/cerebras_cloud_sdk": "^1.23.0",
"@e2b/code-interpreter": "^2.0.0",
@@ -93,6 +93,8 @@
"openai": "^4.91.1",
"papaparse": "5.5.3",
"pdf-parse": "1.1.1",
"posthog-js": "1.268.9",
"posthog-node": "5.9.2",
"prismjs": "^1.30.0",
"react": "19.1.0",
"react-colorful": "5.6.1",

View File

@@ -1,4 +1,5 @@
import type { Edge } from 'reactflow'
import { BlockPathCalculator } from '@/lib/block-path-calculator'
import { createLogger } from '@/lib/logs/console/logger'
import { getBlock } from '@/blocks'
import type { SubBlockConfig } from '@/blocks/types'
@@ -44,22 +45,36 @@ export class Serializer {
parallels?: Record<string, Parallel>,
validateRequired = false
): SerializedWorkflow {
// Validate subflow requirements (loops/parallels) before serialization if requested
const safeLoops = loops || {}
const safeParallels = parallels || {}
const accessibleBlocksMap = this.computeAccessibleBlockIds(
blocks,
edges,
safeLoops,
safeParallels
)
if (validateRequired) {
this.validateSubflowsBeforeExecution(blocks, loops || {}, parallels || {})
this.validateSubflowsBeforeExecution(blocks, safeLoops, safeParallels)
}
return {
version: '1.0',
blocks: Object.values(blocks).map((block) => this.serializeBlock(block, validateRequired)),
blocks: Object.values(blocks).map((block) =>
this.serializeBlock(block, {
validateRequired,
allBlocks: blocks,
accessibleBlocksMap,
})
),
connections: edges.map((edge) => ({
source: edge.source,
target: edge.target,
sourceHandle: edge.sourceHandle || undefined,
targetHandle: edge.targetHandle || undefined,
})),
loops,
parallels,
loops: safeLoops,
parallels: safeParallels,
}
}
@@ -156,7 +171,14 @@ export class Serializer {
})
}
private serializeBlock(block: BlockState, validateRequired = false): SerializedBlock {
private serializeBlock(
block: BlockState,
options: {
validateRequired: boolean
allBlocks: Record<string, BlockState>
accessibleBlocksMap: Map<string, Set<string>>
}
): SerializedBlock {
// Special handling for subflow blocks (loops, parallels, etc.)
if (block.type === 'loop' || block.type === 'parallel') {
return {
@@ -197,7 +219,7 @@ export class Serializer {
}
// Validate required fields that only users can provide (before execution starts)
if (validateRequired) {
if (options.validateRequired) {
this.validateRequiredFieldsBeforeExecution(block, blockConfig, params)
}
@@ -541,6 +563,46 @@ export class Serializer {
}
}
private computeAccessibleBlockIds(
blocks: Record<string, BlockState>,
edges: Edge[],
loops: Record<string, Loop>,
parallels: Record<string, Parallel>
): Map<string, Set<string>> {
const accessibleMap = new Map<string, Set<string>>()
const simplifiedEdges = edges.map((edge) => ({ source: edge.source, target: edge.target }))
const starterBlock = Object.values(blocks).find((block) => block.type === 'starter')
Object.keys(blocks).forEach((blockId) => {
const ancestorIds = BlockPathCalculator.findAllPathNodes(simplifiedEdges, blockId)
const accessibleIds = new Set<string>(ancestorIds)
accessibleIds.add(blockId)
if (starterBlock) {
accessibleIds.add(starterBlock.id)
}
Object.values(loops).forEach((loop) => {
if (!loop?.nodes) return
if (loop.nodes.includes(blockId)) {
loop.nodes.forEach((nodeId) => accessibleIds.add(nodeId))
}
})
Object.values(parallels).forEach((parallel) => {
if (!parallel?.nodes) return
if (parallel.nodes.includes(blockId)) {
parallel.nodes.forEach((nodeId) => accessibleIds.add(nodeId))
}
})
accessibleMap.set(blockId, accessibleIds)
})
return accessibleMap
}
deserializeWorkflow(workflow: SerializedWorkflow): {
blocks: Record<string, BlockState>
edges: Edge[]

Some files were not shown because too many files have changed in this diff Show More