v0.5.33: loops, chat fixes, subflow resizing refactor, terminal updates

Waleed
2025-12-17 15:45:39 -08:00
committed by GitHub
136 changed files with 14724 additions and 1882 deletions

View File

@@ -3335,6 +3335,24 @@ export function SalesforceIcon(props: SVGProps<SVGSVGElement>) {
)
}
export function ServiceNowIcon(props: SVGProps<SVGSVGElement>) {
return (
<svg
{...props}
xmlns='http://www.w3.org/2000/svg'
viewBox='0 0 1570 1403'
width='48'
height='48'
>
<path
fill='#62d84e'
fillRule='evenodd'
d='M1228.4 138.9c129.2 88.9 228.9 214.3 286.3 360.2 57.5 145.8 70 305.5 36 458.5S1437.8 1250 1324 1357.9c-13.3 12.9-28.8 23.4-45.8 30.8-17 7.5-35.2 11.9-53.7 12.9-18.5 1.1-37.1-1.1-54.8-6.6-17.7-5.4-34.3-13.9-49.1-25.2-48.2-35.9-101.8-63.8-158.8-82.6-57.1-18.9-116.7-28.5-176.8-28.5s-119.8 9.6-176.8 28.5c-57 18.8-110.7 46.7-158.9 82.6-14.6 11.2-31 19.8-48.6 25.3s-36 7.8-54.4 6.8c-18.4-.9-36.5-5.1-53.4-12.4s-32.4-17.5-45.8-30.2C132.5 1251 53 1110.8 19 956.8s-20.9-314.6 37.6-461c58.5-146.5 159.6-272 290.3-360.3S631.8.1 789.6.5c156.8 1.3 309.6 49.6 438.8 138.4m-291.8 1014c48.2-19.2 92-48 128.7-84.6 36.7-36.7 65.5-80.4 84.7-128.6 19.2-48.1 28.4-99.7 27-151.5 0-103.9-41.3-203.5-114.8-277S889 396.4 785 396.4s-203.7 41.3-277.2 114.8S393 684.3 393 788.2c-1.4 51.8 7.8 103.4 27 151.5 19.2 48.2 48 91.9 84.7 128.6 36.7 36.6 80.5 65.4 128.6 84.6 48.2 19.2 99.8 28.4 151.7 27 51.8 1.4 103.4-7.8 151.6-27'
/>
</svg>
)
}
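Note that `{...props}` is spread before the hardcoded `width`/`height`, so a caller's own width/height props are overridden by the 48px defaults; sizing this icon therefore goes through CSS. A hypothetical usage (className is forwarded via the spread):
<ServiceNowIcon className='h-4 w-4' />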
export function ApolloIcon(props: SVGProps<SVGSVGElement>) {
return (
<svg

View File

@@ -85,6 +85,7 @@ import {
SendgridIcon,
SentryIcon,
SerperIcon,
ServiceNowIcon,
SftpIcon,
ShopifyIcon,
SlackIcon,
@@ -139,6 +140,7 @@ export const blockTypeToIconMap: Record<string, IconComponent> = {
webflow: WebflowIcon,
pinecone: PineconeIcon,
apollo: ApolloIcon,
servicenow: ServiceNowIcon,
whatsapp: WhatsAppIcon,
typeform: TypeformIcon,
qdrant: QdrantIcon,

View File

@@ -0,0 +1,108 @@
---
title: ServiceNow
description: Erstellen, lesen, aktualisieren, löschen und Massenimport von
ServiceNow-Datensätzen
---
import { BlockInfoCard } from "@/components/ui/block-info-card"
<BlockInfoCard
type="servicenow"
color="#032D42"
/>
## Nutzungsanleitung
Integrieren Sie ServiceNow in Ihren Workflow. Kann Datensätze in jeder ServiceNow-Tabelle erstellen, lesen, aktualisieren und löschen (Vorfälle, Aufgaben, Benutzer usw.). Unterstützt Massenimport-Operationen für Datenmigration und ETL.
## Tools
### `servicenow_create_record`
Erstellen eines neuen Datensatzes in einer ServiceNow-Tabelle
#### Eingabe
| Parameter | Typ | Erforderlich | Beschreibung |
| --------- | ---- | -------- | ----------- |
| `instanceUrl` | string | Ja | ServiceNow-Instanz-URL \(z. B. https://instance.service-now.com\) |
| `credential` | string | Nein | ServiceNow OAuth-Anmeldeinformations-ID |
| `tableName` | string | Ja | Tabellenname \(z. B. incident, task, sys_user\) |
| `fields` | json | Ja | Felder, die für den Datensatz festgelegt werden sollen \(JSON-Objekt\) |
#### Ausgabe
| Parameter | Typ | Beschreibung |
| --------- | ---- | ----------- |
| `record` | json | Erstellter ServiceNow-Datensatz mit sys_id und anderen Feldern |
| `metadata` | json | Metadaten der Operation |
### `servicenow_read_record`
Lesen von Datensätzen aus einer ServiceNow-Tabelle
#### Eingabe
| Parameter | Typ | Erforderlich | Beschreibung |
| --------- | ---- | -------- | ----------- |
| `instanceUrl` | string | Nein | ServiceNow-Instanz-URL \(automatisch aus OAuth erkannt, falls nicht angegeben\) |
| `credential` | string | Nein | ServiceNow OAuth-Anmeldeinformations-ID |
| `tableName` | string | Ja | Tabellenname |
| `sysId` | string | Nein | Spezifische Datensatz-sys_id |
| `number` | string | Nein | Datensatznummer \(z. B. INC0010001\) |
| `query` | string | Nein | Kodierte Abfragezeichenfolge \(z. B. "active=true^priority=1"\) |
| `limit` | number | Nein | Maximale Anzahl der zurückzugebenden Datensätze |
| `fields` | string | Nein | Durch Kommas getrennte Liste der zurückzugebenden Felder |
#### Ausgabe
| Parameter | Typ | Beschreibung |
| --------- | ---- | ----------- |
| `records` | array | Array von ServiceNow-Datensätzen |
| `metadata` | json | Metadaten der Operation |
### `servicenow_update_record`
Einen bestehenden Datensatz in einer ServiceNow-Tabelle aktualisieren
#### Eingabe
| Parameter | Typ | Erforderlich | Beschreibung |
| --------- | ---- | -------- | ----------- |
| `instanceUrl` | string | Nein | ServiceNow-Instanz-URL \(wird automatisch aus OAuth erkannt, falls nicht angegeben\) |
| `credential` | string | Nein | ServiceNow-OAuth-Credential-ID |
| `tableName` | string | Ja | Tabellenname |
| `sysId` | string | Ja | Sys_id des zu aktualisierenden Datensatzes |
| `fields` | json | Ja | Zu aktualisierende Felder \(JSON-Objekt\) |
#### Ausgabe
| Parameter | Typ | Beschreibung |
| --------- | ---- | ----------- |
| `record` | json | Aktualisierter ServiceNow-Datensatz |
| `metadata` | json | Metadaten der Operation |
### `servicenow_delete_record`
Einen Datensatz aus einer ServiceNow-Tabelle löschen
#### Eingabe
| Parameter | Typ | Erforderlich | Beschreibung |
| --------- | ---- | -------- | ----------- |
| `instanceUrl` | string | Nein | ServiceNow-Instanz-URL \(wird automatisch aus OAuth erkannt, falls nicht angegeben\) |
| `credential` | string | Nein | ServiceNow-OAuth-Credential-ID |
| `tableName` | string | Ja | Tabellenname |
| `sysId` | string | Ja | Sys_id des zu löschenden Datensatzes |
#### Ausgabe
| Parameter | Typ | Beschreibung |
| --------- | ---- | ----------- |
| `success` | boolean | Ob das Löschen erfolgreich war |
| `metadata` | json | Metadaten der Operation |
## Hinweise
- Kategorie: `tools`
- Typ: `servicenow`

View File

@@ -80,6 +80,7 @@
"sendgrid",
"sentry",
"serper",
"servicenow",
"sftp",
"sharepoint",
"shopify",

View File

@@ -0,0 +1,111 @@
---
title: ServiceNow
description: Create, read, update, delete, and bulk import ServiceNow records
---
import { BlockInfoCard } from "@/components/ui/block-info-card"
<BlockInfoCard
type="servicenow"
color="#032D42"
/>
## Usage Instructions
Integrate ServiceNow into your workflow. Can create, read, update, and delete records in any ServiceNow table (incidents, tasks, users, etc.). Supports bulk import operations for data migration and ETL.
## Tools
### `servicenow_create_record`
Create a new record in a ServiceNow table
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `instanceUrl` | string | Yes | ServiceNow instance URL \(e.g., https://instance.service-now.com\) |
| `credential` | string | No | ServiceNow OAuth credential ID |
| `tableName` | string | Yes | Table name \(e.g., incident, task, sys_user\) |
| `fields` | json | Yes | Fields to set on the record \(JSON object\) |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `record` | json | Created ServiceNow record with sys_id and other fields |
| `metadata` | json | Operation metadata |
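A hypothetical invocation, for illustration only (the `short_description` and `urgency` field names are standard ServiceNow incident columns, not part of this commit):
```ts
const input = {
  instanceUrl: 'https://instance.service-now.com',
  tableName: 'incident',
  fields: { short_description: 'Printer offline on floor 3', urgency: '2' },
}
// On success, `record` carries the server-assigned identifiers, roughly:
// { record: { sys_id: '…', number: 'INC0010001', … }, metadata: { … } }
```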
### `servicenow_read_record`
Read records from a ServiceNow table
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `instanceUrl` | string | No | ServiceNow instance URL \(auto-detected from OAuth if not provided\) |
| `credential` | string | No | ServiceNow OAuth credential ID |
| `tableName` | string | Yes | Table name |
| `sysId` | string | No | Specific record sys_id |
| `number` | string | No | Record number \(e.g., INC0010001\) |
| `query` | string | No | Encoded query string \(e.g., "active=true^priority=1"\) |
| `limit` | number | No | Maximum number of records to return |
| `fields` | string | No | Comma-separated list of fields to return |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `records` | array | Array of ServiceNow records |
| `metadata` | json | Operation metadata |
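Encoded queries chain conditions with `^`; a sketch of a filtered read (the `ORDERBYDESC` clause is standard ServiceNow encoded-query syntax, assumed here rather than shown in this commit):
```ts
const input = {
  tableName: 'incident',
  query: 'active=true^priority=1^ORDERBYDESCsys_created_on', // active P1 incidents, newest first
  limit: 10,
  fields: 'sys_id,number,short_description', // trim the response to three columns
}
```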
### `servicenow_update_record`
Update an existing record in a ServiceNow table
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `instanceUrl` | string | No | ServiceNow instance URL \(auto-detected from OAuth if not provided\) |
| `credential` | string | No | ServiceNow OAuth credential ID |
| `tableName` | string | Yes | Table name |
| `sysId` | string | Yes | Record sys_id to update |
| `fields` | json | Yes | Fields to update \(JSON object\) |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `record` | json | Updated ServiceNow record |
| `metadata` | json | Operation metadata |
### `servicenow_delete_record`
Delete a record from a ServiceNow table
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `instanceUrl` | string | No | ServiceNow instance URL \(auto-detected from OAuth if not provided\) |
| `credential` | string | No | ServiceNow OAuth credential ID |
| `tableName` | string | Yes | Table name |
| `sysId` | string | Yes | Record sys_id to delete |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `success` | boolean | Whether the deletion was successful |
| `metadata` | json | Operation metadata |
## Notes
- Category: `tools`
- Type: `servicenow`

View File

@@ -0,0 +1,107 @@
---
title: ServiceNow
description: Crea, lee, actualiza, elimina e importa masivamente registros de ServiceNow
---
import { BlockInfoCard } from "@/components/ui/block-info-card"
<BlockInfoCard
type="servicenow"
color="#032D42"
/>
## Instrucciones de uso
Integra ServiceNow en tu flujo de trabajo. Puede crear, leer, actualizar y eliminar registros en cualquier tabla de ServiceNow (incidentes, tareas, usuarios, etc.). Admite operaciones de importación masiva para migración de datos y ETL.
## Herramientas
### `servicenow_create_record`
Crea un nuevo registro en una tabla de ServiceNow
#### Entrada
| Parámetro | Tipo | Requerido | Descripción |
| --------- | ---- | -------- | ----------- |
| `instanceUrl` | string | Sí | URL de la instancia de ServiceNow \(ej., https://instance.service-now.com\) |
| `credential` | string | No | ID de credencial OAuth de ServiceNow |
| `tableName` | string | Sí | Nombre de la tabla \(ej., incident, task, sys_user\) |
| `fields` | json | Sí | Campos a establecer en el registro \(objeto JSON\) |
#### Salida
| Parámetro | Tipo | Descripción |
| --------- | ---- | ----------- |
| `record` | json | Registro de ServiceNow creado con sys_id y otros campos |
| `metadata` | json | Metadatos de la operación |
### `servicenow_read_record`
Lee registros de una tabla de ServiceNow
#### Entrada
| Parámetro | Tipo | Requerido | Descripción |
| --------- | ---- | -------- | ----------- |
| `instanceUrl` | string | No | URL de la instancia de ServiceNow \(detectada automáticamente desde OAuth si no se proporciona\) |
| `credential` | string | No | ID de credencial OAuth de ServiceNow |
| `tableName` | string | Sí | Nombre de la tabla |
| `sysId` | string | No | sys_id específico del registro |
| `number` | string | No | Número de registro \(ej., INC0010001\) |
| `query` | string | No | Cadena de consulta codificada \(ej., "active=true^priority=1"\) |
| `limit` | number | No | Número máximo de registros a devolver |
| `fields` | string | No | Lista de campos separados por comas a devolver |
#### Salida
| Parámetro | Tipo | Descripción |
| --------- | ---- | ----------- |
| `records` | array | Array de registros de ServiceNow |
| `metadata` | json | Metadatos de la operación |
### `servicenow_update_record`
Actualizar un registro existente en una tabla de ServiceNow
#### Entrada
| Parámetro | Tipo | Requerido | Descripción |
| --------- | ---- | -------- | ----------- |
| `instanceUrl` | string | No | URL de la instancia de ServiceNow \(detectada automáticamente desde OAuth si no se proporciona\) |
| `credential` | string | No | ID de credencial OAuth de ServiceNow |
| `tableName` | string | Sí | Nombre de la tabla |
| `sysId` | string | Sí | sys_id del registro a actualizar |
| `fields` | json | Sí | Campos a actualizar \(objeto JSON\) |
#### Salida
| Parámetro | Tipo | Descripción |
| --------- | ---- | ----------- |
| `record` | json | Registro de ServiceNow actualizado |
| `metadata` | json | Metadatos de la operación |
### `servicenow_delete_record`
Eliminar un registro de una tabla de ServiceNow
#### Entrada
| Parámetro | Tipo | Requerido | Descripción |
| --------- | ---- | -------- | ----------- |
| `instanceUrl` | string | No | URL de la instancia de ServiceNow \(detectada automáticamente desde OAuth si no se proporciona\) |
| `credential` | string | No | ID de credencial OAuth de ServiceNow |
| `tableName` | string | Sí | Nombre de la tabla |
| `sysId` | string | Sí | sys_id del registro a eliminar |
#### Salida
| Parámetro | Tipo | Descripción |
| --------- | ---- | ----------- |
| `success` | boolean | Si la eliminación fue exitosa |
| `metadata` | json | Metadatos de la operación |
## Notas
- Categoría: `tools`
- Tipo: `servicenow`

View File

@@ -0,0 +1,108 @@
---
title: ServiceNow
description: Créer, lire, mettre à jour, supprimer et importer en masse des
enregistrements ServiceNow
---
import { BlockInfoCard } from "@/components/ui/block-info-card"
<BlockInfoCard
type="servicenow"
color="#032D42"
/>
## Instructions d'utilisation
Intégrez ServiceNow dans votre flux de travail. Permet de créer, lire, mettre à jour et supprimer des enregistrements dans n'importe quelle table ServiceNow (incidents, tâches, utilisateurs, etc.). Prend en charge les opérations d'importation en masse pour la migration de données et l'ETL.
## Outils
### `servicenow_create_record`
Créer un nouvel enregistrement dans une table ServiceNow
#### Entrée
| Paramètre | Type | Requis | Description |
| --------- | ---- | -------- | ----------- |
| `instanceUrl` | string | Oui | URL de l'instance ServiceNow \(par exemple, https://instance.service-now.com\) |
| `credential` | string | Non | ID d'identification OAuth ServiceNow |
| `tableName` | string | Oui | Nom de la table \(par exemple, incident, task, sys_user\) |
| `fields` | json | Oui | Champs à définir sur l'enregistrement \(objet JSON\) |
#### Sortie
| Paramètre | Type | Description |
| --------- | ---- | ----------- |
| `record` | json | Enregistrement ServiceNow créé avec sys_id et autres champs |
| `metadata` | json | Métadonnées de l'opération |
### `servicenow_read_record`
Lire des enregistrements d'une table ServiceNow
#### Entrée
| Paramètre | Type | Requis | Description |
| --------- | ---- | -------- | ----------- |
| `instanceUrl` | string | Non | URL de l'instance ServiceNow \(détectée automatiquement depuis OAuth si non fournie\) |
| `credential` | string | Non | ID d'identification OAuth ServiceNow |
| `tableName` | string | Oui | Nom de la table |
| `sysId` | string | Non | sys_id spécifique de l'enregistrement |
| `number` | string | Non | Numéro d'enregistrement \(par exemple, INC0010001\) |
| `query` | string | Non | Chaîne de requête encodée \(par exemple, "active=true^priority=1"\) |
| `limit` | number | Non | Nombre maximum d'enregistrements à retourner |
| `fields` | string | Non | Liste de champs séparés par des virgules à retourner |
#### Sortie
| Paramètre | Type | Description |
| --------- | ---- | ----------- |
| `records` | array | Tableau des enregistrements ServiceNow |
| `metadata` | json | Métadonnées de l'opération |
### `servicenow_update_record`
Mettre à jour un enregistrement existant dans une table ServiceNow
#### Entrée
| Paramètre | Type | Requis | Description |
| --------- | ---- | -------- | ----------- |
| `instanceUrl` | string | Non | URL de l'instance ServiceNow (détectée automatiquement depuis OAuth si non fournie) |
| `credential` | string | Non | ID des identifiants OAuth ServiceNow |
| `tableName` | string | Oui | Nom de la table |
| `sysId` | string | Oui | sys_id de l'enregistrement à mettre à jour |
| `fields` | json | Oui | Champs à mettre à jour (objet JSON) |
#### Sortie
| Paramètre | Type | Description |
| --------- | ---- | ----------- |
| `record` | json | Enregistrement ServiceNow mis à jour |
| `metadata` | json | Métadonnées de l'opération |
### `servicenow_delete_record`
Supprimer un enregistrement d'une table ServiceNow
#### Entrée
| Paramètre | Type | Requis | Description |
| --------- | ---- | -------- | ----------- |
| `instanceUrl` | string | Non | URL de l'instance ServiceNow (détectée automatiquement depuis OAuth si non fournie) |
| `credential` | string | Non | ID des identifiants OAuth ServiceNow |
| `tableName` | string | Oui | Nom de la table |
| `sysId` | string | Oui | sys_id de l'enregistrement à supprimer |
#### Sortie
| Paramètre | Type | Description |
| --------- | ---- | ----------- |
| `success` | boolean | Indique si la suppression a réussi |
| `metadata` | json | Métadonnées de l'opération |
## Notes
- Catégorie : `tools`
- Type : `servicenow`

View File

@@ -0,0 +1,107 @@
---
title: ServiceNow
description: ServiceNowレコードの作成、読み取り、更新、削除、一括インポート
---
import { BlockInfoCard } from "@/components/ui/block-info-card"
<BlockInfoCard
type="servicenow"
color="#032D42"
/>
## 使用方法
ServiceNowをワークフローに統合します。任意のServiceNowテーブル(インシデント、タスク、ユーザーなど)のレコードを作成、読み取り、更新、削除できます。データ移行とETLのための一括インポート操作をサポートします。
## ツール
### `servicenow_create_record`
ServiceNowテーブルに新しいレコードを作成
#### 入力
| パラメータ | 型 | 必須 | 説明 |
| --------- | ---- | -------- | ----------- |
| `instanceUrl` | string | はい | ServiceNowインスタンスURL(例:https://instance.service-now.com) |
| `credential` | string | いいえ | ServiceNow OAuth認証情報ID |
| `tableName` | string | はい | テーブル名(例:incident、task、sys_user) |
| `fields` | json | はい | レコードに設定するフィールド(JSONオブジェクト) |
#### 出力
| パラメータ | 型 | 説明 |
| --------- | ---- | ----------- |
| `record` | json | sys_idおよびその他のフィールドを含む作成されたServiceNowレコード |
| `metadata` | json | 操作メタデータ |
### `servicenow_read_record`
ServiceNowテーブルからレコードを読み取り
#### 入力
| パラメータ | 型 | 必須 | 説明 |
| --------- | ---- | -------- | ----------- |
| `instanceUrl` | string | いいえ | ServiceNowインスタンスURL(指定されていない場合はOAuthから自動検出) |
| `credential` | string | いいえ | ServiceNow OAuth認証情報ID |
| `tableName` | string | はい | テーブル名 |
| `sysId` | string | いいえ | 特定のレコードのsys_id |
| `number` | string | いいえ | レコード番号(例:INC0010001) |
| `query` | string | いいえ | エンコードされたクエリ文字列(例:"active=true^priority=1") |
| `limit` | number | いいえ | 返す最大レコード数 |
| `fields` | string | いいえ | 返すフィールドのカンマ区切りリスト |
#### 出力
| パラメータ | 型 | 説明 |
| --------- | ---- | ----------- |
| `records` | array | ServiceNowレコードの配列 |
| `metadata` | json | 操作メタデータ |
### `servicenow_update_record`
ServiceNowテーブル内の既存のレコードを更新します
#### 入力
| パラメータ | 型 | 必須 | 説明 |
| --------- | ---- | -------- | ----------- |
| `instanceUrl` | string | いいえ | ServiceNowインスタンスURL(指定されていない場合はOAuthから自動検出) |
| `credential` | string | いいえ | ServiceNow OAuth認証情報ID |
| `tableName` | string | はい | テーブル名 |
| `sysId` | string | はい | 更新するレコードのsys_id |
| `fields` | json | はい | 更新するフィールド(JSONオブジェクト) |
#### 出力
| パラメータ | 型 | 説明 |
| --------- | ---- | ----------- |
| `record` | json | 更新されたServiceNowレコード |
| `metadata` | json | 操作メタデータ |
### `servicenow_delete_record`
ServiceNowテーブルからレコードを削除します
#### 入力
| パラメータ | 型 | 必須 | 説明 |
| --------- | ---- | -------- | ----------- |
| `instanceUrl` | string | いいえ | ServiceNowインスタンスURL(指定されていない場合はOAuthから自動検出) |
| `credential` | string | いいえ | ServiceNow OAuth認証情報ID |
| `tableName` | string | はい | テーブル名 |
| `sysId` | string | はい | 削除するレコードのsys_id |
#### 出力
| パラメータ | 型 | 説明 |
| --------- | ---- | ----------- |
| `success` | boolean | 削除が成功したかどうか |
| `metadata` | json | 操作メタデータ |
## 注記
- カテゴリー: `tools`
- タイプ: `servicenow`

View File

@@ -0,0 +1,107 @@
---
title: ServiceNow
description: 创建、读取、更新、删除及批量导入 ServiceNow 记录
---
import { BlockInfoCard } from "@/components/ui/block-info-card"
<BlockInfoCard
type="servicenow"
color="#032D42"
/>
## 使用说明
将 ServiceNow 集成到您的工作流程中。可在任意 ServiceNow 表(如事件、任务、用户等)中创建、读取、更新和删除记录。支持批量导入操作,便于数据迁移和 ETL。
## 工具
### `servicenow_create_record`
在 ServiceNow 表中创建新记录
#### 输入
| 参数 | 类型 | 必填 | 说明 |
| --------- | ---- | -------- | ----------- |
| `instanceUrl` | string | 是 | ServiceNow 实例 URL(例如:https://instance.service-now.com) |
| `credential` | string | 否 | ServiceNow OAuth 凭证 ID |
| `tableName` | string | 是 | 表名(例如:incident、task、sys_user) |
| `fields` | json | 是 | 要设置在记录上的字段(JSON 对象) |
#### 输出
| 参数 | 类型 | 说明 |
| --------- | ---- | ----------- |
| `record` | json | 创建的 ServiceNow 记录,包含 sys_id 及其他字段 |
| `metadata` | json | 操作元数据 |
### `servicenow_read_record`
从 ServiceNow 表中读取记录
#### 输入
| 参数 | 类型 | 必填 | 说明 |
| --------- | ---- | -------- | ----------- |
| `instanceUrl` | string | 否 | ServiceNow 实例 URL(如未提供,将通过 OAuth 自动检测) |
| `credential` | string | 否 | ServiceNow OAuth 凭证 ID |
| `tableName` | string | 是 | 表名 |
| `sysId` | string | 否 | 指定记录 sys_id |
| `number` | string | 否 | 记录编号(例如:INC0010001) |
| `query` | string | 否 | 编码查询字符串(例如:"active=true^priority=1") |
| `limit` | number | 否 | 返回的最大记录数 |
| `fields` | string | 否 | 要返回的字段列表(以逗号分隔) |
#### 输出
| 参数 | 类型 | 描述 |
| --------- | ---- | ----------- |
| `records` | array | ServiceNow 记录数组 |
| `metadata` | json | 操作元数据 |
### `servicenow_update_record`
更新 ServiceNow 表中的现有记录
#### 输入
| 参数 | 类型 | 是否必填 | 描述 |
| --------- | ---- | -------- | ----------- |
| `instanceUrl` | string | 否 | ServiceNow 实例 URL如果未提供将通过 OAuth 自动检测) |
| `credential` | string | 否 | ServiceNow OAuth 凭证 ID |
| `tableName` | string | 是 | 表名 |
| `sysId` | string | 是 | 要更新的记录 sys_id |
| `fields` | json | 是 | 要更新的字段JSON 对象) |
#### 输出
| 参数 | 类型 | 描述 |
| --------- | ---- | ----------- |
| `record` | json | 已更新的 ServiceNow 记录 |
| `metadata` | json | 操作元数据 |
### `servicenow_delete_record`
从 ServiceNow 表中删除记录
#### 输入
| 参数 | 类型 | 是否必填 | 描述 |
| --------- | ---- | -------- | ----------- |
| `instanceUrl` | string | 否 | ServiceNow 实例 URL如果未提供将通过 OAuth 自动检测) |
| `credential` | string | 否 | ServiceNow OAuth 凭证 ID |
| `tableName` | string | 是 | 表名 |
| `sysId` | string | 是 | 要删除的记录 sys_id |
#### 输出
| 参数 | 类型 | 描述 |
| --------- | ---- | ----------- |
| `success` | boolean | 删除是否成功 |
| `metadata` | json | 操作元数据 |
## 注意事项
- 分类:`tools`
- 类型:`servicenow`

View File

@@ -49822,3 +49822,37 @@ checksums:
content/472: dbc5fceeefb3ab5fa505394becafef4e
content/473: b3f310d5ef115bea5a8b75bf25d7ea9a
content/474: 27c398e669b297cea076e4ce4cc0c5eb
9a28da736b42bf8de55126d4c06b6150:
meta/title: 418d5c8a18ad73520b38765741601f32
meta/description: 2b5a9723c7a45d2be5001d5d056b7c7b
content/0: 1b031fb0c62c46b177aeed5c3d3f8f80
content/1: e72670f88454b5b1c955b029de5fa8b5
content/2: 821e6394b0a953e2b0842b04ae8f3105
content/3: 7fa671d05a60d4f25b4980405c2c7278
content/4: 9c8aa3f09c9b2bd50ea4cdff3598ea4e
content/5: 263633aee6db9332de806ae50d87de05
content/6: 5a7e2171e5f73fec5eae21a50e5de661
content/7: 371d0e46b4bd2c23f559b8bc112f6955
content/8: 10d2d4eccb4b8923f048980dc16e43e1
content/9: bcadfc362b69078beee0088e5936c98b
content/10: d81ef802f80143282cf4e534561a9570
content/11: 02233e6212003c1d121424cfd8b86b62
content/12: efe2c6dd368708de68a1addbfdb11b0c
content/13: 371d0e46b4bd2c23f559b8bc112f6955
content/14: 0f3295854b7de5dbfab1ebd2a130b498
content/15: bcadfc362b69078beee0088e5936c98b
content/16: 953f353184dc27db1f20156db2a9ad90
content/17: 2011e87d0555cd0ab133ef2d35e7a37b
content/18: dbf08acb413d845ec419e45b1f986bdb
content/19: 371d0e46b4bd2c23f559b8bc112f6955
content/20: 3a8417b390ec7d3d55b1920c721e9006
content/21: bcadfc362b69078beee0088e5936c98b
content/22: c06a5bb458242baa23d34957034c2fe7
content/23: ff043e912417bc29ac7c64520160c07d
content/24: 9c2175ab469cb6ff9e62bc8bdcf7621d
content/25: 371d0e46b4bd2c23f559b8bc112f6955
content/26: 67e6ba04cf67f92e714ed94e7483dec5
content/27: bcadfc362b69078beee0088e5936c98b
content/28: fd0f38eb3fe5cf95be366a4ff6b4fb90
content/29: b3f310d5ef115bea5a8b75bf25d7ea9a
content/30: 4a7b2c644e487f3d12b6a6b54f8c6773

View File

@@ -4,7 +4,7 @@
"private": true,
"license": "Apache-2.0",
"scripts": {
"dev": "next dev --port 3001",
"dev": "next dev --port 7322",
"build": "fumadocs-mdx && NODE_OPTIONS='--max-old-space-size=8192' next build",
"start": "next start",
"postinstall": "fumadocs-mdx",

View File

@@ -159,7 +159,7 @@ describe('OAuth Utils', () => {
const result = await refreshTokenIfNeeded('request-id', mockCredential, 'credential-id')
expect(mockRefreshOAuthToken).toHaveBeenCalledWith('google', 'refresh-token')
expect(mockRefreshOAuthToken).toHaveBeenCalledWith('google', 'refresh-token', undefined)
expect(mockDb.update).toHaveBeenCalled()
expect(mockDb.set).toHaveBeenCalled()
expect(result).toEqual({ accessToken: 'new-token', refreshed: true })
@@ -239,7 +239,7 @@ describe('OAuth Utils', () => {
const token = await refreshAccessTokenIfNeeded('credential-id', 'test-user-id', 'request-id')
expect(mockRefreshOAuthToken).toHaveBeenCalledWith('google', 'refresh-token')
expect(mockRefreshOAuthToken).toHaveBeenCalledWith('google', 'refresh-token', undefined)
expect(mockDb.update).toHaveBeenCalled()
expect(mockDb.set).toHaveBeenCalled()
expect(token).toBe('new-token')
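The reworked assertions reflect a widened helper signature: `refreshOAuthToken` now takes an optional third argument, which non-ServiceNow call sites leave as `undefined`. A declaration sketch inferred from the call sites in this commit (the real declaration and return shape live outside this diff):
declare function refreshOAuthToken(
  providerId: string,
  refreshToken: string,
  instanceUrl?: string // per-instance token endpoint; only set for ServiceNow
): Promise<{ accessToken: string } | null> // return shape is an assumption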

View File

@@ -18,6 +18,7 @@ interface AccountInsertData {
updatedAt: Date
refreshToken?: string
idToken?: string
accessTokenExpiresAt?: Date
}
/**
@@ -103,6 +104,7 @@ export async function getOAuthToken(userId: string, providerId: string): Promise
accessToken: account.accessToken,
refreshToken: account.refreshToken,
accessTokenExpiresAt: account.accessTokenExpiresAt,
idToken: account.idToken,
})
.from(account)
.where(and(eq(account.userId, userId), eq(account.providerId, providerId)))
@@ -130,7 +132,14 @@ export async function getOAuthToken(userId: string, providerId: string): Promise
try {
// Use the existing refreshOAuthToken function
const refreshResult = await refreshOAuthToken(providerId, credential.refreshToken!)
// For ServiceNow, pass the instance URL (stored in idToken) for the token endpoint
const instanceUrl =
providerId === 'servicenow' ? (credential.idToken ?? undefined) : undefined
const refreshResult = await refreshOAuthToken(
providerId,
credential.refreshToken!,
instanceUrl
)
if (!refreshResult) {
logger.error(`Failed to refresh token for user ${userId}, provider ${providerId}`, {
@@ -213,9 +222,13 @@ export async function refreshAccessTokenIfNeeded(
if (shouldRefresh) {
logger.info(`[${requestId}] Token expired, attempting to refresh for credential`)
try {
// For ServiceNow, pass the instance URL (stored in idToken) for the token endpoint
const instanceUrl =
credential.providerId === 'servicenow' ? (credential.idToken ?? undefined) : undefined
const refreshedToken = await refreshOAuthToken(
credential.providerId,
credential.refreshToken!
credential.refreshToken!,
instanceUrl
)
if (!refreshedToken) {
@@ -287,7 +300,14 @@ export async function refreshTokenIfNeeded(
}
try {
const refreshResult = await refreshOAuthToken(credential.providerId, credential.refreshToken!)
// For ServiceNow, pass the instance URL (stored in idToken) for the token endpoint
const instanceUrl =
credential.providerId === 'servicenow' ? (credential.idToken ?? undefined) : undefined
const refreshResult = await refreshOAuthToken(
credential.providerId,
credential.refreshToken!,
instanceUrl
)
if (!refreshResult) {
logger.error(`[${requestId}] Failed to refresh token for credential`)
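The `providerId === 'servicenow' ? (credential.idToken ?? undefined) : undefined` expression now appears at three call sites in this file; a tiny helper could capture the pattern. A sketch under that assumption, not code from this commit:
function getInstanceUrlForProvider(providerId: string, idToken?: string | null): string | undefined {
  // ServiceNow keeps its per-instance base URL in the account's idToken column;
  // all other providers use a fixed token endpoint and need no URL here.
  return providerId === 'servicenow' ? (idToken ?? undefined) : undefined
}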

View File

@@ -0,0 +1,166 @@
import { type NextRequest, NextResponse } from 'next/server'
import { getSession } from '@/lib/auth'
import { env } from '@/lib/core/config/env'
import { getBaseUrl } from '@/lib/core/utils/urls'
import { createLogger } from '@/lib/logs/console/logger'
const logger = createLogger('ServiceNowCallback')
export const dynamic = 'force-dynamic'
export async function GET(request: NextRequest) {
const baseUrl = getBaseUrl()
try {
const session = await getSession()
if (!session?.user?.id) {
return NextResponse.redirect(`${baseUrl}/workspace?error=unauthorized`)
}
const { searchParams } = request.nextUrl
const code = searchParams.get('code')
const state = searchParams.get('state')
const error = searchParams.get('error')
const errorDescription = searchParams.get('error_description')
// Handle OAuth errors from ServiceNow
if (error) {
logger.error('ServiceNow OAuth error:', { error, errorDescription })
return NextResponse.redirect(
`${baseUrl}/workspace?error=servicenow_auth_error&message=${encodeURIComponent(errorDescription || error)}`
)
}
const storedState = request.cookies.get('servicenow_oauth_state')?.value
const storedInstanceUrl = request.cookies.get('servicenow_instance_url')?.value
const clientId = env.SERVICENOW_CLIENT_ID
const clientSecret = env.SERVICENOW_CLIENT_SECRET
if (!clientId || !clientSecret) {
logger.error('ServiceNow credentials not configured')
return NextResponse.redirect(`${baseUrl}/workspace?error=servicenow_config_error`)
}
// Validate state parameter
if (!state || state !== storedState) {
logger.error('State mismatch in ServiceNow OAuth callback')
return NextResponse.redirect(`${baseUrl}/workspace?error=servicenow_state_mismatch`)
}
// Validate authorization code
if (!code) {
logger.error('No code received from ServiceNow')
return NextResponse.redirect(`${baseUrl}/workspace?error=servicenow_no_code`)
}
// Validate instance URL
if (!storedInstanceUrl) {
logger.error('No instance URL stored')
return NextResponse.redirect(`${baseUrl}/workspace?error=servicenow_no_instance`)
}
const redirectUri = `${baseUrl}/api/auth/oauth2/callback/servicenow`
// Exchange authorization code for access token
const tokenResponse = await fetch(`${storedInstanceUrl}/oauth_token.do`, {
method: 'POST',
headers: {
'Content-Type': 'application/x-www-form-urlencoded',
},
body: new URLSearchParams({
grant_type: 'authorization_code',
code: code,
redirect_uri: redirectUri,
client_id: clientId,
client_secret: clientSecret,
}).toString(),
})
if (!tokenResponse.ok) {
const errorText = await tokenResponse.text()
logger.error('Failed to exchange code for token:', {
status: tokenResponse.status,
body: errorText,
})
return NextResponse.redirect(`${baseUrl}/workspace?error=servicenow_token_error`)
}
const tokenData = await tokenResponse.json()
const accessToken = tokenData.access_token
const refreshToken = tokenData.refresh_token
const expiresIn = tokenData.expires_in
// ServiceNow always grants 'useraccount' scope but returns empty string
const scope = tokenData.scope || 'useraccount'
logger.info('ServiceNow token exchange successful:', {
hasAccessToken: !!accessToken,
hasRefreshToken: !!refreshToken,
expiresIn,
})
if (!accessToken) {
logger.error('No access token in response')
return NextResponse.redirect(`${baseUrl}/workspace?error=servicenow_no_token`)
}
// Redirect to store endpoint with token data in cookies
const storeUrl = new URL(`${baseUrl}/api/auth/oauth2/servicenow/store`)
const response = NextResponse.redirect(storeUrl)
// Store token data in secure cookies for the store endpoint
response.cookies.set('servicenow_pending_token', accessToken, {
httpOnly: true,
secure: process.env.NODE_ENV === 'production',
sameSite: 'lax',
maxAge: 60, // 1 minute
path: '/',
})
if (refreshToken) {
response.cookies.set('servicenow_pending_refresh_token', refreshToken, {
httpOnly: true,
secure: process.env.NODE_ENV === 'production',
sameSite: 'lax',
maxAge: 60,
path: '/',
})
}
response.cookies.set('servicenow_pending_instance', storedInstanceUrl, {
httpOnly: true,
secure: process.env.NODE_ENV === 'production',
sameSite: 'lax',
maxAge: 60,
path: '/',
})
response.cookies.set('servicenow_pending_scope', scope || '', {
httpOnly: true,
secure: process.env.NODE_ENV === 'production',
sameSite: 'lax',
maxAge: 60,
path: '/',
})
if (expiresIn) {
response.cookies.set('servicenow_pending_expires_in', expiresIn.toString(), {
httpOnly: true,
secure: process.env.NODE_ENV === 'production',
sameSite: 'lax',
maxAge: 60,
path: '/',
})
}
// Clean up OAuth state cookies
response.cookies.delete('servicenow_oauth_state')
response.cookies.delete('servicenow_instance_url')
return response
} catch (error) {
logger.error('Error in ServiceNow OAuth callback:', error)
return NextResponse.redirect(`${baseUrl}/workspace?error=servicenow_callback_error`)
}
}

View File

@@ -0,0 +1,142 @@
import { db } from '@sim/db'
import { account } from '@sim/db/schema'
import { and, eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { getSession } from '@/lib/auth'
import { getBaseUrl } from '@/lib/core/utils/urls'
import { createLogger } from '@/lib/logs/console/logger'
import { safeAccountInsert } from '@/app/api/auth/oauth/utils'
const logger = createLogger('ServiceNowStore')
export const dynamic = 'force-dynamic'
export async function GET(request: NextRequest) {
const baseUrl = getBaseUrl()
try {
const session = await getSession()
if (!session?.user?.id) {
logger.warn('Unauthorized attempt to store ServiceNow token')
return NextResponse.redirect(`${baseUrl}/workspace?error=unauthorized`)
}
// Retrieve token data from cookies
const accessToken = request.cookies.get('servicenow_pending_token')?.value
const refreshToken = request.cookies.get('servicenow_pending_refresh_token')?.value
const instanceUrl = request.cookies.get('servicenow_pending_instance')?.value
const scope = request.cookies.get('servicenow_pending_scope')?.value
const expiresInStr = request.cookies.get('servicenow_pending_expires_in')?.value
if (!accessToken || !instanceUrl) {
logger.error('Missing token or instance URL in cookies')
return NextResponse.redirect(`${baseUrl}/workspace?error=servicenow_missing_data`)
}
// Validate the token by fetching user info from ServiceNow
const userResponse = await fetch(
`${instanceUrl}/api/now/table/sys_user?sysparm_query=user_name=${encodeURIComponent('javascript:gs.getUserName()')}&sysparm_limit=1`,
{
headers: {
Authorization: `Bearer ${accessToken}`,
Accept: 'application/json',
},
}
)
// Alternative: Use the instance info endpoint instead
let accountIdentifier = instanceUrl
let userInfo: Record<string, unknown> | null = null
// Try to get current user info
try {
const whoamiResponse = await fetch(`${instanceUrl}/api/now/ui/user/current_user`, {
headers: {
Authorization: `Bearer ${accessToken}`,
Accept: 'application/json',
},
})
if (whoamiResponse.ok) {
const whoamiData = await whoamiResponse.json()
userInfo = whoamiData.result
if (userInfo?.user_sys_id) {
accountIdentifier = userInfo.user_sys_id as string
} else if (userInfo?.user_name) {
accountIdentifier = userInfo.user_name as string
}
logger.info('Retrieved ServiceNow user info', { accountIdentifier })
}
} catch (e) {
logger.warn('Could not retrieve ServiceNow user info, using instance URL as identifier')
}
// Calculate expiration time
const now = new Date()
const expiresIn = expiresInStr ? Number.parseInt(expiresInStr, 10) : 3600 // Default to 1 hour
const accessTokenExpiresAt = new Date(now.getTime() + expiresIn * 1000)
// Check for existing ServiceNow account for this user
const existing = await db.query.account.findFirst({
where: and(eq(account.userId, session.user.id), eq(account.providerId, 'servicenow')),
})
// ServiceNow always grants 'useraccount' scope but returns empty string
const effectiveScope = scope?.trim() ? scope : 'useraccount'
const accountData = {
accessToken: accessToken,
refreshToken: refreshToken || null,
accountId: accountIdentifier,
scope: effectiveScope,
updatedAt: now,
accessTokenExpiresAt: accessTokenExpiresAt,
idToken: instanceUrl, // Store instance URL in idToken for API calls
}
if (existing) {
await db.update(account).set(accountData).where(eq(account.id, existing.id))
logger.info('Updated existing ServiceNow account', { accountId: existing.id })
} else {
await safeAccountInsert(
{
id: `servicenow_${session.user.id}_${Date.now()}`,
userId: session.user.id,
providerId: 'servicenow',
accountId: accountData.accountId,
accessToken: accountData.accessToken,
refreshToken: accountData.refreshToken || undefined,
accessTokenExpiresAt: accountData.accessTokenExpiresAt,
scope: accountData.scope,
idToken: accountData.idToken,
createdAt: now,
updatedAt: now,
},
{ provider: 'ServiceNow', identifier: instanceUrl }
)
logger.info('Created new ServiceNow account')
}
// Get return URL from cookie
const returnUrl = request.cookies.get('servicenow_return_url')?.value
const redirectUrl = returnUrl || `${baseUrl}/workspace`
const finalUrl = new URL(redirectUrl)
finalUrl.searchParams.set('servicenow_connected', 'true')
const response = NextResponse.redirect(finalUrl.toString())
// Clean up all ServiceNow cookies
response.cookies.delete('servicenow_pending_token')
response.cookies.delete('servicenow_pending_refresh_token')
response.cookies.delete('servicenow_pending_instance')
response.cookies.delete('servicenow_pending_scope')
response.cookies.delete('servicenow_pending_expires_in')
response.cookies.delete('servicenow_return_url')
return response
} catch (error) {
logger.error('Error storing ServiceNow token:', error)
return NextResponse.redirect(`${baseUrl}/workspace?error=servicenow_store_error`)
}
}
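Together with the callback route above, this forms a two-hop handshake: the exchanged tokens travel between the two routes in short-lived (60-second) httpOnly cookies rather than query parameters, keeping them out of URLs and request logs. The hops, as implied by the redirect and cookie names in this commit:
// 1. GET /api/auth/oauth2/callback/servicenow — exchanges the code, sets servicenow_pending_* cookies
// 2. GET /api/auth/oauth2/servicenow/store    — validates the token, upserts the account row, clears the cookies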

View File

@@ -0,0 +1,264 @@
import { type NextRequest, NextResponse } from 'next/server'
import { getSession } from '@/lib/auth'
import { env } from '@/lib/core/config/env'
import { getBaseUrl } from '@/lib/core/utils/urls'
import { createLogger } from '@/lib/logs/console/logger'
const logger = createLogger('ServiceNowAuthorize')
export const dynamic = 'force-dynamic'
/**
* ServiceNow OAuth scopes
* useraccount - Default scope for user account access
* Note: ServiceNow always returns 'useraccount' in OAuth responses regardless of requested scopes.
* Table API permissions are configured at the OAuth application level in ServiceNow.
*/
const SERVICENOW_SCOPES = 'useraccount'
/**
* Validates a ServiceNow instance URL format
*/
function isValidInstanceUrl(url: string): boolean {
try {
const parsed = new URL(url)
return (
parsed.protocol === 'https:' &&
(parsed.hostname.endsWith('.service-now.com') || parsed.hostname.endsWith('.servicenow.com'))
)
} catch {
return false
}
}
export async function GET(request: NextRequest) {
try {
const session = await getSession()
if (!session?.user?.id) {
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
}
const clientId = env.SERVICENOW_CLIENT_ID
if (!clientId) {
logger.error('SERVICENOW_CLIENT_ID not configured')
return NextResponse.json({ error: 'ServiceNow client ID not configured' }, { status: 500 })
}
const instanceUrl = request.nextUrl.searchParams.get('instanceUrl')
const returnUrl = request.nextUrl.searchParams.get('returnUrl')
if (!instanceUrl) {
const returnUrlParam = returnUrl ? encodeURIComponent(returnUrl) : ''
return new NextResponse(
`<!DOCTYPE html>
<html>
<head>
<title>Connect ServiceNow Instance</title>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<style>
body {
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
display: flex;
align-items: center;
justify-content: center;
height: 100vh;
margin: 0;
background: linear-gradient(135deg, #81B5A1 0%, #5A8A75 100%);
}
.container {
background: white;
padding: 2rem;
border-radius: 12px;
box-shadow: 0 10px 40px rgba(0,0,0,0.1);
text-align: center;
max-width: 450px;
width: 90%;
}
h2 {
color: #111827;
margin: 0 0 0.5rem 0;
}
p {
color: #6b7280;
margin: 0 0 1.5rem 0;
}
input {
width: 100%;
padding: 0.75rem;
border: 1px solid #d1d5db;
border-radius: 8px;
font-size: 1rem;
margin-bottom: 1rem;
box-sizing: border-box;
}
input:focus {
outline: none;
border-color: #81B5A1;
box-shadow: 0 0 0 3px rgba(129, 181, 161, 0.2);
}
button {
width: 100%;
padding: 0.75rem;
background: #81B5A1;
color: white;
border: none;
border-radius: 8px;
font-size: 1rem;
cursor: pointer;
font-weight: 500;
}
button:hover {
background: #6A9A87;
}
.help {
font-size: 0.875rem;
color: #9ca3af;
margin-top: 1rem;
}
.error {
color: #dc2626;
font-size: 0.875rem;
margin-bottom: 1rem;
display: none;
}
</style>
</head>
<body>
<div class="container">
<h2>Connect Your ServiceNow Instance</h2>
<p>Enter your ServiceNow instance URL to continue</p>
<div id="error" class="error"></div>
<form onsubmit="handleSubmit(event)">
<input
type="text"
id="instanceUrl"
placeholder="https://mycompany.service-now.com"
required
/>
<button type="submit">Connect Instance</button>
</form>
<p class="help">Your instance URL looks like: https://yourcompany.service-now.com</p>
</div>
<script>
const returnUrl = '${returnUrlParam}';
function handleSubmit(e) {
e.preventDefault();
const errorEl = document.getElementById('error');
let instanceUrl = document.getElementById('instanceUrl').value.trim();
// Ensure https:// prefix
if (!instanceUrl.startsWith('https://') && !instanceUrl.startsWith('http://')) {
instanceUrl = 'https://' + instanceUrl;
}
// Validate the URL format
try {
const parsed = new URL(instanceUrl);
if (!parsed.hostname.endsWith('.service-now.com') && !parsed.hostname.endsWith('.servicenow.com')) {
errorEl.textContent = 'Please enter a valid ServiceNow instance URL (e.g., https://yourcompany.service-now.com)';
errorEl.style.display = 'block';
return;
}
// Clean the URL (remove trailing slashes, paths)
instanceUrl = parsed.origin;
} catch {
errorEl.textContent = 'Please enter a valid URL';
errorEl.style.display = 'block';
return;
}
let url = window.location.pathname + '?instanceUrl=' + encodeURIComponent(instanceUrl);
if (returnUrl) {
url += '&returnUrl=' + returnUrl;
}
window.location.href = url;
}
</script>
</body>
</html>`,
{
headers: {
'Content-Type': 'text/html; charset=utf-8',
'Cache-Control': 'no-store, no-cache, must-revalidate',
},
}
)
}
// Validate instance URL
if (!isValidInstanceUrl(instanceUrl)) {
logger.error('Invalid ServiceNow instance URL:', { instanceUrl })
return NextResponse.json(
{
error:
'Invalid ServiceNow instance URL. Must be a valid .service-now.com or .servicenow.com domain.',
},
{ status: 400 }
)
}
// Clean the instance URL
const parsedUrl = new URL(instanceUrl)
const cleanInstanceUrl = parsedUrl.origin
const baseUrl = getBaseUrl()
const redirectUri = `${baseUrl}/api/auth/oauth2/callback/servicenow`
const state = crypto.randomUUID()
// ServiceNow OAuth authorization URL
const oauthUrl =
`${cleanInstanceUrl}/oauth_auth.do?` +
new URLSearchParams({
response_type: 'code',
client_id: clientId,
redirect_uri: redirectUri,
state: state,
scope: SERVICENOW_SCOPES,
}).toString()
logger.info('Initiating ServiceNow OAuth:', {
instanceUrl: cleanInstanceUrl,
requestedScopes: SERVICENOW_SCOPES,
redirectUri,
returnUrl: returnUrl || 'not specified',
})
const response = NextResponse.redirect(oauthUrl)
// Store state and instance URL in cookies for validation in callback
response.cookies.set('servicenow_oauth_state', state, {
httpOnly: true,
secure: process.env.NODE_ENV === 'production',
sameSite: 'lax',
maxAge: 60 * 10, // 10 minutes
path: '/',
})
response.cookies.set('servicenow_instance_url', cleanInstanceUrl, {
httpOnly: true,
secure: process.env.NODE_ENV === 'production',
sameSite: 'lax',
maxAge: 60 * 10,
path: '/',
})
if (returnUrl) {
response.cookies.set('servicenow_return_url', returnUrl, {
httpOnly: true,
secure: process.env.NODE_ENV === 'production',
sameSite: 'lax',
maxAge: 60 * 10,
path: '/',
})
}
return response
} catch (error) {
logger.error('Error initiating ServiceNow authorization:', error)
return NextResponse.json({ error: 'Internal server error' }, { status: 500 })
}
}
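Kicking off the flow from the client is a plain navigation; the route either renders the instance-URL form or redirects to ServiceNow. A hypothetical example, assuming this handler is mounted at /api/auth/oauth2/servicenow (the file path is not visible in this view):
window.location.href =
  '/api/auth/oauth2/servicenow?instanceUrl=' +
  encodeURIComponent('https://mycompany.service-now.com') +
  '&returnUrl=' +
  encodeURIComponent('/workspace') // where the store route should send the user afterwards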

View File

@@ -303,6 +303,14 @@ export async function POST(req: NextRequest) {
apiVersion: 'preview',
endpoint: env.AZURE_OPENAI_ENDPOINT,
}
} else if (providerEnv === 'vertex') {
providerConfig = {
provider: 'vertex',
model: modelToUse,
apiKey: env.COPILOT_API_KEY,
vertexProject: env.VERTEX_PROJECT,
vertexLocation: env.VERTEX_LOCATION,
}
} else {
providerConfig = {
provider: providerEnv,

View File

@@ -66,6 +66,14 @@ export async function POST(req: NextRequest) {
apiVersion: env.AZURE_OPENAI_API_VERSION,
endpoint: env.AZURE_OPENAI_ENDPOINT,
}
} else if (providerEnv === 'vertex') {
providerConfig = {
provider: 'vertex',
model: modelToUse,
apiKey: env.COPILOT_API_KEY,
vertexProject: env.VERTEX_PROJECT,
vertexLocation: env.VERTEX_LOCATION,
}
} else {
providerConfig = {
provider: providerEnv,

View File

@@ -6,7 +6,22 @@ import {
workflowDeploymentVersion,
workflowExecutionLogs,
} from '@sim/db/schema'
import { and, desc, eq, gte, inArray, isNotNull, isNull, lte, or, type SQL, sql } from 'drizzle-orm'
import {
and,
desc,
eq,
gt,
gte,
inArray,
isNotNull,
isNull,
lt,
lte,
ne,
or,
type SQL,
sql,
} from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getSession } from '@/lib/auth'
@@ -22,14 +37,19 @@ const QueryParamsSchema = z.object({
limit: z.coerce.number().optional().default(100),
offset: z.coerce.number().optional().default(0),
level: z.string().optional(),
workflowIds: z.string().optional(), // Comma-separated list of workflow IDs
folderIds: z.string().optional(), // Comma-separated list of folder IDs
triggers: z.string().optional(), // Comma-separated list of trigger types
workflowIds: z.string().optional(),
folderIds: z.string().optional(),
triggers: z.string().optional(),
startDate: z.string().optional(),
endDate: z.string().optional(),
search: z.string().optional(),
workflowName: z.string().optional(),
folderName: z.string().optional(),
executionId: z.string().optional(),
costOperator: z.enum(['=', '>', '<', '>=', '<=', '!=']).optional(),
costValue: z.coerce.number().optional(),
durationOperator: z.enum(['=', '>', '<', '>=', '<=', '!=']).optional(),
durationValue: z.coerce.number().optional(),
workspaceId: z.string(),
})
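// Example of the new operator filters on the wire (a hypothetical query string; '>=' and '<'
// must be URL-encoded as %3E%3D and %3C):
//   ?workspaceId=ws_123&costOperator=%3E%3D&costValue=0.01&durationOperator=%3C&durationValue=5000
// reads as: cost >= 0.01 AND totalDurationMs < 5000.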
@@ -49,7 +69,6 @@ export async function GET(request: NextRequest) {
const { searchParams } = new URL(request.url)
const params = QueryParamsSchema.parse(Object.fromEntries(searchParams.entries()))
// Conditionally select columns based on detail level to optimize performance
const selectColumns =
params.details === 'full'
? {
@@ -63,9 +82,9 @@ export async function GET(request: NextRequest) {
startedAt: workflowExecutionLogs.startedAt,
endedAt: workflowExecutionLogs.endedAt,
totalDurationMs: workflowExecutionLogs.totalDurationMs,
executionData: workflowExecutionLogs.executionData, // Large field - only in full mode
executionData: workflowExecutionLogs.executionData,
cost: workflowExecutionLogs.cost,
files: workflowExecutionLogs.files, // Large field - only in full mode
files: workflowExecutionLogs.files,
createdAt: workflowExecutionLogs.createdAt,
workflowName: workflow.name,
workflowDescription: workflow.description,
@@ -82,7 +101,6 @@ export async function GET(request: NextRequest) {
deploymentVersionName: workflowDeploymentVersion.name,
}
: {
// Basic mode - exclude large fields for better performance
id: workflowExecutionLogs.id,
workflowId: workflowExecutionLogs.workflowId,
executionId: workflowExecutionLogs.executionId,
@@ -93,9 +111,9 @@ export async function GET(request: NextRequest) {
startedAt: workflowExecutionLogs.startedAt,
endedAt: workflowExecutionLogs.endedAt,
totalDurationMs: workflowExecutionLogs.totalDurationMs,
executionData: sql<null>`NULL`, // Exclude large execution data in basic mode
executionData: sql<null>`NULL`,
cost: workflowExecutionLogs.cost,
files: sql<null>`NULL`, // Exclude files in basic mode
files: sql<null>`NULL`,
createdAt: workflowExecutionLogs.createdAt,
workflowName: workflow.name,
workflowDescription: workflow.description,
@@ -109,7 +127,7 @@ export async function GET(request: NextRequest) {
pausedTotalPauseCount: pausedExecutions.totalPauseCount,
pausedResumedCount: pausedExecutions.resumedCount,
deploymentVersion: workflowDeploymentVersion.version,
deploymentVersionName: sql<null>`NULL`, // Only needed in full mode for details panel
deploymentVersionName: sql<null>`NULL`,
}
const baseQuery = db
@@ -139,34 +157,28 @@ export async function GET(request: NextRequest) {
)
)
// Build additional conditions for the query
let conditions: SQL | undefined
// Filter by level with support for derived statuses (running, pending)
if (params.level && params.level !== 'all') {
const levels = params.level.split(',').filter(Boolean)
const levelConditions: SQL[] = []
for (const level of levels) {
if (level === 'error') {
// Direct database field
levelConditions.push(eq(workflowExecutionLogs.level, 'error'))
} else if (level === 'info') {
// Completed info logs only (not running, not pending)
const condition = and(
eq(workflowExecutionLogs.level, 'info'),
isNotNull(workflowExecutionLogs.endedAt)
)
if (condition) levelConditions.push(condition)
} else if (level === 'running') {
// Running logs: info level with no endedAt
const condition = and(
eq(workflowExecutionLogs.level, 'info'),
isNull(workflowExecutionLogs.endedAt)
)
if (condition) levelConditions.push(condition)
} else if (level === 'pending') {
// Pending logs: info level with pause status indicators
const condition = and(
eq(workflowExecutionLogs.level, 'info'),
or(
@@ -189,7 +201,6 @@ export async function GET(request: NextRequest) {
}
}
// Filter by specific workflow IDs
if (params.workflowIds) {
const workflowIds = params.workflowIds.split(',').filter(Boolean)
if (workflowIds.length > 0) {
@@ -197,7 +208,6 @@ export async function GET(request: NextRequest) {
}
}
// Filter by folder IDs
if (params.folderIds) {
const folderIds = params.folderIds.split(',').filter(Boolean)
if (folderIds.length > 0) {
@@ -205,7 +215,6 @@ export async function GET(request: NextRequest) {
}
}
// Filter by triggers
if (params.triggers) {
const triggers = params.triggers.split(',').filter(Boolean)
if (triggers.length > 0 && !triggers.includes('all')) {
@@ -213,7 +222,6 @@ export async function GET(request: NextRequest) {
}
}
// Filter by date range
if (params.startDate) {
conditions = and(
conditions,
@@ -224,33 +232,79 @@ export async function GET(request: NextRequest) {
conditions = and(conditions, lte(workflowExecutionLogs.startedAt, new Date(params.endDate)))
}
// Filter by search query
if (params.search) {
const searchTerm = `%${params.search}%`
// With message removed, restrict search to executionId only
conditions = and(conditions, sql`${workflowExecutionLogs.executionId} ILIKE ${searchTerm}`)
}
// Filter by workflow name (from advanced search input)
if (params.workflowName) {
const nameTerm = `%${params.workflowName}%`
conditions = and(conditions, sql`${workflow.name} ILIKE ${nameTerm}`)
}
// Filter by folder name (best-effort text match when present on workflows)
if (params.folderName) {
const folderTerm = `%${params.folderName}%`
conditions = and(conditions, sql`${workflow.name} ILIKE ${folderTerm}`)
}
// Execute the query using the optimized join
if (params.executionId) {
conditions = and(conditions, eq(workflowExecutionLogs.executionId, params.executionId))
}
if (params.costOperator && params.costValue !== undefined) {
const costField = sql`(${workflowExecutionLogs.cost}->>'total')::numeric`
switch (params.costOperator) {
case '=':
conditions = and(conditions, sql`${costField} = ${params.costValue}`)
break
case '>':
conditions = and(conditions, sql`${costField} > ${params.costValue}`)
break
case '<':
conditions = and(conditions, sql`${costField} < ${params.costValue}`)
break
case '>=':
conditions = and(conditions, sql`${costField} >= ${params.costValue}`)
break
case '<=':
conditions = and(conditions, sql`${costField} <= ${params.costValue}`)
break
case '!=':
conditions = and(conditions, sql`${costField} != ${params.costValue}`)
break
}
}
if (params.durationOperator && params.durationValue !== undefined) {
const durationField = workflowExecutionLogs.totalDurationMs
switch (params.durationOperator) {
case '=':
conditions = and(conditions, eq(durationField, params.durationValue))
break
case '>':
conditions = and(conditions, gt(durationField, params.durationValue))
break
case '<':
conditions = and(conditions, lt(durationField, params.durationValue))
break
case '>=':
conditions = and(conditions, gte(durationField, params.durationValue))
break
case '<=':
conditions = and(conditions, lte(durationField, params.durationValue))
break
case '!=':
conditions = and(conditions, ne(durationField, params.durationValue))
break
}
}
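// Note: the cost branch above compares against the JSONB payload, roughly
//   (cost->>'total')::numeric <op> <value>
// while totalDurationMs is a plain integer column, so drizzle's typed comparators
// (eq, gt, lt, gte, lte, ne) apply to it directly.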
const logs = await baseQuery
.where(conditions)
.orderBy(desc(workflowExecutionLogs.startedAt))
.limit(params.limit)
.offset(params.offset)
// Get total count for pagination using the same join structure
const countQuery = db
.select({ count: sql<number>`count(*)` })
.from(workflowExecutionLogs)
@@ -279,13 +333,10 @@ export async function GET(request: NextRequest) {
const count = countResult[0]?.count || 0
// Block executions are now extracted from trace spans instead of separate table
const blockExecutionsByExecution: Record<string, any[]> = {}
// Create clean trace spans from block executions
const createTraceSpans = (blockExecutions: any[]) => {
return blockExecutions.map((block, index) => {
// For error blocks, include error information in the output
let output = block.outputData
if (block.status === 'error' && block.errorMessage) {
output = {
@@ -314,7 +365,6 @@ export async function GET(request: NextRequest) {
})
}
// Extract cost information from block executions
const extractCostSummary = (blockExecutions: any[]) => {
let totalCost = 0
let totalInputCost = 0
@@ -333,7 +383,6 @@ export async function GET(request: NextRequest) {
totalPromptTokens += block.cost.tokens?.prompt || 0
totalCompletionTokens += block.cost.tokens?.completion || 0
// Track per-model costs
if (block.cost.model) {
if (!models.has(block.cost.model)) {
models.set(block.cost.model, {
@@ -363,34 +412,29 @@ export async function GET(request: NextRequest) {
prompt: totalPromptTokens,
completion: totalCompletionTokens,
},
models: Object.fromEntries(models), // Convert Map to object for JSON serialization
models: Object.fromEntries(models),
}
}
// Transform to clean log format with workflow data included
const enhancedLogs = logs.map((log) => {
const blockExecutions = blockExecutionsByExecution[log.executionId] || []
// Only process trace spans and detailed cost in full mode
let traceSpans = []
let finalOutput: any
let costSummary = (log.cost as any) || { total: 0 }
if (params.details === 'full' && log.executionData) {
// Use stored trace spans if available, otherwise create from block executions
const storedTraceSpans = (log.executionData as any)?.traceSpans
traceSpans =
storedTraceSpans && Array.isArray(storedTraceSpans) && storedTraceSpans.length > 0
? storedTraceSpans
: createTraceSpans(blockExecutions)
// Prefer stored cost JSON; otherwise synthesize from blocks
costSummary =
log.cost && Object.keys(log.cost as any).length > 0
? (log.cost as any)
: extractCostSummary(blockExecutions)
// Include finalOutput if present on executionData
try {
const fo = (log.executionData as any)?.finalOutput
if (fo !== undefined) finalOutput = fo

View File

@@ -5,6 +5,7 @@ import type { NextRequest } from 'next/server'
import { createLogger } from '@/lib/logs/console/logger'
import { withMcpAuth } from '@/lib/mcp/middleware'
import { mcpService } from '@/lib/mcp/service'
import type { McpServerStatusConfig } from '@/lib/mcp/types'
import { createMcpErrorResponse, createMcpSuccessResponse } from '@/lib/mcp/utils'
const logger = createLogger('McpServerRefreshAPI')
@@ -50,6 +51,12 @@ export const POST = withMcpAuth<{ id: string }>('read')(
let toolCount = 0
let lastError: string | null = null
const currentStatusConfig: McpServerStatusConfig =
(server.statusConfig as McpServerStatusConfig | null) ?? {
consecutiveFailures: 0,
lastSuccessfulDiscovery: null,
}
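// Shape implied by the default above and by newStatusConfig below; the concrete type
// is imported from '@/lib/mcp/types':
//   interface McpServerStatusConfig {
//     consecutiveFailures: number
//     lastSuccessfulDiscovery: string | null // ISO timestamp of the last successful discovery
//   }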
try {
const tools = await mcpService.discoverServerTools(userId, serverId, workspaceId)
connectionStatus = 'connected'
@@ -63,20 +70,40 @@ export const POST = withMcpAuth<{ id: string }>('read')(
logger.warn(`[${requestId}] Failed to connect to server ${serverId}:`, error)
}
const now = new Date()
const newStatusConfig =
connectionStatus === 'connected'
? { consecutiveFailures: 0, lastSuccessfulDiscovery: now.toISOString() }
: {
consecutiveFailures: currentStatusConfig.consecutiveFailures + 1,
lastSuccessfulDiscovery: currentStatusConfig.lastSuccessfulDiscovery,
}
const [refreshedServer] = await db
.update(mcpServers)
.set({
lastToolsRefresh: new Date(),
lastToolsRefresh: now,
connectionStatus,
lastError,
lastConnected: connectionStatus === 'connected' ? new Date() : server.lastConnected,
lastConnected: connectionStatus === 'connected' ? now : server.lastConnected,
toolCount,
updatedAt: new Date(),
statusConfig: newStatusConfig,
updatedAt: now,
})
.where(eq(mcpServers.id, serverId))
.returning()
logger.info(`[${requestId}] Successfully refreshed MCP server: ${serverId}`)
if (connectionStatus === 'connected') {
logger.info(
`[${requestId}] Successfully refreshed MCP server: ${serverId} (${toolCount} tools)`
)
await mcpService.clearCache(workspaceId)
} else {
logger.warn(
`[${requestId}] Refresh completed for MCP server ${serverId} but connection failed: ${lastError}`
)
}
return createMcpSuccessResponse({
status: connectionStatus,
toolCount,

View File

@@ -48,6 +48,19 @@ export const PATCH = withMcpAuth<{ id: string }>('write')(
// Remove workspaceId from body to prevent it from being updated
const { workspaceId: _, ...updateData } = body
// Get the current server to check if URL is changing
const [currentServer] = await db
.select({ url: mcpServers.url })
.from(mcpServers)
.where(
and(
eq(mcpServers.id, serverId),
eq(mcpServers.workspaceId, workspaceId),
isNull(mcpServers.deletedAt)
)
)
.limit(1)
const [updatedServer] = await db
.update(mcpServers)
.set({
@@ -71,8 +84,12 @@ export const PATCH = withMcpAuth<{ id: string }>('write')(
)
}
// Clear MCP service cache after update
mcpService.clearCache(workspaceId)
// Only clear cache if URL changed (requires re-discovery)
const urlChanged = body.url && currentServer?.url !== body.url
if (urlChanged) {
await mcpService.clearCache(workspaceId)
logger.info(`[${requestId}] Cleared cache due to URL change`)
}
logger.info(`[${requestId}] Successfully updated MCP server: ${serverId}`)
return createMcpSuccessResponse({ server: updatedServer })

View File

@@ -117,12 +117,14 @@ export const POST = withMcpAuth('write')(
timeout: body.timeout || 30000,
retries: body.retries || 3,
enabled: body.enabled !== false,
connectionStatus: 'connected',
lastConnected: new Date(),
updatedAt: new Date(),
deletedAt: null,
})
.where(eq(mcpServers.id, serverId))
mcpService.clearCache(workspaceId)
await mcpService.clearCache(workspaceId)
logger.info(
`[${requestId}] Successfully updated MCP server: ${body.name} (ID: ${serverId})`
@@ -145,12 +147,14 @@ export const POST = withMcpAuth('write')(
timeout: body.timeout || 30000,
retries: body.retries || 3,
enabled: body.enabled !== false,
connectionStatus: 'connected',
lastConnected: new Date(),
createdAt: new Date(),
updatedAt: new Date(),
})
.returning()
mcpService.clearCache(workspaceId)
await mcpService.clearCache(workspaceId)
logger.info(
`[${requestId}] Successfully registered MCP server: ${body.name} (ID: ${serverId})`
@@ -212,7 +216,7 @@ export const DELETE = withMcpAuth('admin')(
)
}
mcpService.clearCache(workspaceId)
await mcpService.clearCache(workspaceId)
logger.info(`[${requestId}] Successfully deleted MCP server: ${serverId}`)
return createMcpSuccessResponse({ message: `Server ${serverId} deleted successfully` })

View File

@@ -0,0 +1,103 @@
import { db } from '@sim/db'
import { workflow, workflowBlocks } from '@sim/db/schema'
import { eq } from 'drizzle-orm'
import type { NextRequest } from 'next/server'
import { createLogger } from '@/lib/logs/console/logger'
import { withMcpAuth } from '@/lib/mcp/middleware'
import { createMcpErrorResponse, createMcpSuccessResponse } from '@/lib/mcp/utils'
const logger = createLogger('McpStoredToolsAPI')
export const dynamic = 'force-dynamic'
interface StoredMcpTool {
workflowId: string
workflowName: string
serverId: string
serverUrl?: string
toolName: string
schema?: Record<string, unknown>
}
/**
* GET - Get all stored MCP tools from workflows in the workspace
*
* Scans all workflows in the workspace and extracts MCP tools that have been
* added to agent blocks. Returns the stored state of each tool for comparison
* against current server state.
*/
export const GET = withMcpAuth('read')(
async (request: NextRequest, { userId, workspaceId, requestId }) => {
try {
logger.info(`[${requestId}] Fetching stored MCP tools for workspace ${workspaceId}`)
// Get all workflows in workspace
const workflows = await db
.select({
id: workflow.id,
name: workflow.name,
})
.from(workflow)
.where(eq(workflow.workspaceId, workspaceId))
const workflowMap = new Map(workflows.map((w) => [w.id, w.name]))
const workflowIds = workflows.map((w) => w.id)
if (workflowIds.length === 0) {
return createMcpSuccessResponse({ tools: [] })
}
// Get all agent blocks from these workflows
const agentBlocks = await db
.select({
workflowId: workflowBlocks.workflowId,
subBlocks: workflowBlocks.subBlocks,
})
.from(workflowBlocks)
.where(eq(workflowBlocks.type, 'agent'))
const storedTools: StoredMcpTool[] = []
for (const block of agentBlocks) {
if (!workflowMap.has(block.workflowId)) continue
const subBlocks = block.subBlocks as Record<string, unknown> | null
if (!subBlocks) continue
const toolsSubBlock = subBlocks.tools as Record<string, unknown> | undefined
const toolsValue = toolsSubBlock?.value
if (!toolsValue || !Array.isArray(toolsValue)) continue
for (const tool of toolsValue) {
if (tool.type !== 'mcp') continue
const params = tool.params as Record<string, unknown> | undefined
if (!params?.serverId || !params?.toolName) continue
storedTools.push({
workflowId: block.workflowId,
workflowName: workflowMap.get(block.workflowId) || 'Untitled',
serverId: params.serverId as string,
serverUrl: params.serverUrl as string | undefined,
toolName: params.toolName as string,
schema: tool.schema as Record<string, unknown> | undefined,
})
}
}
logger.info(
`[${requestId}] Found ${storedTools.length} stored MCP tools across ${workflows.length} workflows`
)
return createMcpSuccessResponse({ tools: storedTools })
} catch (error) {
logger.error(`[${requestId}] Error fetching stored MCP tools:`, error)
return createMcpErrorResponse(
error instanceof Error ? error : new Error('Failed to fetch stored MCP tools'),
'Failed to fetch stored MCP tools',
500
)
}
}
)

View File

@@ -35,6 +35,8 @@ export async function POST(request: NextRequest) {
apiKey,
azureEndpoint,
azureApiVersion,
vertexProject,
vertexLocation,
responseFormat,
workflowId,
workspaceId,
@@ -58,6 +60,8 @@ export async function POST(request: NextRequest) {
hasApiKey: !!apiKey,
hasAzureEndpoint: !!azureEndpoint,
hasAzureApiVersion: !!azureApiVersion,
hasVertexProject: !!vertexProject,
hasVertexLocation: !!vertexLocation,
hasResponseFormat: !!responseFormat,
workflowId,
stream: !!stream,
@@ -104,6 +108,8 @@ export async function POST(request: NextRequest) {
apiKey: finalApiKey,
azureEndpoint,
azureApiVersion,
vertexProject,
vertexLocation,
responseFormat,
workflowId,
workspaceId,

View File

@@ -2,11 +2,9 @@
import { useEffect, useMemo, useRef, useState } from 'react'
import { Search, X } from 'lucide-react'
import { useParams } from 'next/navigation'
import { Button, Popover, PopoverAnchor, PopoverContent } from '@/components/emcn'
import { Badge, Popover, PopoverAnchor, PopoverContent } from '@/components/emcn'
import { cn } from '@/lib/core/utils/cn'
import { createLogger } from '@/lib/logs/console/logger'
import { getIntegrationMetadata } from '@/lib/logs/get-trigger-options'
import { getTriggerOptions } from '@/lib/logs/get-trigger-options'
import { type ParsedFilter, parseQuery } from '@/lib/logs/query-parser'
import {
type FolderData,
@@ -18,7 +16,15 @@ import { useSearchState } from '@/app/workspace/[workspaceId]/logs/hooks/use-sea
import { useFolderStore } from '@/stores/folders/store'
import { useWorkflowRegistry } from '@/stores/workflows/registry/store'
const logger = createLogger('AutocompleteSearch')
function truncateFilterValue(field: string, value: string): string {
if ((field === 'executionId' || field === 'workflowId') && value.length > 12) {
return `...${value.slice(-6)}`
}
if (value.length > 20) {
return `${value.slice(0, 17)}...`
}
return value
}
interface AutocompleteSearchProps {
value: string
@@ -35,11 +41,8 @@ export function AutocompleteSearch({
className,
onOpenChange,
}: AutocompleteSearchProps) {
const params = useParams()
const workspaceId = params.workspaceId as string
const workflows = useWorkflowRegistry((state) => state.workflows)
const folders = useFolderStore((state) => state.folders)
const [triggersData, setTriggersData] = useState<TriggerData[]>([])
const workflowsData = useMemo<WorkflowData[]>(() => {
return Object.values(workflows).map((w) => ({
@@ -56,32 +59,13 @@ export function AutocompleteSearch({
}))
}, [folders])
useEffect(() => {
if (!workspaceId) return
const fetchTriggers = async () => {
try {
const response = await fetch(`/api/logs/triggers?workspaceId=${workspaceId}`)
if (!response.ok) return
const data = await response.json()
const triggers: TriggerData[] = data.triggers.map((trigger: string) => {
const metadata = getIntegrationMetadata(trigger)
return {
value: trigger,
label: metadata.label,
color: metadata.color,
}
})
setTriggersData(triggers)
} catch (error) {
logger.error('Failed to fetch triggers:', error)
}
}
fetchTriggers()
}, [workspaceId])
const triggersData = useMemo<TriggerData[]>(() => {
return getTriggerOptions().map((t) => ({
value: t.value,
label: t.label,
color: t.color,
}))
}, [])
const suggestionEngine = useMemo(() => {
return new SearchSuggestions(workflowsData, foldersData, triggersData)
@@ -103,7 +87,6 @@ export function AutocompleteSearch({
suggestions,
sections,
highlightedIndex,
highlightedBadgeIndex,
inputRef,
dropdownRef,
handleInputChange,
@@ -122,7 +105,6 @@ export function AutocompleteSearch({
const lastExternalValue = useRef(value)
useEffect(() => {
// Only re-initialize if value changed externally (not from user typing)
if (value !== lastExternalValue.current) {
lastExternalValue.current = value
const parsed = parseQuery(value)
@@ -130,7 +112,6 @@ export function AutocompleteSearch({
}
}, [value, initializeFromQuery])
// Initial sync on mount
useEffect(() => {
if (value) {
const parsed = parseQuery(value)
@@ -189,40 +170,49 @@ export function AutocompleteSearch({
<div className='flex flex-1 items-center gap-[6px] overflow-x-auto pr-[6px] [scrollbar-width:none] [&::-webkit-scrollbar]:hidden'>
{/* Applied Filter Badges */}
{appliedFilters.map((filter, index) => (
<Button
<Badge
key={`${filter.field}-${filter.value}-${index}`}
variant='outline'
className={cn(
'h-6 flex-shrink-0 gap-1 rounded-[6px] px-2 text-[11px]',
highlightedBadgeIndex === index && 'border'
)}
onClick={(e) => {
e.preventDefault()
removeBadge(index)
role='button'
tabIndex={0}
className='h-6 shrink-0 cursor-pointer whitespace-nowrap rounded-md px-2 text-[11px]'
onClick={() => removeBadge(index)}
onKeyDown={(e) => {
if (e.key === 'Enter' || e.key === ' ') {
e.preventDefault()
removeBadge(index)
}
}}
>
<span className='text-[var(--text-muted)]'>{filter.field}:</span>
<span className='text-[var(--text-primary)]'>
{filter.operator !== '=' && filter.operator}
{filter.originalValue}
{truncateFilterValue(filter.field, filter.originalValue)}
</span>
<X className='h-3 w-3' />
</Button>
<X className='h-3 w-3 shrink-0' />
</Badge>
))}
{/* Text Search Badge (if present) */}
{hasTextSearch && (
<Button
<Badge
variant='outline'
className='h-6 flex-shrink-0 gap-1 rounded-[6px] px-2 text-[11px]'
onClick={(e) => {
e.preventDefault()
handleFiltersChange(appliedFilters, '')
role='button'
tabIndex={0}
className='h-6 shrink-0 cursor-pointer whitespace-nowrap rounded-md px-2 text-[11px]'
onClick={() => handleFiltersChange(appliedFilters, '')}
onKeyDown={(e) => {
if (e.key === 'Enter' || e.key === ' ') {
e.preventDefault()
handleFiltersChange(appliedFilters, '')
}
}}
>
<span className='text-[var(--text-primary)]'>"{textSearch}"</span>
<X className='h-3 w-3' />
</Button>
<span className='max-w-[150px] truncate text-[var(--text-primary)]'>
"{textSearch}"
</span>
<X className='h-3 w-3 shrink-0' />
</Badge>
)}
{/* Input - only current typing */}
@@ -261,9 +251,8 @@ export function AutocompleteSearch({
sideOffset={4}
onOpenAutoFocus={(e) => e.preventDefault()}
>
<div className='max-h-96 overflow-y-auto'>
<div className='max-h-96 overflow-y-auto px-1'>
{sections.length > 0 ? (
// Multi-section layout
<div className='py-1'>
{/* Show all results (no header) */}
{suggestions[0]?.category === 'show-all' && (
@@ -271,9 +260,9 @@ export function AutocompleteSearch({
key={suggestions[0].id}
data-index={0}
className={cn(
'w-full px-3 py-1.5 text-left transition-colors focus:outline-none',
'hover:bg-[var(--surface-9)] dark:hover:bg-[var(--surface-9)]',
highlightedIndex === 0 && 'bg-[var(--surface-9)] dark:bg-[var(--surface-9)]'
'w-full rounded-[6px] px-3 py-2 text-left transition-colors focus-visible:outline-none focus-visible:ring-1 focus-visible:ring-[var(--border-focus)]',
'hover:bg-[var(--surface-9)]',
highlightedIndex === 0 && 'bg-[var(--surface-9)]'
)}
onMouseEnter={() => setHighlightedIndex(0)}
onMouseDown={(e) => {
@@ -287,7 +276,7 @@ export function AutocompleteSearch({
{sections.map((section) => (
<div key={section.title}>
<div className='border-[var(--divider)] border-t px-3 py-1.5 font-medium text-[11px] text-[var(--text-tertiary)] uppercase tracking-wide'>
<div className='px-3 py-1.5 font-medium text-[12px] text-[var(--text-tertiary)] uppercase tracking-wide'>
{section.title}
</div>
{section.suggestions.map((suggestion) => {
@@ -301,9 +290,9 @@ export function AutocompleteSearch({
key={suggestion.id}
data-index={index}
className={cn(
'w-full px-3 py-1.5 text-left transition-colors focus:outline-none',
'hover:bg-[var(--surface-9)] dark:hover:bg-[var(--surface-9)]',
isHighlighted && 'bg-[var(--surface-9)] dark:bg-[var(--surface-9)]'
'w-full rounded-[6px] px-3 py-2 text-left transition-colors focus-visible:outline-none focus-visible:ring-1 focus-visible:ring-[var(--border-focus)]',
'hover:bg-[var(--surface-9)]',
isHighlighted && 'bg-[var(--surface-9)]'
)}
onMouseEnter={() => setHighlightedIndex(index)}
onMouseDown={(e) => {
@@ -312,19 +301,11 @@ export function AutocompleteSearch({
}}
>
<div className='flex items-center justify-between gap-3'>
<div className='flex min-w-0 flex-1 items-center gap-2'>
{suggestion.category === 'trigger' && suggestion.color && (
<div
className='h-2 w-2 flex-shrink-0 rounded-full'
style={{ backgroundColor: suggestion.color }}
/>
)}
<div className='min-w-0 flex-1 truncate text-[13px]'>
{suggestion.label}
</div>
<div className='min-w-0 flex-1 truncate text-[13px]'>
{suggestion.label}
</div>
{suggestion.value !== suggestion.label && (
<div className='flex-shrink-0 font-mono text-[11px] text-[var(--text-muted)]'>
<div className='shrink-0 font-mono text-[11px] text-[var(--text-muted)]'>
{suggestion.category === 'workflow' ||
suggestion.category === 'folder'
? `${suggestion.category}:`
@@ -342,7 +323,7 @@ export function AutocompleteSearch({
// Single section layout
<div className='py-1'>
{suggestionType === 'filters' && (
<div className='border-[var(--divider)] border-b px-3 py-1.5 font-medium text-[11px] text-[var(--text-tertiary)] uppercase tracking-wide'>
<div className='px-3 py-1.5 font-medium text-[12px] text-[var(--text-tertiary)] uppercase tracking-wide'>
SUGGESTED FILTERS
</div>
)}
@@ -352,10 +333,9 @@ export function AutocompleteSearch({
key={suggestion.id}
data-index={index}
className={cn(
'w-full px-3 py-1.5 text-left transition-colors focus:outline-none',
'hover:bg-[var(--surface-9)] dark:hover:bg-[var(--surface-9)]',
index === highlightedIndex &&
'bg-[var(--surface-9)] dark:bg-[var(--surface-9)]'
'w-full rounded-[6px] px-3 py-2 text-left transition-colors focus-visible:outline-none focus-visible:ring-1 focus-visible:ring-[var(--border-focus)]',
'hover:bg-[var(--surface-9)]',
index === highlightedIndex && 'bg-[var(--surface-9)]'
)}
onMouseEnter={() => setHighlightedIndex(index)}
onMouseDown={(e) => {
@@ -364,17 +344,9 @@ export function AutocompleteSearch({
}}
>
<div className='flex items-center justify-between gap-3'>
<div className='flex min-w-0 flex-1 items-center gap-2'>
{suggestion.category === 'trigger' && suggestion.color && (
<div
className='h-2 w-2 flex-shrink-0 rounded-full'
style={{ backgroundColor: suggestion.color }}
/>
)}
<div className='min-w-0 flex-1 text-[13px]'>{suggestion.label}</div>
</div>
<div className='min-w-0 flex-1 text-[13px]'>{suggestion.label}</div>
{suggestion.description && (
<div className='flex-shrink-0 text-[11px] text-[var(--text-muted)]'>
<div className='shrink-0 text-[11px] text-[var(--text-muted)]'>
{suggestion.value}
</div>
)}

View File

@@ -21,21 +21,15 @@ export function useSearchState({
const [currentInput, setCurrentInput] = useState('')
const [textSearch, setTextSearch] = useState('')
// Dropdown state
const [isOpen, setIsOpen] = useState(false)
const [suggestions, setSuggestions] = useState<Suggestion[]>([])
const [sections, setSections] = useState<SuggestionSection[]>([])
const [highlightedIndex, setHighlightedIndex] = useState(-1)
// Badge interaction
const [highlightedBadgeIndex, setHighlightedBadgeIndex] = useState<number | null>(null)
// Refs
const inputRef = useRef<HTMLInputElement>(null)
const dropdownRef = useRef<HTMLDivElement>(null)
const debounceRef = useRef<NodeJS.Timeout | null>(null)
// Update suggestions when input changes
const updateSuggestions = useCallback(
(input: string) => {
const suggestionGroup = getSuggestions(input)
@@ -55,13 +49,10 @@ export function useSearchState({
[getSuggestions]
)
// Handle input changes
const handleInputChange = useCallback(
(value: string) => {
setCurrentInput(value)
setHighlightedBadgeIndex(null) // Clear badge highlight on any input
// Debounce suggestion updates
if (debounceRef.current) {
clearTimeout(debounceRef.current)
}
@@ -73,11 +64,9 @@ export function useSearchState({
[updateSuggestions, debounceMs]
)
// Handle suggestion selection
const handleSuggestionSelect = useCallback(
(suggestion: Suggestion) => {
if (suggestion.category === 'show-all') {
// Treat as text search
setTextSearch(suggestion.value)
setCurrentInput('')
setIsOpen(false)
@@ -85,15 +74,12 @@ export function useSearchState({
return
}
// Check if this is a filter-key suggestion (ends with ':')
if (suggestion.category === 'filters' && suggestion.value.endsWith(':')) {
// Set input to the filter key and keep dropdown open for values
setCurrentInput(suggestion.value)
updateSuggestions(suggestion.value)
return
}
// For filter values, workflows, folders - add as a filter
const newFilter: ParsedFilter = {
field: suggestion.value.split(':')[0] as any,
operator: '=',
@@ -110,15 +96,12 @@ export function useSearchState({
setCurrentInput('')
setTextSearch('')
// Notify parent
onFiltersChange(updatedFilters, '')
// Focus back on input and reopen dropdown with empty suggestions
if (inputRef.current) {
inputRef.current.focus()
}
// Show filter keys dropdown again after selection
setTimeout(() => {
updateSuggestions('')
}, 50)
@@ -126,12 +109,10 @@ export function useSearchState({
[appliedFilters, onFiltersChange, updateSuggestions]
)
// Remove a badge
const removeBadge = useCallback(
(index: number) => {
const updatedFilters = appliedFilters.filter((_, i) => i !== index)
setAppliedFilters(updatedFilters)
setHighlightedBadgeIndex(null)
onFiltersChange(updatedFilters, textSearch)
if (inputRef.current) {
@@ -141,39 +122,22 @@ export function useSearchState({
[appliedFilters, textSearch, onFiltersChange]
)
// Handle keyboard navigation
const handleKeyDown = useCallback(
(event: React.KeyboardEvent) => {
// Backspace on empty input - badge deletion
if (event.key === 'Backspace' && currentInput === '') {
event.preventDefault()
if (highlightedBadgeIndex !== null) {
// Delete highlighted badge
removeBadge(highlightedBadgeIndex)
} else if (appliedFilters.length > 0) {
// Highlight last badge
setHighlightedBadgeIndex(appliedFilters.length - 1)
if (appliedFilters.length > 0) {
event.preventDefault()
removeBadge(appliedFilters.length - 1)
}
return
}
// Clear badge highlight on any other key when not in dropdown navigation
if (
highlightedBadgeIndex !== null &&
!['ArrowDown', 'ArrowUp', 'Enter'].includes(event.key)
) {
setHighlightedBadgeIndex(null)
}
// Enter key
if (event.key === 'Enter') {
event.preventDefault()
if (isOpen && highlightedIndex >= 0 && suggestions[highlightedIndex]) {
handleSuggestionSelect(suggestions[highlightedIndex])
} else if (currentInput.trim()) {
// Submit current input as text search
setTextSearch(currentInput.trim())
setCurrentInput('')
setIsOpen(false)
@@ -182,7 +146,6 @@ export function useSearchState({
return
}
// Dropdown navigation
if (!isOpen) return
switch (event.key) {
@@ -216,7 +179,6 @@ export function useSearchState({
},
[
currentInput,
highlightedBadgeIndex,
appliedFilters,
isOpen,
highlightedIndex,
@@ -227,12 +189,10 @@ export function useSearchState({
]
)
// Handle focus
const handleFocus = useCallback(() => {
updateSuggestions(currentInput)
}, [currentInput, updateSuggestions])
// Handle blur
const handleBlur = useCallback(() => {
setTimeout(() => {
setIsOpen(false)
@@ -240,7 +200,6 @@ export function useSearchState({
}, 150)
}, [])
// Clear all filters
const clearAll = useCallback(() => {
setAppliedFilters([])
setCurrentInput('')
@@ -253,7 +212,6 @@ export function useSearchState({
}
}, [onFiltersChange])
// Initialize from external value (URL params, etc.)
const initializeFromQuery = useCallback((query: string, filters: ParsedFilter[]) => {
setAppliedFilters(filters)
setTextSearch(query)
@@ -261,7 +219,6 @@ export function useSearchState({
}, [])
return {
// State
appliedFilters,
currentInput,
textSearch,
@@ -269,13 +226,10 @@ export function useSearchState({
suggestions,
sections,
highlightedIndex,
highlightedBadgeIndex,
// Refs
inputRef,
dropdownRef,
// Handlers
handleInputChange,
handleSuggestionSelect,
handleKeyDown,
@@ -285,7 +239,6 @@ export function useSearchState({
clearAll,
initializeFromQuery,
// Setters for external control
setHighlightedIndex,
}
}

View File

@@ -347,6 +347,13 @@ export function OAuthRequiredModal({
return
}
if (providerId === 'servicenow') {
// Pass the current URL so we can redirect back after OAuth
const returnUrl = encodeURIComponent(window.location.href)
window.location.href = `/api/auth/servicenow/authorize?returnUrl=${returnUrl}`
return
}
await client.oauth2.link({
providerId,
callbackURL: window.location.href,

View File

@@ -18,12 +18,18 @@ interface McpTool {
inputSchema?: any
}
interface McpServer {
id: string
url?: string
}
interface StoredTool {
type: 'mcp'
title: string
toolId: string
params: {
serverId: string
serverUrl?: string
toolName: string
serverName: string
}
@@ -34,6 +40,7 @@ interface StoredTool {
interface McpToolsListProps {
mcpTools: McpTool[]
mcpServers?: McpServer[]
searchQuery: string
customFilter: (name: string, query: string) => number
onToolSelect: (tool: StoredTool) => void
@@ -45,6 +52,7 @@ interface McpToolsListProps {
*/
export function McpToolsList({
mcpTools,
mcpServers = [],
searchQuery,
customFilter,
onToolSelect,
@@ -59,44 +67,48 @@ export function McpToolsList({
return (
<>
<PopoverSection>MCP Tools</PopoverSection>
{filteredTools.map((mcpTool) => (
<ToolCommand.Item
key={mcpTool.id}
value={mcpTool.name}
onSelect={() => {
if (disabled) return
{filteredTools.map((mcpTool) => {
const server = mcpServers.find((s) => s.id === mcpTool.serverId)
return (
<ToolCommand.Item
key={mcpTool.id}
value={mcpTool.name}
onSelect={() => {
if (disabled) return
const newTool: StoredTool = {
type: 'mcp',
title: mcpTool.name,
toolId: mcpTool.id,
params: {
serverId: mcpTool.serverId,
toolName: mcpTool.name,
serverName: mcpTool.serverName,
},
isExpanded: true,
usageControl: 'auto',
schema: {
...mcpTool.inputSchema,
description: mcpTool.description,
},
}
const newTool: StoredTool = {
type: 'mcp',
title: mcpTool.name,
toolId: mcpTool.id,
params: {
serverId: mcpTool.serverId,
serverUrl: server?.url,
toolName: mcpTool.name,
serverName: mcpTool.serverName,
},
isExpanded: true,
usageControl: 'auto',
schema: {
...mcpTool.inputSchema,
description: mcpTool.description,
},
}
onToolSelect(newTool)
}}
>
<div
className='flex h-[15px] w-[15px] flex-shrink-0 items-center justify-center rounded'
style={{ background: mcpTool.bgColor }}
onToolSelect(newTool)
}}
>
<IconComponent icon={mcpTool.icon} className='h-[11px] w-[11px] text-white' />
</div>
<span className='truncate' title={`${mcpTool.name} (${mcpTool.serverName})`}>
{mcpTool.name}
</span>
</ToolCommand.Item>
))}
<div
className='flex h-[15px] w-[15px] flex-shrink-0 items-center justify-center rounded'
style={{ background: mcpTool.bgColor }}
>
<IconComponent icon={mcpTool.icon} className='h-[11px] w-[11px] text-white' />
</div>
<span className='truncate' title={`${mcpTool.name} (${mcpTool.serverName})`}>
{mcpTool.name}
</span>
</ToolCommand.Item>
)
})}
</>
)
}

View File

@@ -4,6 +4,7 @@ import { useQuery } from '@tanstack/react-query'
import { Loader2, PlusIcon, WrenchIcon, XIcon } from 'lucide-react'
import { useParams } from 'next/navigation'
import {
Badge,
Combobox,
Popover,
PopoverContent,
@@ -12,6 +13,7 @@ import {
PopoverSearch,
PopoverSection,
PopoverTrigger,
Tooltip,
} from '@/components/emcn'
import { McpIcon } from '@/components/icons'
import { Switch } from '@/components/ui/switch'
@@ -55,9 +57,11 @@ import {
type CustomTool as CustomToolDefinition,
useCustomTools,
} from '@/hooks/queries/custom-tools'
import { useMcpServers } from '@/hooks/queries/mcp'
import { useWorkflows } from '@/hooks/queries/workflows'
import { useMcpTools } from '@/hooks/use-mcp-tools'
import { getProviderFromModel, supportsToolUsageControl } from '@/providers/utils'
import { useSettingsModalStore } from '@/stores/settings-modal/store'
import { useSubBlockStore } from '@/stores/workflows/subblock/store'
import {
formatParameterLabel,
@@ -802,6 +806,66 @@ export function ToolInput({
refreshTools,
} = useMcpTools(workspaceId)
const { data: mcpServers = [], isLoading: mcpServersLoading } = useMcpServers(workspaceId)
const openSettingsModal = useSettingsModalStore((state) => state.openModal)
const mcpDataLoading = mcpLoading || mcpServersLoading
/**
* Returns issue info for an MCP tool using shared validation logic.
*/
const getMcpToolIssue = useCallback(
(tool: StoredTool) => {
if (tool.type !== 'mcp') return null
const { getMcpToolIssue: validateTool } = require('@/lib/mcp/tool-validation')
return validateTool(
{
serverId: tool.params?.serverId as string,
serverUrl: tool.params?.serverUrl as string | undefined,
toolName: tool.params?.toolName as string,
schema: tool.schema,
},
mcpServers.map((s) => ({
id: s.id,
url: s.url,
connectionStatus: s.connectionStatus,
lastError: s.lastError,
})),
mcpTools.map((t) => ({
serverId: t.serverId,
name: t.name,
inputSchema: t.inputSchema,
}))
)
},
[mcpTools, mcpServers]
)
const isMcpToolUnavailable = useCallback(
(tool: StoredTool): boolean => {
const { isToolUnavailable } = require('@/lib/mcp/tool-validation')
return isToolUnavailable(getMcpToolIssue(tool))
},
[getMcpToolIssue]
)
const hasMcpToolIssue = useCallback(
(tool: StoredTool): boolean => {
return getMcpToolIssue(tool) !== null
},
[getMcpToolIssue]
)
// Filter out MCP tools from unavailable servers for the dropdown
const availableMcpTools = useMemo(() => {
return mcpTools.filter((mcpTool) => {
const server = mcpServers.find((s) => s.id === mcpTool.serverId)
// Only include tools from connected servers
return server && server.connectionStatus === 'connected'
})
}, [mcpTools, mcpServers])
// Reset search query when popover opens
useEffect(() => {
if (open) {
@@ -1849,9 +1913,10 @@ export function ToolInput({
)
})()}
{/* Display MCP tools */}
{/* Display MCP tools (only from available servers) */}
<McpToolsList
mcpTools={mcpTools}
mcpTools={availableMcpTools}
mcpServers={mcpServers}
searchQuery={searchQuery || ''}
customFilter={customFilter}
onToolSelect={handleMcpToolSelect}
@@ -2040,9 +2105,46 @@ export function ToolInput({
<span className='truncate font-medium text-[13px] text-[var(--text-primary)]'>
{isCustomTool ? customToolTitle : tool.title}
</span>
{isMcpTool &&
!mcpDataLoading &&
(() => {
const issue = getMcpToolIssue(tool)
if (!issue) return null
const { getIssueBadgeLabel } = require('@/lib/mcp/tool-validation')
const serverId = tool.params?.serverId
return (
<div
onClick={(e: React.MouseEvent) => {
e.stopPropagation()
e.preventDefault()
openSettingsModal({ section: 'mcp', mcpServerId: serverId })
}}
>
<Tooltip.Root>
<Tooltip.Trigger asChild>
<Badge
variant='outline'
className='cursor-pointer transition-colors hover:bg-[var(--warning)]/10'
style={{
borderColor: 'var(--warning)',
color: 'var(--warning)',
}}
>
{getIssueBadgeLabel(issue)}
</Badge>
</Tooltip.Trigger>
<Tooltip.Content>
<span className='text-sm'>
{issue.message} · Click to open settings
</span>
</Tooltip.Content>
</Tooltip.Root>
</div>
)
})()}
</div>
<div className='flex flex-shrink-0 items-center gap-[8px]'>
{supportsToolControl && (
{supportsToolControl && !(isMcpTool && isMcpToolUnavailable(tool)) && (
<Popover
open={usageControlPopoverIndex === toolIndex}
onOpenChange={(open) =>
@@ -2386,9 +2488,10 @@ export function ToolInput({
)
})()}
{/* Display MCP tools */}
{/* Display MCP tools (only from available servers) */}
<McpToolsList
mcpTools={mcpTools}
mcpTools={availableMcpTools}
mcpServers={mcpServers}
searchQuery={searchQuery || ''}
customFilter={customFilter}
onToolSelect={handleMcpToolSelect}

View File

@@ -26,7 +26,7 @@ const SUBFLOW_CONFIG = {
},
typeKey: 'loopType' as const,
storeKey: 'loops' as const,
maxIterations: 100,
maxIterations: 1000,
configKeys: {
iterations: 'iterations' as const,
items: 'forEachItems' as const,

View File

@@ -1741,7 +1741,7 @@ export function Terminal() {
)}
{/* Content */}
<div className='flex-1 overflow-x-auto overflow-y-auto'>
<div className={clsx('flex-1 overflow-y-auto', !wrapText && 'overflow-x-auto')}>
{shouldShowCodeDisplay ? (
<OutputCodeContent
code={selectedEntry.input.code}

View File

@@ -252,23 +252,12 @@ export function useNodeUtilities(blocks: Record<string, any>) {
*/
const calculateLoopDimensions = useCallback(
(nodeId: string): { width: number; height: number } => {
const minWidth = CONTAINER_DIMENSIONS.DEFAULT_WIDTH
const minHeight = CONTAINER_DIMENSIONS.DEFAULT_HEIGHT
// Match styling in subflow-node.tsx:
// - Header section: 50px total height
// - Content area: px-[16px] pb-[0px] pt-[16px] pr-[70px]
// Left padding: 16px, Right padding: 64px, Top padding: 16px, Bottom padding: -6px (reduced by additional 6px from 0 to achieve 14px total reduction from original 8px)
// - Children are positioned relative to the content area (after header, inside padding)
const headerHeight = 50
const leftPadding = 16
const rightPadding = 80
const topPadding = 16
const bottomPadding = 16
const childNodes = getNodes().filter((node) => node.parentId === nodeId)
if (childNodes.length === 0) {
return { width: minWidth, height: minHeight }
return {
width: CONTAINER_DIMENSIONS.DEFAULT_WIDTH,
height: CONTAINER_DIMENSIONS.DEFAULT_HEIGHT,
}
}
let maxRight = 0
@@ -276,21 +265,21 @@ export function useNodeUtilities(blocks: Record<string, any>) {
childNodes.forEach((node) => {
const { width: nodeWidth, height: nodeHeight } = getBlockDimensions(node.id)
// Child positions are relative to content area's inner top-left (inside padding)
// Calculate the rightmost and bottommost edges of children
const rightEdge = node.position.x + nodeWidth
const bottomEdge = node.position.y + nodeHeight
maxRight = Math.max(maxRight, rightEdge)
maxBottom = Math.max(maxBottom, bottomEdge)
maxRight = Math.max(maxRight, node.position.x + nodeWidth)
maxBottom = Math.max(maxBottom, node.position.y + nodeHeight)
})
// Container dimensions = header + padding + children bounds + padding
// Width: left padding + max child right edge + right padding (64px)
const width = Math.max(minWidth, leftPadding + maxRight + rightPadding)
// Height: header + top padding + max child bottom edge + bottom padding (8px)
const height = Math.max(minHeight, headerHeight + topPadding + maxBottom + bottomPadding)
const width = Math.max(
CONTAINER_DIMENSIONS.DEFAULT_WIDTH,
CONTAINER_DIMENSIONS.LEFT_PADDING + maxRight + CONTAINER_DIMENSIONS.RIGHT_PADDING
)
const height = Math.max(
CONTAINER_DIMENSIONS.DEFAULT_HEIGHT,
CONTAINER_DIMENSIONS.HEADER_HEIGHT +
CONTAINER_DIMENSIONS.TOP_PADDING +
maxBottom +
CONTAINER_DIMENSIONS.BOTTOM_PADDING
)
return { width, height }
},

View File

@@ -655,6 +655,7 @@ export function useWorkflowExecution() {
setExecutor,
setPendingBlocks,
setActiveBlocks,
workflows,
]
)

View File

@@ -18,6 +18,7 @@ import { useShallow } from 'zustand/react/shallow'
import type { OAuthConnectEventDetail } from '@/lib/copilot/tools/client/other/oauth-request-access'
import { createLogger } from '@/lib/logs/console/logger'
import type { OAuthProvider } from '@/lib/oauth'
import { CONTAINER_DIMENSIONS } from '@/lib/workflows/blocks/block-dimensions'
import { TriggerUtils } from '@/lib/workflows/triggers/triggers'
import { useWorkspacePermissionsContext } from '@/app/workspace/[workspaceId]/providers/workspace-permissions-provider'
import {
@@ -176,6 +177,7 @@ const WorkflowContent = React.memo(() => {
resizeLoopNodes,
updateNodeParent: updateNodeParentUtil,
getNodeAnchorPosition,
getBlockDimensions,
} = useNodeUtilities(blocks)
/** Triggers immediate subflow resize without delays. */
@@ -1501,6 +1503,66 @@ const WorkflowContent = React.memo(() => {
// Only sync non-position changes (like selection) to store if needed
}, [])
/**
* Updates container dimensions in displayNodes during drag.
* This allows live resizing of containers as their children are dragged.
*/
const updateContainerDimensionsDuringDrag = useCallback(
(draggedNodeId: string, draggedNodePosition: { x: number; y: number }) => {
const parentId = blocks[draggedNodeId]?.data?.parentId
if (!parentId) return
setDisplayNodes((currentNodes) => {
const childNodes = currentNodes.filter((n) => n.parentId === parentId)
if (childNodes.length === 0) return currentNodes
let maxRight = 0
let maxBottom = 0
childNodes.forEach((node) => {
const nodePosition = node.id === draggedNodeId ? draggedNodePosition : node.position
const { width: nodeWidth, height: nodeHeight } = getBlockDimensions(node.id)
maxRight = Math.max(maxRight, nodePosition.x + nodeWidth)
maxBottom = Math.max(maxBottom, nodePosition.y + nodeHeight)
})
const newWidth = Math.max(
CONTAINER_DIMENSIONS.DEFAULT_WIDTH,
CONTAINER_DIMENSIONS.LEFT_PADDING + maxRight + CONTAINER_DIMENSIONS.RIGHT_PADDING
)
const newHeight = Math.max(
CONTAINER_DIMENSIONS.DEFAULT_HEIGHT,
CONTAINER_DIMENSIONS.HEADER_HEIGHT +
CONTAINER_DIMENSIONS.TOP_PADDING +
maxBottom +
CONTAINER_DIMENSIONS.BOTTOM_PADDING
)
return currentNodes.map((node) => {
if (node.id === parentId) {
const currentWidth = node.data?.width || CONTAINER_DIMENSIONS.DEFAULT_WIDTH
const currentHeight = node.data?.height || CONTAINER_DIMENSIONS.DEFAULT_HEIGHT
// Only update if dimensions changed
if (newWidth !== currentWidth || newHeight !== currentHeight) {
return {
...node,
data: {
...node.data,
width: newWidth,
height: newHeight,
},
}
}
}
return node
})
})
},
[blocks, getBlockDimensions]
)
/**
* Effect to resize loops when nodes change (add/remove/position change).
* Runs on structural changes only - not during drag (position-only changes).
@@ -1681,6 +1743,11 @@ const WorkflowContent = React.memo(() => {
// Get the current parent ID of the node being dragged
const currentParentId = blocks[node.id]?.data?.parentId || null
// If the node is inside a container, update container dimensions during drag
if (currentParentId) {
updateContainerDimensionsDuringDrag(node.id, node.position)
}
// Check if this is a starter block - starter blocks should never be in containers
const isStarterBlock = node.data?.type === 'starter'
if (isStarterBlock) {
@@ -1812,7 +1879,14 @@ const WorkflowContent = React.memo(() => {
}
}
},
[getNodes, potentialParentId, blocks, getNodeAbsolutePosition, getNodeDepth]
[
getNodes,
potentialParentId,
blocks,
getNodeAbsolutePosition,
getNodeDepth,
updateContainerDimensionsDuringDrag,
]
)
/** Captures initial parent ID and position when drag starts. */

View File

@@ -423,7 +423,21 @@ export function SearchModal({
}
break
case 'workspace':
if (item.isCurrent) {
break
}
if (item.href) {
router.push(item.href)
}
break
case 'workflow':
if (!item.isCurrent && item.href) {
router.push(item.href)
window.dispatchEvent(
new CustomEvent(SIDEBAR_SCROLL_EVENT, { detail: { itemId: item.id } })
)
}
break
case 'page':
case 'doc':
if (item.href) {
@@ -431,12 +445,6 @@ export function SearchModal({
window.open(item.href, '_blank', 'noopener,noreferrer')
} else {
router.push(item.href)
// Scroll to the workflow in the sidebar after navigation
if (item.type === 'workflow') {
window.dispatchEvent(
new CustomEvent(SIDEBAR_SCROLL_EVENT, { detail: { itemId: item.id } })
)
}
}
}
break

View File

@@ -1,8 +1,5 @@
import { Button } from '@/components/emcn'
/**
* Formats transport type for display (e.g., "streamable-http" -> "Streamable-HTTP").
*/
export function formatTransportLabel(transport: string): string {
return transport
.split('-')
@@ -14,10 +11,10 @@ export function formatTransportLabel(transport: string): string {
.join('-')
}
/**
* Formats tools count and names for display.
*/
function formatToolsLabel(tools: any[]): string {
function formatToolsLabel(tools: any[], connectionStatus?: string): string {
if (connectionStatus === 'error') {
return 'Unable to connect'
}
const count = tools.length
const plural = count !== 1 ? 's' : ''
const names = count > 0 ? `: ${tools.map((t) => t.name).join(', ')}` : ''
@@ -29,35 +26,41 @@ interface ServerListItemProps {
tools: any[]
isDeleting: boolean
isLoadingTools?: boolean
isRefreshing?: boolean
onRemove: () => void
onViewDetails: () => void
}
/**
* Renders a single MCP server list item with details and delete actions.
*/
export function ServerListItem({
server,
tools,
isDeleting,
isLoadingTools = false,
isRefreshing = false,
onRemove,
onViewDetails,
}: ServerListItemProps) {
const transportLabel = formatTransportLabel(server.transport || 'http')
const toolsLabel = formatToolsLabel(tools)
const toolsLabel = formatToolsLabel(tools, server.connectionStatus)
const isError = server.connectionStatus === 'error'
return (
<div className='flex items-center justify-between gap-[12px]'>
<div className='flex min-w-0 flex-col justify-center gap-[1px]'>
<div className='flex items-center gap-[6px]'>
<span className='max-w-[280px] truncate font-medium text-[14px]'>
<span className='max-w-[200px] truncate font-medium text-[14px]'>
{server.name || 'Unnamed Server'}
</span>
<span className='text-[13px] text-[var(--text-secondary)]'>({transportLabel})</span>
</div>
<p className='truncate text-[13px] text-[var(--text-muted)]'>
{isLoadingTools && tools.length === 0 ? 'Loading...' : toolsLabel}
<p
className={`truncate text-[13px] ${isError ? 'text-red-500 dark:text-red-400' : 'text-[var(--text-muted)]'}`}
>
{isRefreshing
? 'Refreshing...'
: isLoadingTools && tools.length === 0
? 'Loading...'
: toolsLabel}
</p>
</div>
<div className='flex flex-shrink-0 items-center gap-[4px]'>

View File

@@ -1,9 +1,10 @@
'use client'
import { useCallback, useMemo, useRef, useState } from 'react'
import { useCallback, useEffect, useMemo, useRef, useState } from 'react'
import { Plus, Search } from 'lucide-react'
import { useParams } from 'next/navigation'
import {
Badge,
Button,
Input as EmcnInput,
Modal,
@@ -14,6 +15,7 @@ import {
} from '@/components/emcn'
import { Input } from '@/components/ui'
import { createLogger } from '@/lib/logs/console/logger'
import { getIssueBadgeLabel, getMcpToolIssue, type McpToolIssue } from '@/lib/mcp/tool-validation'
import { checkEnvVarTrigger } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/editor/components/sub-block/components/env-var-dropdown'
import {
useCreateMcpServer,
@@ -21,6 +23,7 @@ import {
useMcpServers,
useMcpToolsQuery,
useRefreshMcpServer,
useStoredMcpTools,
} from '@/hooks/queries/mcp'
import { useMcpServerTest } from '@/hooks/use-mcp-server-test'
import type { InputFieldType, McpServerFormData, McpServerTestResult } from './components'
@@ -44,6 +47,9 @@ interface McpServer {
name?: string
transport?: string
url?: string
connectionStatus?: 'connected' | 'disconnected' | 'error'
lastError?: string | null
lastConnected?: string
}
const logger = createLogger('McpSettings')
@@ -69,11 +75,15 @@ function getTestButtonLabel(
return 'Test Connection'
}
interface MCPProps {
initialServerId?: string | null
}
/**
* MCP Settings component for managing Model Context Protocol servers.
* Handles server CRUD operations, connection testing, and environment variable integration.
*/
export function MCP() {
export function MCP({ initialServerId }: MCPProps) {
const params = useParams()
const workspaceId = params.workspaceId as string
@@ -88,6 +98,7 @@ export function MCP() {
isLoading: toolsLoading,
isFetching: toolsFetching,
} = useMcpToolsQuery(workspaceId)
const { data: storedTools = [] } = useStoredMcpTools(workspaceId)
const createServerMutation = useCreateMcpServer()
const deleteServerMutation = useDeleteMcpServer()
const refreshServerMutation = useRefreshMcpServer()
@@ -106,7 +117,9 @@ export function MCP() {
const [serverToDelete, setServerToDelete] = useState<{ id: string; name: string } | null>(null)
const [selectedServerId, setSelectedServerId] = useState<string | null>(null)
const [refreshStatus, setRefreshStatus] = useState<'idle' | 'refreshing' | 'refreshed'>('idle')
const [refreshingServers, setRefreshingServers] = useState<
Record<string, 'refreshing' | 'refreshed'>
>({})
const [showEnvVars, setShowEnvVars] = useState(false)
const [envSearchTerm, setEnvSearchTerm] = useState('')
@@ -114,10 +127,16 @@ export function MCP() {
const [activeInputField, setActiveInputField] = useState<InputFieldType | null>(null)
const [activeHeaderIndex, setActiveHeaderIndex] = useState<number | null>(null)
// Scroll position state for formatted text overlays
const [urlScrollLeft, setUrlScrollLeft] = useState(0)
const [headerScrollLeft, setHeaderScrollLeft] = useState<Record<string, number>>({})
// Auto-select server when initialServerId is provided
useEffect(() => {
if (initialServerId && servers.some((s) => s.id === initialServerId)) {
setSelectedServerId(initialServerId)
}
}, [initialServerId, servers])
/**
* Resets environment variable dropdown state.
*/
@@ -237,6 +256,7 @@ export function MCP() {
/**
* Adds a new MCP server after validating and testing the connection.
* Only creates the server if connection test succeeds.
*/
const handleAddServer = useCallback(async () => {
if (!formData.name.trim()) return
@@ -253,12 +273,12 @@ export function MCP() {
workspaceId,
}
if (!testResult) {
const result = await testConnection(serverConfig)
if (!result.success) return
}
const connectionResult = await testConnection(serverConfig)
if (testResult && !testResult.success) return
if (!connectionResult.success) {
logger.error('Connection test failed, server not added:', connectionResult.error)
return
}
await createServerMutation.mutateAsync({
workspaceId,
@@ -279,15 +299,7 @@ export function MCP() {
} finally {
setIsAddingServer(false)
}
}, [
formData,
testResult,
testConnection,
createServerMutation,
workspaceId,
headersToRecord,
resetForm,
])
}, [formData, testConnection, createServerMutation, workspaceId, headersToRecord, resetForm])
/**
* Opens the delete confirmation dialog for an MCP server.
@@ -297,9 +309,6 @@ export function MCP() {
setShowDeleteDialog(true)
}, [])
/**
* Confirms and executes the server deletion.
*/
const confirmDeleteServer = useCallback(async () => {
if (!serverToDelete) return
@@ -399,14 +408,24 @@ export function MCP() {
const handleRefreshServer = useCallback(
async (serverId: string) => {
try {
setRefreshStatus('refreshing')
setRefreshingServers((prev) => ({ ...prev, [serverId]: 'refreshing' }))
await refreshServerMutation.mutateAsync({ workspaceId, serverId })
logger.info(`Refreshed MCP server: ${serverId}`)
setRefreshStatus('refreshed')
setTimeout(() => setRefreshStatus('idle'), 2000)
setRefreshingServers((prev) => ({ ...prev, [serverId]: 'refreshed' }))
setTimeout(() => {
setRefreshingServers((prev) => {
const newState = { ...prev }
delete newState[serverId]
return newState
})
}, 2000)
} catch (error) {
logger.error('Failed to refresh MCP server:', error)
setRefreshStatus('idle')
setRefreshingServers((prev) => {
const newState = { ...prev }
delete newState[serverId]
return newState
})
}
},
[refreshServerMutation, workspaceId]
@@ -432,6 +451,53 @@ export function MCP() {
const isSubmitDisabled = serversLoading || isAddingServer || !isFormValid
const testButtonLabel = getTestButtonLabel(testResult, isTestingConnection)
/**
* Gets issues for stored tools that reference a specific server tool.
* Returns issues from all workflows that have stored this tool.
*/
const getStoredToolIssues = useCallback(
(serverId: string, toolName: string): { issue: McpToolIssue; workflowName: string }[] => {
const relevantStoredTools = storedTools.filter(
(st) => st.serverId === serverId && st.toolName === toolName
)
const serverStates = servers.map((s) => ({
id: s.id,
url: s.url,
connectionStatus: s.connectionStatus,
lastError: s.lastError || undefined,
}))
const discoveredTools = mcpToolsData.map((t) => ({
serverId: t.serverId,
name: t.name,
inputSchema: t.inputSchema,
}))
const issues: { issue: McpToolIssue; workflowName: string }[] = []
for (const storedTool of relevantStoredTools) {
const issue = getMcpToolIssue(
{
serverId: storedTool.serverId,
serverUrl: storedTool.serverUrl,
toolName: storedTool.toolName,
schema: storedTool.schema,
},
serverStates,
discoveredTools
)
if (issue) {
issues.push({ issue, workflowName: storedTool.workflowName })
}
}
return issues
},
[storedTools, servers, mcpToolsData]
)
if (selectedServer) {
const { server, tools } = selectedServer
const transportLabel = formatTransportLabel(server.transport || 'http')
@@ -463,6 +529,15 @@ export function MCP() {
</div>
)}
{server.connectionStatus === 'error' && (
<div className='flex flex-col gap-[8px]'>
<span className='font-medium text-[13px] text-[var(--text-primary)]'>Status</span>
<p className='text-[14px] text-red-500 dark:text-red-400'>
{server.lastError || 'Unable to connect'}
</p>
</div>
)}
<div className='flex flex-col gap-[8px]'>
<span className='font-medium text-[13px] text-[var(--text-primary)]'>
Tools ({tools.length})
@@ -471,21 +546,37 @@ export function MCP() {
<p className='text-[13px] text-[var(--text-muted)]'>No tools available</p>
) : (
<div className='flex flex-col gap-[8px]'>
{tools.map((tool) => (
<div
key={tool.name}
className='rounded-[6px] border bg-[var(--surface-3)] px-[10px] py-[8px]'
>
<p className='font-medium text-[13px] text-[var(--text-primary)]'>
{tool.name}
</p>
{tool.description && (
<p className='mt-[4px] text-[13px] text-[var(--text-tertiary)]'>
{tool.description}
</p>
)}
</div>
))}
{tools.map((tool) => {
const issues = getStoredToolIssues(server.id, tool.name)
return (
<div
key={tool.name}
className='rounded-[6px] border bg-[var(--surface-3)] px-[10px] py-[8px]'
>
<div className='flex items-center justify-between'>
<p className='font-medium text-[13px] text-[var(--text-primary)]'>
{tool.name}
</p>
{issues.length > 0 && (
<Badge
variant='outline'
style={{
borderColor: 'var(--warning)',
color: 'var(--warning)',
}}
>
{getIssueBadgeLabel(issues[0].issue)}
</Badge>
)}
</div>
{tool.description && (
<p className='mt-[4px] text-[13px] text-[var(--text-tertiary)]'>
{tool.description}
</p>
)}
</div>
)
})}
</div>
)}
</div>
@@ -496,11 +587,11 @@ export function MCP() {
<Button
onClick={() => handleRefreshServer(server.id)}
variant='default'
disabled={refreshStatus !== 'idle'}
disabled={!!refreshingServers[server.id]}
>
{refreshStatus === 'refreshing'
{refreshingServers[server.id] === 'refreshing'
? 'Refreshing...'
: refreshStatus === 'refreshed'
: refreshingServers[server.id] === 'refreshed'
? 'Refreshed'
: 'Refresh Tools'}
</Button>
@@ -672,6 +763,7 @@ export function MCP() {
tools={tools}
isDeleting={deletingServers.has(server.id)}
isLoadingTools={isLoadingTools}
isRefreshing={refreshingServers[server.id] === 'refreshing'}
onRemove={() => handleRemoveServer(server.id, server.name || 'this server')}
onViewDetails={() => handleViewDetails(server.id)}
/>

View File

@@ -46,6 +46,7 @@ import { generalSettingsKeys, useGeneralSettings } from '@/hooks/queries/general
import { organizationKeys, useOrganizations } from '@/hooks/queries/organization'
import { ssoKeys, useSSOProviders } from '@/hooks/queries/sso'
import { subscriptionKeys, useSubscriptionData } from '@/hooks/queries/subscription'
import { useSettingsModalStore } from '@/stores/settings-modal/store'
const isBillingEnabled = isTruthy(getEnv('NEXT_PUBLIC_BILLING_ENABLED'))
const isSSOEnabled = isTruthy(getEnv('NEXT_PUBLIC_SSO_ENABLED'))
@@ -134,6 +135,8 @@ const allNavigationItems: NavigationItem[] = [
export function SettingsModal({ open, onOpenChange }: SettingsModalProps) {
const [activeSection, setActiveSection] = useState<SettingsSection>('general')
const { initialSection, mcpServerId, clearInitialState } = useSettingsModalStore()
const [pendingMcpServerId, setPendingMcpServerId] = useState<string | null>(null)
const { data: session } = useSession()
const queryClient = useQueryClient()
const { data: organizationsData } = useOrganizations()
@@ -247,6 +250,24 @@ export function SettingsModal({ open, onOpenChange }: SettingsModalProps) {
// React Query hook automatically loads and syncs settings
useGeneralSettings()
// Apply initial section from store when modal opens
useEffect(() => {
if (open && initialSection) {
setActiveSection(initialSection)
if (mcpServerId) {
setPendingMcpServerId(mcpServerId)
}
clearInitialState()
}
}, [open, initialSection, mcpServerId, clearInitialState])
// Clear pending server ID when section changes away from MCP
useEffect(() => {
if (activeSection !== 'mcp') {
setPendingMcpServerId(null)
}
}, [activeSection])
useEffect(() => {
const handleOpenSettings = (event: CustomEvent<{ tab: SettingsSection }>) => {
setActiveSection(event.detail.tab)
@@ -436,7 +457,7 @@ export function SettingsModal({ open, onOpenChange }: SettingsModalProps) {
{isBillingEnabled && activeSection === 'team' && <TeamManagement />}
{activeSection === 'sso' && <SSO />}
{activeSection === 'copilot' && <Copilot />}
{activeSection === 'mcp' && <MCP />}
{activeSection === 'mcp' && <MCP initialServerId={pendingMcpServerId} />}
{activeSection === 'custom-tools' && <CustomTools />}
</SModalMainBody>
</SModalMain>

View File

@@ -32,6 +32,7 @@ import {
} from '@/app/workspace/[workspaceId]/w/hooks'
import { useFolderStore } from '@/stores/folders/store'
import { useSearchModalStore } from '@/stores/search-modal/store'
import { useSettingsModalStore } from '@/stores/settings-modal/store'
import { MIN_SIDEBAR_WIDTH, useSidebarStore } from '@/stores/sidebar/store'
const logger = createLogger('Sidebar')
@@ -88,7 +89,11 @@ export function Sidebar() {
const [isWorkspaceMenuOpen, setIsWorkspaceMenuOpen] = useState(false)
const [isHelpModalOpen, setIsHelpModalOpen] = useState(false)
const [isSettingsModalOpen, setIsSettingsModalOpen] = useState(false)
const {
isOpen: isSettingsModalOpen,
openModal: openSettingsModal,
closeModal: closeSettingsModal,
} = useSettingsModalStore()
/** Listens for external events to open help modal */
useEffect(() => {
@@ -219,7 +224,7 @@ export function Sidebar() {
id: 'settings',
label: 'Settings',
icon: Settings,
onClick: () => setIsSettingsModalOpen(true),
onClick: () => openSettingsModal(),
},
],
[workspaceId]
@@ -654,7 +659,10 @@ export function Sidebar() {
{/* Footer Navigation Modals */}
<HelpModal open={isHelpModalOpen} onOpenChange={setIsHelpModalOpen} />
<SettingsModal open={isSettingsModalOpen} onOpenChange={setIsSettingsModalOpen} />
<SettingsModal
open={isSettingsModalOpen}
onOpenChange={(open) => (open ? openSettingsModal() : closeSettingsModal())}
/>
{/* Hidden file input for workspace import */}
<input

View File

@@ -8,6 +8,8 @@ import {
getHostedModels,
getMaxTemperature,
getProviderIcon,
getReasoningEffortValuesForModel,
getVerbosityValuesForModel,
MODELS_WITH_REASONING_EFFORT,
MODELS_WITH_VERBOSITY,
providers,
@@ -114,12 +116,47 @@ export const AgentBlock: BlockConfig<AgentResponse> = {
type: 'dropdown',
placeholder: 'Select reasoning effort...',
options: [
{ label: 'none', id: 'none' },
{ label: 'minimal', id: 'minimal' },
{ label: 'low', id: 'low' },
{ label: 'medium', id: 'medium' },
{ label: 'high', id: 'high' },
],
dependsOn: ['model'],
fetchOptions: async (blockId: string) => {
const { useSubBlockStore } = await import('@/stores/workflows/subblock/store')
const { useWorkflowRegistry } = await import('@/stores/workflows/registry/store')
const activeWorkflowId = useWorkflowRegistry.getState().activeWorkflowId
if (!activeWorkflowId) {
return [
{ label: 'low', id: 'low' },
{ label: 'medium', id: 'medium' },
{ label: 'high', id: 'high' },
]
}
const workflowValues = useSubBlockStore.getState().workflowValues[activeWorkflowId]
const blockValues = workflowValues?.[blockId]
const modelValue = blockValues?.model as string
if (!modelValue) {
return [
{ label: 'low', id: 'low' },
{ label: 'medium', id: 'medium' },
{ label: 'high', id: 'high' },
]
}
const validOptions = getReasoningEffortValuesForModel(modelValue)
if (!validOptions) {
return [
{ label: 'low', id: 'low' },
{ label: 'medium', id: 'medium' },
{ label: 'high', id: 'high' },
]
}
return validOptions.map((opt) => ({ label: opt, id: opt }))
},
value: () => 'medium',
condition: {
field: 'model',
@@ -136,6 +173,43 @@ export const AgentBlock: BlockConfig<AgentResponse> = {
{ label: 'medium', id: 'medium' },
{ label: 'high', id: 'high' },
],
dependsOn: ['model'],
fetchOptions: async (blockId: string) => {
const { useSubBlockStore } = await import('@/stores/workflows/subblock/store')
const { useWorkflowRegistry } = await import('@/stores/workflows/registry/store')
const activeWorkflowId = useWorkflowRegistry.getState().activeWorkflowId
if (!activeWorkflowId) {
return [
{ label: 'low', id: 'low' },
{ label: 'medium', id: 'medium' },
{ label: 'high', id: 'high' },
]
}
const workflowValues = useSubBlockStore.getState().workflowValues[activeWorkflowId]
const blockValues = workflowValues?.[blockId]
const modelValue = blockValues?.model as string
if (!modelValue) {
return [
{ label: 'low', id: 'low' },
{ label: 'medium', id: 'medium' },
{ label: 'high', id: 'high' },
]
}
const validOptions = getVerbosityValuesForModel(modelValue)
if (!validOptions) {
return [
{ label: 'low', id: 'low' },
{ label: 'medium', id: 'medium' },
{ label: 'high', id: 'high' },
]
}
return validOptions.map((opt) => ({ label: opt, id: opt }))
},
value: () => 'medium',
condition: {
field: 'model',
@@ -166,6 +240,28 @@ export const AgentBlock: BlockConfig<AgentResponse> = {
value: providers['azure-openai'].models,
},
},
{
id: 'vertexProject',
title: 'Vertex AI Project',
type: 'short-input',
placeholder: 'your-gcp-project-id',
connectionDroppable: false,
condition: {
field: 'model',
value: providers.vertex.models,
},
},
{
id: 'vertexLocation',
title: 'Vertex AI Location',
type: 'short-input',
placeholder: 'us-central1',
connectionDroppable: false,
condition: {
field: 'model',
value: providers.vertex.models,
},
},
{
id: 'tools',
title: 'Tools',
@@ -465,6 +561,8 @@ Example 3 (Array Input):
apiKey: { type: 'string', description: 'Provider API key' },
azureEndpoint: { type: 'string', description: 'Azure OpenAI endpoint URL' },
azureApiVersion: { type: 'string', description: 'Azure API version' },
vertexProject: { type: 'string', description: 'Google Cloud project ID for Vertex AI' },
vertexLocation: { type: 'string', description: 'Google Cloud location for Vertex AI' },
responseFormat: {
type: 'json',
description: 'JSON response format schema',

View File

@@ -239,6 +239,28 @@ export const EvaluatorBlock: BlockConfig<EvaluatorResponse> = {
value: providers['azure-openai'].models,
},
},
{
id: 'vertexProject',
title: 'Vertex AI Project',
type: 'short-input',
placeholder: 'your-gcp-project-id',
connectionDroppable: false,
condition: {
field: 'model',
value: providers.vertex.models,
},
},
{
id: 'vertexLocation',
title: 'Vertex AI Location',
type: 'short-input',
placeholder: 'us-central1',
connectionDroppable: false,
condition: {
field: 'model',
value: providers.vertex.models,
},
},
{
id: 'temperature',
title: 'Temperature',
@@ -356,6 +378,14 @@ export const EvaluatorBlock: BlockConfig<EvaluatorResponse> = {
apiKey: { type: 'string' as ParamType, description: 'Provider API key' },
azureEndpoint: { type: 'string' as ParamType, description: 'Azure OpenAI endpoint URL' },
azureApiVersion: { type: 'string' as ParamType, description: 'Azure API version' },
vertexProject: {
type: 'string' as ParamType,
description: 'Google Cloud project ID for Vertex AI',
},
vertexLocation: {
type: 'string' as ParamType,
description: 'Google Cloud location for Vertex AI',
},
temperature: {
type: 'number' as ParamType,
description: 'Response randomness level (low for consistent evaluation)',

View File

@@ -188,6 +188,28 @@ export const RouterBlock: BlockConfig<RouterResponse> = {
value: providers['azure-openai'].models,
},
},
{
id: 'vertexProject',
title: 'Vertex AI Project',
type: 'short-input',
placeholder: 'your-gcp-project-id',
connectionDroppable: false,
condition: {
field: 'model',
value: providers.vertex.models,
},
},
{
id: 'vertexLocation',
title: 'Vertex AI Location',
type: 'short-input',
placeholder: 'us-central1',
connectionDroppable: false,
condition: {
field: 'model',
value: providers.vertex.models,
},
},
{
id: 'temperature',
title: 'Temperature',
@@ -235,6 +257,8 @@ export const RouterBlock: BlockConfig<RouterResponse> = {
apiKey: { type: 'string', description: 'Provider API key' },
azureEndpoint: { type: 'string', description: 'Azure OpenAI endpoint URL' },
azureApiVersion: { type: 'string', description: 'Azure API version' },
vertexProject: { type: 'string', description: 'Google Cloud project ID for Vertex AI' },
vertexLocation: { type: 'string', description: 'Google Cloud location for Vertex AI' },
temperature: {
type: 'number',
description: 'Response randomness level (low for consistent routing)',

View File

@@ -155,15 +155,6 @@ export const ScheduleBlock: BlockConfig = {
condition: { field: 'scheduleType', value: ['minutes', 'hourly'], not: true },
},
{
id: 'inputFormat',
title: 'Input Format',
type: 'input-format',
description:
'Define input parameters that will be available when the schedule triggers. Use Value to set default values for scheduled executions.',
mode: 'trigger',
},
{
id: 'scheduleSave',
type: 'schedule-save',

View File

@@ -0,0 +1,258 @@
import { ServiceNowIcon } from '@/components/icons'
import type { BlockConfig } from '@/blocks/types'
import { AuthMode } from '@/blocks/types'
import type { ServiceNowResponse } from '@/tools/servicenow/types'
export const ServiceNowBlock: BlockConfig<ServiceNowResponse> = {
type: 'servicenow',
name: 'ServiceNow',
description: 'Create, read, update, delete, and bulk import ServiceNow records',
authMode: AuthMode.OAuth,
hideFromToolbar: true,
longDescription:
'Integrate ServiceNow into your workflow. Can create, read, update, and delete records in any ServiceNow table (incidents, tasks, users, etc.). Supports bulk import operations for data migration and ETL.',
docsLink: 'https://docs.sim.ai/tools/servicenow',
category: 'tools',
bgColor: '#032D42',
icon: ServiceNowIcon,
subBlocks: [
// Operation selector
{
id: 'operation',
title: 'Operation',
type: 'dropdown',
options: [
{ label: 'Create Record', id: 'create' },
{ label: 'Read Records', id: 'read' },
{ label: 'Update Record', id: 'update' },
{ label: 'Delete Record', id: 'delete' },
],
value: () => 'read',
},
// Instance URL
{
id: 'instanceUrl',
title: 'Instance URL',
type: 'short-input',
placeholder: 'https://instance.service-now.com',
required: true,
description: 'Your ServiceNow instance URL',
},
// OAuth Credential
{
id: 'credential',
title: 'ServiceNow Account',
type: 'oauth-input',
serviceId: 'servicenow',
requiredScopes: ['useraccount'],
placeholder: 'Select ServiceNow account',
required: true,
},
// Table Name
{
id: 'tableName',
title: 'Table Name',
type: 'short-input',
placeholder: 'incident, task, sys_user, etc.',
required: true,
description: 'ServiceNow table name',
},
// Create-specific: Fields
{
id: 'fields',
title: 'Fields (JSON)',
type: 'code',
language: 'json',
placeholder: '{\n "short_description": "Issue description",\n "priority": "1"\n}',
condition: { field: 'operation', value: 'create' },
required: true,
wandConfig: {
enabled: true,
maintainHistory: true,
prompt: `You are an expert ServiceNow developer. Generate ServiceNow record field objects as JSON based on the user's request.
### CONTEXT
ServiceNow records use specific field names depending on the table. Common tables and their key fields include:
- incident: short_description, description, priority (1-5), urgency (1-3), impact (1-3), caller_id, assignment_group, assigned_to, category, subcategory, state
- task: short_description, description, priority, assignment_group, assigned_to, state
- sys_user: user_name, first_name, last_name, email, active, department, title
- change_request: short_description, description, type, risk, impact, priority, assignment_group
### RULES
- Output ONLY a valid JSON object, starting with { and ending with }
- Use correct ServiceNow field names for the target table
- Values should be strings unless the field specifically requires another type
- For reference fields (like caller_id, assigned_to), use sys_id values or display values
- Do not include sys_id in create operations (it's auto-generated)
### EXAMPLE
User: "Create a high priority incident for network outage"
Output: {"short_description": "Network outage", "description": "Network connectivity issue affecting users", "priority": "1", "urgency": "1", "impact": "1", "category": "Network"}`,
generationType: 'json-object',
},
},
// Read-specific: Query options
{
id: 'sysId',
title: 'Record sys_id',
type: 'short-input',
placeholder: 'Specific record sys_id (optional)',
condition: { field: 'operation', value: 'read' },
},
{
id: 'number',
title: 'Record Number',
type: 'short-input',
placeholder: 'e.g., INC0010001 (optional)',
condition: { field: 'operation', value: 'read' },
},
{
id: 'query',
title: 'Query String',
type: 'short-input',
placeholder: 'active=true^priority=1',
condition: { field: 'operation', value: 'read' },
description: 'ServiceNow encoded query string',
},
{
id: 'limit',
title: 'Limit',
type: 'short-input',
placeholder: '10',
condition: { field: 'operation', value: 'read' },
},
{
id: 'fields',
title: 'Fields to Return',
type: 'short-input',
placeholder: 'number,short_description,priority',
condition: { field: 'operation', value: 'read' },
description: 'Comma-separated list of fields',
},
// Update-specific: sysId and fields
{
id: 'sysId',
title: 'Record sys_id',
type: 'short-input',
placeholder: 'Record sys_id to update',
condition: { field: 'operation', value: 'update' },
required: true,
},
{
id: 'fields',
title: 'Fields to Update (JSON)',
type: 'code',
language: 'json',
placeholder: '{\n "state": "2",\n "assigned_to": "user.sys_id"\n}',
condition: { field: 'operation', value: 'update' },
required: true,
wandConfig: {
enabled: true,
maintainHistory: true,
prompt: `You are an expert ServiceNow developer. Generate ServiceNow record update field objects as JSON based on the user's request.
### CONTEXT
ServiceNow records use specific field names depending on the table. Common update scenarios include:
- incident: state (1=New, 2=In Progress, 3=On Hold, 6=Resolved, 7=Closed), assigned_to, work_notes, close_notes, close_code
- task: state, assigned_to, work_notes, percent_complete
- change_request: state, risk, approval, work_notes
### RULES
- Output ONLY a valid JSON object, starting with { and ending with }
- Include only the fields that need to be updated
- Use correct ServiceNow field names for the target table
- For state transitions, use the correct numeric state values
- work_notes and comments fields append to existing values
### EXAMPLE
User: "Assign the incident to John and set to in progress"
Output: {"state": "2", "assigned_to": "john.doe", "work_notes": "Assigned and starting investigation"}`,
generationType: 'json-object',
},
},
// Delete-specific: sysId
{
id: 'sysId',
title: 'Record sys_id',
type: 'short-input',
placeholder: 'Record sys_id to delete',
condition: { field: 'operation', value: 'delete' },
required: true,
},
],
tools: {
access: [
'servicenow_create_record',
'servicenow_read_record',
'servicenow_update_record',
'servicenow_delete_record',
],
config: {
tool: (params) => {
switch (params.operation) {
case 'create':
return 'servicenow_create_record'
case 'read':
return 'servicenow_read_record'
case 'update':
return 'servicenow_update_record'
case 'delete':
return 'servicenow_delete_record'
default:
throw new Error(`Invalid ServiceNow operation: ${params.operation}`)
}
},
params: (params) => {
const { operation, fields, records, credential, ...rest } = params
// Parse JSON fields if provided
let parsedFields: Record<string, any> | undefined
if (fields && (operation === 'create' || operation === 'update')) {
try {
parsedFields = typeof fields === 'string' ? JSON.parse(fields) : fields
} catch (error) {
throw new Error(
`Invalid JSON in fields: ${error instanceof Error ? error.message : String(error)}`
)
}
}
// Validate OAuth credential
if (!credential) {
throw new Error('ServiceNow account credential is required')
}
// Build params
const baseParams: Record<string, any> = {
...rest,
credential,
}
if (operation === 'create' || operation === 'update') {
return {
...baseParams,
fields: parsedFields,
}
}
return baseParams
},
},
},
inputs: {
operation: { type: 'string', description: 'Operation to perform' },
instanceUrl: { type: 'string', description: 'ServiceNow instance URL' },
credential: { type: 'string', description: 'ServiceNow OAuth credential ID' },
tableName: { type: 'string', description: 'Table name' },
sysId: { type: 'string', description: 'Record sys_id' },
number: { type: 'string', description: 'Record number' },
query: { type: 'string', description: 'Query string' },
limit: { type: 'number', description: 'Result limit' },
fields: { type: 'json', description: 'Fields object or JSON string' },
},
outputs: {
record: { type: 'json', description: 'Single ServiceNow record' },
records: { type: 'json', description: 'Array of ServiceNow records' },
success: { type: 'boolean', description: 'Operation success status' },
metadata: { type: 'json', description: 'Operation metadata' },
},
}

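For the Query String field above, a quick sketch of how a Read operation's inputs plausibly map onto the ServiceNow Table API (the sysparm_* names are standard ServiceNow REST parameters; whether the underlying tool builds exactly this URL is an assumption):

const instanceUrl = 'https://instance.service-now.com'
const query = 'active=true^priority=1^ORDERBYDESCsys_created_on' // '^' chains AND clauses in an encoded query
const url =
  `${instanceUrl}/api/now/table/incident` +
  `?sysparm_query=${encodeURIComponent(query)}` +
  '&sysparm_limit=10&sysparm_fields=number,short_description,priority'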
View File

@@ -99,6 +99,28 @@ export const TranslateBlock: BlockConfig = {
value: providers['azure-openai'].models,
},
},
{
id: 'vertexProject',
title: 'Vertex AI Project',
type: 'short-input',
placeholder: 'your-gcp-project-id',
connectionDroppable: false,
condition: {
field: 'model',
value: providers.vertex.models,
},
},
{
id: 'vertexLocation',
title: 'Vertex AI Location',
type: 'short-input',
placeholder: 'us-central1',
connectionDroppable: false,
condition: {
field: 'model',
value: providers.vertex.models,
},
},
{
id: 'systemPrompt',
title: 'System Prompt',
@@ -120,6 +142,8 @@ export const TranslateBlock: BlockConfig = {
apiKey: params.apiKey,
azureEndpoint: params.azureEndpoint,
azureApiVersion: params.azureApiVersion,
vertexProject: params.vertexProject,
vertexLocation: params.vertexLocation,
}),
},
},
@@ -129,6 +153,8 @@ export const TranslateBlock: BlockConfig = {
apiKey: { type: 'string', description: 'Provider API key' },
azureEndpoint: { type: 'string', description: 'Azure OpenAI endpoint URL' },
azureApiVersion: { type: 'string', description: 'Azure API version' },
vertexProject: { type: 'string', description: 'Google Cloud project ID for Vertex AI' },
vertexLocation: { type: 'string', description: 'Google Cloud location for Vertex AI' },
systemPrompt: { type: 'string', description: 'Translation instructions' },
},
outputs: {

View File

@@ -96,6 +96,7 @@ import { SearchBlock } from '@/blocks/blocks/search'
import { SendGridBlock } from '@/blocks/blocks/sendgrid'
import { SentryBlock } from '@/blocks/blocks/sentry'
import { SerperBlock } from '@/blocks/blocks/serper'
import { ServiceNowBlock } from '@/blocks/blocks/servicenow'
import { SftpBlock } from '@/blocks/blocks/sftp'
import { SharepointBlock } from '@/blocks/blocks/sharepoint'
import { ShopifyBlock } from '@/blocks/blocks/shopify'
@@ -238,6 +239,7 @@ export const registry: Record<string, BlockConfig> = {
search: SearchBlock,
sendgrid: SendGridBlock,
sentry: SentryBlock,
servicenow: ServiceNowBlock,
serper: SerperBlock,
sharepoint: SharepointBlock,
shopify: ShopifyBlock,

View File

@@ -291,7 +291,7 @@ function CodeRow({ index, style, ...props }: RowComponentProps<CodeRowProps>) {
const line = lines[index]
return (
<div style={style} className='flex' data-row-index={index}>
<div style={style} className={cn('flex', wrapText && 'overflow-hidden')} data-row-index={index}>
{showGutter && (
<div
className='flex-shrink-0 select-none pr-0.5 text-right text-[var(--text-muted)] text-xs tabular-nums leading-[21px] dark:text-[#a8a8a8]'
@@ -303,7 +303,7 @@ function CodeRow({ index, style, ...props }: RowComponentProps<CodeRowProps>) {
<pre
className={cn(
'm-0 flex-1 pr-2 pl-2 font-mono text-[13px] text-[var(--text-primary)] leading-[21px] dark:text-[#eeeeee]',
wrapText ? 'whitespace-pre-wrap break-words' : 'whitespace-pre'
wrapText ? 'min-w-0 whitespace-pre-wrap break-words' : 'whitespace-pre'
)}
dangerouslySetInnerHTML={{ __html: line.html || '&nbsp;' }}
/>
@@ -625,7 +625,7 @@ const VirtualizedViewerInner = memo(function VirtualizedViewerInner({
rowComponent={CodeRow}
rowProps={rowProps}
overscanCount={5}
className='overflow-x-auto'
className={wrapText ? 'overflow-x-hidden' : 'overflow-x-auto'}
/>
</div>
)

View File

@@ -2452,6 +2452,56 @@ export const GeminiIcon = (props: SVGProps<SVGSVGElement>) => (
</svg>
)
export const VertexIcon = (props: SVGProps<SVGSVGElement>) => (
<svg
{...props}
id='standard_product_icon'
xmlns='http://www.w3.org/2000/svg'
version='1.1'
viewBox='0 0 512 512'
>
<g id='bounding_box'>
<rect width='512' height='512' fill='none' />
</g>
<g id='art'>
<path
d='M128,244.99c-8.84,0-16-7.16-16-16v-95.97c0-8.84,7.16-16,16-16s16,7.16,16,16v95.97c0,8.84-7.16,16-16,16Z'
fill='#ea4335'
/>
<path
d='M256,458c-2.98,0-5.97-.83-8.59-2.5l-186-122c-7.46-4.74-9.65-14.63-4.91-22.09,4.75-7.46,14.64-9.65,22.09-4.91l177.41,116.53,177.41-116.53c7.45-4.74,17.34-2.55,22.09,4.91,4.74,7.46,2.55,17.34-4.91,22.09l-186,122c-2.62,1.67-5.61,2.5-8.59,2.5Z'
fill='#fbbc04'
/>
<path
d='M256,388.03c-8.84,0-16-7.16-16-16v-73.06c0-8.84,7.16-16,16-16s16,7.16,16,16v73.06c0,8.84-7.16,16-16,16Z'
fill='#34a853'
/>
<circle cx='128' cy='70' r='16' fill='#ea4335' />
<circle cx='128' cy='292' r='16' fill='#ea4335' />
<path
d='M384.23,308.01c-8.82,0-15.98-7.14-16-15.97l-.23-94.01c-.02-8.84,7.13-16.02,15.97-16.03h.04c8.82,0,15.98,7.14,16,15.97l.23,94.01c.02,8.84-7.13,16.02-15.97,16.03h-.04Z'
fill='#4285f4'
/>
<circle cx='384' cy='70' r='16' fill='#4285f4' />
<circle cx='384' cy='134' r='16' fill='#4285f4' />
<path
d='M320,220.36c-8.84,0-16-7.16-16-16v-103.02c0-8.84,7.16-16,16-16s16,7.16,16,16v103.02c0,8.84-7.16,16-16,16Z'
fill='#fbbc04'
/>
<circle cx='256' cy='171' r='16' fill='#34a853' />
<circle cx='256' cy='235' r='16' fill='#34a853' />
<circle cx='320' cy='265' r='16' fill='#fbbc04' />
<circle cx='320' cy='329' r='16' fill='#fbbc04' />
<path
d='M192,217.36c-8.84,0-16-7.16-16-16v-100.02c0-8.84,7.16-16,16-16s16,7.16,16,16v100.02c0,8.84-7.16,16-16,16Z'
fill='#fbbc04'
/>
<circle cx='192' cy='265' r='16' fill='#fbbc04' />
<circle cx='192' cy='329' r='16' fill='#fbbc04' />
</g>
</svg>
)
export const CerebrasIcon = (props: SVGProps<SVGSVGElement>) => (
<svg
{...props}
@@ -3335,6 +3385,24 @@ export function SalesforceIcon(props: SVGProps<SVGSVGElement>) {
)
}
export function ServiceNowIcon(props: SVGProps<SVGSVGElement>) {
return (
<svg
{...props}
xmlns='http://www.w3.org/2000/svg'
viewBox='0 0 1570 1403'
width='48'
height='48'
>
<path
fill='#62d84e'
fillRule='evenodd'
d='M1228.4 138.9c129.2 88.9 228.9 214.3 286.3 360.2 57.5 145.8 70 305.5 36 458.5S1437.8 1250 1324 1357.9c-13.3 12.9-28.8 23.4-45.8 30.8-17 7.5-35.2 11.9-53.7 12.9-18.5 1.1-37.1-1.1-54.8-6.6-17.7-5.4-34.3-13.9-49.1-25.2-48.2-35.9-101.8-63.8-158.8-82.6-57.1-18.9-116.7-28.5-176.8-28.5s-119.8 9.6-176.8 28.5c-57 18.8-110.7 46.7-158.9 82.6-14.6 11.2-31 19.8-48.6 25.3s-36 7.8-54.4 6.8c-18.4-.9-36.5-5.1-53.4-12.4s-32.4-17.5-45.8-30.2C132.5 1251 53 1110.8 19 956.8s-20.9-314.6 37.6-461c58.5-146.5 159.6-272 290.3-360.3S631.8.1 789.6.5c156.8 1.3 309.6 49.6 438.8 138.4m-291.8 1014c48.2-19.2 92-48 128.7-84.6 36.7-36.7 65.5-80.4 84.7-128.6 19.2-48.1 28.4-99.7 27-151.5 0-103.9-41.3-203.5-114.8-277S889 396.4 785 396.4s-203.7 41.3-277.2 114.8S393 684.3 393 788.2c-1.4 51.8 7.8 103.4 27 151.5 19.2 48.2 48 91.9 84.7 128.6 36.7 36.6 80.5 65.4 128.6 84.6 48.2 19.2 99.8 28.4 151.7 27 51.8 1.4 103.4-7.8 151.6-27'
/>
</svg>
)
}
export function ApolloIcon(props: SVGProps<SVGSVGElement>) {
return (
<svg

View File

@@ -1,3 +1,6 @@
import { db } from '@sim/db'
import { mcpServers } from '@sim/db/schema'
import { and, eq, inArray, isNull } from 'drizzle-orm'
import { getBaseUrl } from '@/lib/core/utils/urls'
import { createLogger } from '@/lib/logs/console/logger'
import {
@@ -72,6 +75,11 @@ export class BlockExecutor {
try {
resolvedInputs = this.resolver.resolveInputs(ctx, node.id, block.config.params, block)
if (block.metadata?.id === BlockType.AGENT && resolvedInputs.tools) {
resolvedInputs = await this.filterUnavailableMcpToolsForLog(ctx, resolvedInputs)
}
if (blockLog) {
blockLog.input = resolvedInputs
}
@@ -395,6 +403,60 @@ export class BlockExecutor {
return undefined
}
/**
* Filters out unavailable MCP tools from agent inputs for logging.
* Only includes tools from servers with 'connected' status.
*/
private async filterUnavailableMcpToolsForLog(
ctx: ExecutionContext,
inputs: Record<string, any>
): Promise<Record<string, any>> {
const tools = inputs.tools
if (!Array.isArray(tools) || tools.length === 0) return inputs
const mcpTools = tools.filter((t: any) => t.type === 'mcp')
if (mcpTools.length === 0) return inputs
const serverIds = [
...new Set(mcpTools.map((t: any) => t.params?.serverId).filter(Boolean)),
] as string[]
if (serverIds.length === 0) return inputs
const availableServerIds = new Set<string>()
if (ctx.workspaceId && serverIds.length > 0) {
try {
const servers = await db
.select({ id: mcpServers.id, connectionStatus: mcpServers.connectionStatus })
.from(mcpServers)
.where(
and(
eq(mcpServers.workspaceId, ctx.workspaceId),
inArray(mcpServers.id, serverIds),
isNull(mcpServers.deletedAt)
)
)
for (const server of servers) {
if (server.connectionStatus === 'connected') {
availableServerIds.add(server.id)
}
}
} catch (error) {
logger.warn('Failed to check MCP server availability for logging:', error)
return inputs
}
}
const filteredTools = tools.filter((tool: any) => {
if (tool.type !== 'mcp') return true
const serverId = tool.params?.serverId
if (!serverId) return false
return availableServerIds.has(serverId)
})
return { ...inputs, tools: filteredTools }
}
private preparePauseResumeSelfReference(
ctx: ExecutionContext,
node: DAGNode,

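The filtering rule reduces to a pure predicate over each tool entry; restated in miniature (types simplified from the code above):

type ToolRef = { type: string; params?: { serverId?: string } }
const keepTool = (tool: ToolRef, connected: Set<string>): boolean => {
  if (tool.type !== 'mcp') return true // non-MCP tools always pass
  const serverId = tool.params?.serverId
  return serverId !== undefined && connected.has(serverId) // missing serverId is dropped
}
// keepTool({ type: 'function' }, new Set()) → true
// keepTool({ type: 'mcp', params: { serverId: 's1' } }, new Set(['s1'])) → true
// keepTool({ type: 'mcp' }, new Set(['s1'])) → false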
View File

@@ -1,3 +1,6 @@
import { db } from '@sim/db'
import { mcpServers } from '@sim/db/schema'
import { and, eq, inArray, isNull } from 'drizzle-orm'
import { createLogger } from '@/lib/logs/console/logger'
import { createMcpToolId } from '@/lib/mcp/utils'
import { getAllBlocks } from '@/blocks'
@@ -35,19 +38,23 @@ export class AgentBlockHandler implements BlockHandler {
block: SerializedBlock,
inputs: AgentInputs
): Promise<BlockOutput | StreamingExecution> {
const responseFormat = this.parseResponseFormat(inputs.responseFormat)
const model = inputs.model || AGENT.DEFAULT_MODEL
// Filter out unavailable MCP tools early so they don't appear in logs/inputs
const filteredTools = await this.filterUnavailableMcpTools(ctx, inputs.tools || [])
const filteredInputs = { ...inputs, tools: filteredTools }
const responseFormat = this.parseResponseFormat(filteredInputs.responseFormat)
const model = filteredInputs.model || AGENT.DEFAULT_MODEL
const providerId = getProviderFromModel(model)
const formattedTools = await this.formatTools(ctx, inputs.tools || [])
const formattedTools = await this.formatTools(ctx, filteredInputs.tools || [])
const streamingConfig = this.getStreamingConfig(ctx, block)
const messages = await this.buildMessages(ctx, inputs, block.id)
const messages = await this.buildMessages(ctx, filteredInputs, block.id)
const providerRequest = this.buildProviderRequest({
ctx,
providerId,
model,
messages,
inputs,
inputs: filteredInputs,
formattedTools,
responseFormat,
streaming: streamingConfig.shouldUseStreaming ?? false,
@@ -58,10 +65,10 @@ export class AgentBlockHandler implements BlockHandler {
providerRequest,
block,
responseFormat,
inputs
filteredInputs
)
await this.persistResponseToMemory(ctx, inputs, result, block.id)
await this.persistResponseToMemory(ctx, filteredInputs, result, block.id)
return result
}
@@ -115,6 +122,53 @@ export class AgentBlockHandler implements BlockHandler {
return undefined
}
private async filterUnavailableMcpTools(
ctx: ExecutionContext,
tools: ToolInput[]
): Promise<ToolInput[]> {
if (!Array.isArray(tools) || tools.length === 0) return tools
const mcpTools = tools.filter((t) => t.type === 'mcp')
if (mcpTools.length === 0) return tools
const serverIds = [...new Set(mcpTools.map((t) => t.params?.serverId).filter(Boolean))]
if (serverIds.length === 0) return tools
const availableServerIds = new Set<string>()
if (ctx.workspaceId && serverIds.length > 0) {
try {
const servers = await db
.select({ id: mcpServers.id, connectionStatus: mcpServers.connectionStatus })
.from(mcpServers)
.where(
and(
eq(mcpServers.workspaceId, ctx.workspaceId),
inArray(mcpServers.id, serverIds),
isNull(mcpServers.deletedAt)
)
)
for (const server of servers) {
if (server.connectionStatus === 'connected') {
availableServerIds.add(server.id)
}
}
} catch (error) {
logger.warn('Failed to check MCP server availability, including all tools:', error)
for (const serverId of serverIds) {
availableServerIds.add(serverId)
}
}
}
return tools.filter((tool) => {
if (tool.type !== 'mcp') return true
const serverId = tool.params?.serverId
if (!serverId) return false
return availableServerIds.has(serverId)
})
}
private async formatTools(ctx: ExecutionContext, inputTools: ToolInput[]): Promise<any[]> {
if (!Array.isArray(inputTools)) return []
@@ -304,6 +358,7 @@ export class AgentBlockHandler implements BlockHandler {
/**
* Process MCP tools using cached schemas from build time.
* Note: Unavailable tools are already filtered by filterUnavailableMcpTools.
*/
private async processMcpToolsBatched(
ctx: ExecutionContext,
@@ -312,7 +367,6 @@ export class AgentBlockHandler implements BlockHandler {
if (mcpTools.length === 0) return []
const results: any[] = []
const toolsWithSchema: ToolInput[] = []
const toolsNeedingDiscovery: ToolInput[] = []
@@ -439,7 +493,7 @@ export class AgentBlockHandler implements BlockHandler {
const discoveredTools = await this.discoverMcpToolsForServer(ctx, serverId)
return { serverId, tools, discoveredTools, error: null as Error | null }
} catch (error) {
logger.error(`Failed to discover tools from server ${serverId}:`, error)
logger.error(`Failed to discover tools from server ${serverId}:`)
return { serverId, tools, discoveredTools: [] as any[], error: error as Error }
}
})
@@ -829,6 +883,8 @@ export class AgentBlockHandler implements BlockHandler {
apiKey: inputs.apiKey,
azureEndpoint: inputs.azureEndpoint,
azureApiVersion: inputs.azureApiVersion,
vertexProject: inputs.vertexProject,
vertexLocation: inputs.vertexLocation,
responseFormat,
workflowId: ctx.workflowId,
workspaceId: ctx.workspaceId,
@@ -921,6 +977,8 @@ export class AgentBlockHandler implements BlockHandler {
apiKey: finalApiKey,
azureEndpoint: providerRequest.azureEndpoint,
azureApiVersion: providerRequest.azureApiVersion,
vertexProject: providerRequest.vertexProject,
vertexLocation: providerRequest.vertexLocation,
responseFormat: providerRequest.responseFormat,
workflowId: providerRequest.workflowId,
workspaceId: providerRequest.workspaceId,

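Both copies of this filter deliberately fail open: when the availability query itself throws, the log-side variant returns the inputs untouched and this one treats every referenced server as available, so a DB hiccup never silently disables agent tools. In miniature (loadConnectedServerIds is a hypothetical stand-in for the mcpServers query above):

declare function loadConnectedServerIds(workspaceId: string, ids: string[]): Promise<Set<string>>

async function availableServers(workspaceId: string, serverIds: string[]): Promise<Set<string>> {
  try {
    return await loadConnectedServerIds(workspaceId, serverIds)
  } catch {
    return new Set(serverIds) // fail open: keep every referenced tool
  }
}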
View File

@@ -19,6 +19,8 @@ export interface AgentInputs {
apiKey?: string
azureEndpoint?: string
azureApiVersion?: string
vertexProject?: string
vertexLocation?: string
reasoningEffort?: string
verbosity?: string
}

View File

@@ -1,11 +1,47 @@
import '@/executor/__test-utils__/mock-dependencies'
import { beforeEach, describe, expect, it, vi } from 'vitest'
import { BlockType } from '@/executor/constants'
import { ConditionBlockHandler } from '@/executor/handlers/condition/condition-handler'
import type { BlockState, ExecutionContext } from '@/executor/types'
import type { SerializedBlock, SerializedWorkflow } from '@/serializer/types'
vi.mock('@/lib/logs/console/logger', () => ({
createLogger: vi.fn(() => ({
info: vi.fn(),
error: vi.fn(),
warn: vi.fn(),
debug: vi.fn(),
})),
}))
vi.mock('@/lib/core/utils/request', () => ({
generateRequestId: vi.fn(() => 'test-request-id'),
}))
vi.mock('@/lib/execution/isolated-vm', () => ({
executeInIsolatedVM: vi.fn(),
}))
import { executeInIsolatedVM } from '@/lib/execution/isolated-vm'
const mockExecuteInIsolatedVM = executeInIsolatedVM as ReturnType<typeof vi.fn>
function simulateIsolatedVMExecution(
code: string,
contextVariables: Record<string, unknown>
): { result: unknown; stdout: string; error?: { message: string; name: string } } {
try {
const fn = new Function(...Object.keys(contextVariables), code)
const result = fn(...Object.values(contextVariables))
return { result, stdout: '' }
} catch (error: any) {
return {
result: null,
stdout: '',
error: { message: error.message, name: error.name || 'Error' },
}
}
}
describe('ConditionBlockHandler', () => {
let handler: ConditionBlockHandler
let mockBlock: SerializedBlock
@@ -18,7 +54,6 @@ describe('ConditionBlockHandler', () => {
let mockPathTracker: any
beforeEach(() => {
// Define blocks first
mockSourceBlock = {
id: 'source-block-1',
metadata: { id: 'source', name: 'Source Block' },
@@ -33,7 +68,7 @@ describe('ConditionBlockHandler', () => {
metadata: { id: BlockType.CONDITION, name: 'Test Condition' },
position: { x: 50, y: 50 },
config: { tool: BlockType.CONDITION, params: {} },
inputs: { conditions: 'json' }, // Corrected based on previous step
inputs: { conditions: 'json' },
outputs: {},
enabled: true,
}
@@ -56,7 +91,6 @@ describe('ConditionBlockHandler', () => {
enabled: true,
}
// Then define workflow using the block objects
mockWorkflow = {
blocks: [mockSourceBlock, mockBlock, mockTargetBlock1, mockTargetBlock2],
connections: [
@@ -84,7 +118,6 @@ describe('ConditionBlockHandler', () => {
handler = new ConditionBlockHandler(mockPathTracker, mockResolver)
// Define mock context *after* workflow and blocks are set up
mockContext = {
workflowId: 'test-workflow-id',
blockStates: new Map<string, BlockState>([
@@ -99,7 +132,7 @@ describe('ConditionBlockHandler', () => {
]),
blockLogs: [],
metadata: { duration: 0 },
environmentVariables: {}, // Now set the context's env vars
environmentVariables: {},
decisions: { router: new Map(), condition: new Map() },
loopExecutions: new Map(),
executedBlocks: new Set([mockSourceBlock.id]),
@@ -108,11 +141,11 @@ describe('ConditionBlockHandler', () => {
completedLoops: new Set(),
}
// Reset mocks using vi
vi.clearAllMocks()
// Default mock implementations - Removed as it's in the shared mock now
// mockResolver.resolveBlockReferences.mockImplementation((value) => value)
mockExecuteInIsolatedVM.mockImplementation(async ({ code, contextVariables }) => {
return simulateIsolatedVMExecution(code, contextVariables)
})
})
it('should handle condition blocks', () => {
@@ -141,7 +174,6 @@ describe('ConditionBlockHandler', () => {
selectedOption: 'cond1',
}
// Mock the full resolution pipeline
mockResolver.resolveVariableReferences.mockReturnValue('context.value > 5')
mockResolver.resolveBlockReferences.mockReturnValue('context.value > 5')
mockResolver.resolveEnvVariables.mockReturnValue('context.value > 5')
@@ -182,7 +214,6 @@ describe('ConditionBlockHandler', () => {
selectedOption: 'else1',
}
// Mock the full resolution pipeline
mockResolver.resolveVariableReferences.mockReturnValue('context.value < 0')
mockResolver.resolveBlockReferences.mockReturnValue('context.value < 0')
mockResolver.resolveEnvVariables.mockReturnValue('context.value < 0')
@@ -207,7 +238,7 @@ describe('ConditionBlockHandler', () => {
const inputs = { conditions: '{ "invalid json ' }
await expect(handler.execute(mockContext, mockBlock, inputs)).rejects.toThrow(
/^Invalid conditions format: Unterminated string.*/
/^Invalid conditions format:/
)
})
@@ -218,7 +249,6 @@ describe('ConditionBlockHandler', () => {
]
const inputs = { conditions: JSON.stringify(conditions) }
// Mock the full resolution pipeline
mockResolver.resolveVariableReferences.mockReturnValue('{{source-block-1.value}} > 5')
mockResolver.resolveBlockReferences.mockReturnValue('10 > 5')
mockResolver.resolveEnvVariables.mockReturnValue('10 > 5')
@@ -245,7 +275,6 @@ describe('ConditionBlockHandler', () => {
]
const inputs = { conditions: JSON.stringify(conditions) }
// Mock the full resolution pipeline for variable resolution
mockResolver.resolveVariableReferences.mockReturnValue('"john" !== null')
mockResolver.resolveBlockReferences.mockReturnValue('"john" !== null')
mockResolver.resolveEnvVariables.mockReturnValue('"john" !== null')
@@ -272,7 +301,6 @@ describe('ConditionBlockHandler', () => {
]
const inputs = { conditions: JSON.stringify(conditions) }
// Mock the full resolution pipeline for env variable resolution
mockResolver.resolveVariableReferences.mockReturnValue('{{POOP}} === "hi"')
mockResolver.resolveBlockReferences.mockReturnValue('{{POOP}} === "hi"')
mockResolver.resolveEnvVariables.mockReturnValue('"hi" === "hi"')
@@ -300,7 +328,6 @@ describe('ConditionBlockHandler', () => {
const inputs = { conditions: JSON.stringify(conditions) }
const resolutionError = new Error('Could not resolve reference: invalid-ref')
// Mock the pipeline to throw at the variable resolution stage
mockResolver.resolveVariableReferences.mockImplementation(() => {
throw resolutionError
})
@@ -317,7 +344,6 @@ describe('ConditionBlockHandler', () => {
]
const inputs = { conditions: JSON.stringify(conditions) }
// Mock the full resolution pipeline
mockResolver.resolveVariableReferences.mockReturnValue(
'context.nonExistentProperty.doSomething()'
)
@@ -325,7 +351,7 @@ describe('ConditionBlockHandler', () => {
mockResolver.resolveEnvVariables.mockReturnValue('context.nonExistentProperty.doSomething()')
await expect(handler.execute(mockContext, mockBlock, inputs)).rejects.toThrow(
/^Evaluation error in condition "if": Evaluation error in condition: Cannot read properties of undefined \(reading 'doSomething'\)\. \(Resolved: context\.nonExistentProperty\.doSomething\(\)\)$/
/Evaluation error in condition "if".*doSomething/
)
})
@@ -333,7 +359,6 @@ describe('ConditionBlockHandler', () => {
const conditions = [{ id: 'cond1', title: 'if', value: 'true' }]
const inputs = { conditions: JSON.stringify(conditions) }
// Create a new context with empty blockStates instead of trying to delete from readonly map
const contextWithoutSource = {
...mockContext,
blockStates: new Map<string, BlockState>(),
@@ -355,7 +380,6 @@ describe('ConditionBlockHandler', () => {
mockContext.workflow!.blocks = [mockSourceBlock, mockBlock, mockTargetBlock2]
// Mock the full resolution pipeline
mockResolver.resolveVariableReferences.mockReturnValue('true')
mockResolver.resolveBlockReferences.mockReturnValue('true')
mockResolver.resolveEnvVariables.mockReturnValue('true')
@@ -381,7 +405,6 @@ describe('ConditionBlockHandler', () => {
},
]
// Mock the full resolution pipeline
mockResolver.resolveVariableReferences
.mockReturnValueOnce('false')
.mockReturnValueOnce('context.value === 99')
@@ -394,12 +417,10 @@ describe('ConditionBlockHandler', () => {
const result = await handler.execute(mockContext, mockBlock, inputs)
// Should return success with no path selected (branch ends gracefully)
expect((result as any).conditionResult).toBe(false)
expect((result as any).selectedPath).toBeNull()
expect((result as any).selectedConditionId).toBeNull()
expect((result as any).selectedOption).toBeNull()
// Decision should not be set when no condition matches
expect(mockContext.decisions.condition.has(mockBlock.id)).toBe(false)
})
@@ -410,7 +431,6 @@ describe('ConditionBlockHandler', () => {
]
const inputs = { conditions: JSON.stringify(conditions) }
// Mock the full resolution pipeline
mockResolver.resolveVariableReferences.mockReturnValue('context.item === "apple"')
mockResolver.resolveBlockReferences.mockReturnValue('context.item === "apple"')
mockResolver.resolveEnvVariables.mockReturnValue('context.item === "apple"')

View File

@@ -1,3 +1,5 @@
import { generateRequestId } from '@/lib/core/utils/request'
import { executeInIsolatedVM } from '@/lib/execution/isolated-vm'
import { createLogger } from '@/lib/logs/console/logger'
import type { BlockOutput } from '@/blocks/types'
import { BlockType, CONDITION, DEFAULTS, EDGE } from '@/executor/constants'
@@ -6,6 +8,8 @@ import type { SerializedBlock } from '@/serializer/types'
const logger = createLogger('ConditionBlockHandler')
const CONDITION_TIMEOUT_MS = 5000
/**
* Evaluates a single condition expression with variable/block reference resolution
* Returns true if condition is met, false otherwise
@@ -35,11 +39,32 @@ export async function evaluateConditionExpression(
}
try {
const conditionMet = new Function(
'context',
`with(context) { return ${resolvedConditionValue} }`
)(evalContext)
return Boolean(conditionMet)
const requestId = generateRequestId()
const code = `return Boolean(${resolvedConditionValue})`
const result = await executeInIsolatedVM({
code,
params: {},
envVars: {},
contextVariables: { context: evalContext },
timeoutMs: CONDITION_TIMEOUT_MS,
requestId,
})
if (result.error) {
logger.error(`Failed to evaluate condition: ${result.error.message}`, {
originalCondition: conditionExpression,
resolvedCondition: resolvedConditionValue,
evalContext,
error: result.error,
})
throw new Error(
`Evaluation error in condition: ${result.error.message}. (Resolved: ${resolvedConditionValue})`
)
}
return Boolean(result.result)
} catch (evalError: any) {
logger.error(`Failed to evaluate condition: ${evalError.message}`, {
originalCondition: conditionExpression,
@@ -87,7 +112,6 @@ export class ConditionBlockHandler implements BlockHandler {
block
)
// Handle case where no condition matched and no else exists - branch ends gracefully
if (!selectedConnection || !selectedCondition) {
return {
...((sourceOutput as any) || {}),
@@ -206,14 +230,12 @@ export class ConditionBlockHandler implements BlockHandler {
if (elseConnection) {
return { selectedConnection: elseConnection, selectedCondition: elseCondition }
}
// Else exists but has no connection - treat as no match, branch ends
logger.info(`No condition matched and else has no connection - branch ending`, {
blockId: block.id,
})
return { selectedConnection: null, selectedCondition: null }
}
// No condition matched and no else exists - branch ends gracefully
logger.info(`No condition matched and no else block - branch ending`, { blockId: block.id })
return { selectedConnection: null, selectedCondition: null }
}

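Concretely, a condition that resolves to context.value > 5 now runs in the sandbox like so (values illustrative; this mirrors the call signature above):

const result = await executeInIsolatedVM({
  code: 'return Boolean(context.value > 5)',
  params: {},
  envVars: {},
  contextVariables: { context: { value: 10 } },
  timeoutMs: CONDITION_TIMEOUT_MS,
  requestId: generateRequestId(),
})
// result.result === true; evaluation failures arrive as result.error rather than a thrown exception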
View File

@@ -1,3 +1,5 @@
import { generateRequestId } from '@/lib/core/utils/request'
import { executeInIsolatedVM } from '@/lib/execution/isolated-vm'
import { createLogger } from '@/lib/logs/console/logger'
import { buildLoopIndexCondition, DEFAULTS, EDGE } from '@/executor/constants'
import type { DAG } from '@/executor/dag/builder'
@@ -17,6 +19,8 @@ import type { SerializedLoop } from '@/serializer/types'
const logger = createLogger('LoopOrchestrator')
const LOOP_CONDITION_TIMEOUT_MS = 5000
export type LoopRoute = typeof EDGE.LOOP_CONTINUE | typeof EDGE.LOOP_EXIT
export interface LoopContinuationResult {
@@ -112,7 +116,10 @@ export class LoopOrchestrator {
scope.currentIterationOutputs.set(baseId, output)
}
evaluateLoopContinuation(ctx: ExecutionContext, loopId: string): LoopContinuationResult {
async evaluateLoopContinuation(
ctx: ExecutionContext,
loopId: string
): Promise<LoopContinuationResult> {
const scope = ctx.loopExecutions?.get(loopId)
if (!scope) {
logger.error('Loop scope not found during continuation evaluation', { loopId })
@@ -123,7 +130,6 @@ export class LoopOrchestrator {
}
}
// Check for cancellation
if (ctx.isCancelled) {
logger.info('Loop execution cancelled', { loopId, iteration: scope.iteration })
return this.createExitResult(ctx, loopId, scope)
@@ -140,7 +146,7 @@ export class LoopOrchestrator {
scope.currentIterationOutputs.clear()
if (!this.evaluateCondition(ctx, scope, scope.iteration + 1)) {
if (!(await this.evaluateCondition(ctx, scope, scope.iteration + 1))) {
return this.createExitResult(ctx, loopId, scope)
}
@@ -173,7 +179,11 @@ export class LoopOrchestrator {
}
}
private evaluateCondition(ctx: ExecutionContext, scope: LoopScope, iteration?: number): boolean {
private async evaluateCondition(
ctx: ExecutionContext,
scope: LoopScope,
iteration?: number
): Promise<boolean> {
if (!scope.condition) {
logger.warn('No condition defined for loop')
return false
@@ -184,7 +194,7 @@ export class LoopOrchestrator {
scope.iteration = iteration
}
const result = this.evaluateWhileCondition(ctx, scope.condition, scope)
const result = await this.evaluateWhileCondition(ctx, scope.condition, scope)
if (iteration !== undefined) {
scope.iteration = currentIteration
@@ -223,7 +233,6 @@ export class LoopOrchestrator {
const loopNodes = loopConfig.nodes
const allLoopNodeIds = new Set([sentinelStartId, sentinelEndId, ...loopNodes])
// Clear deactivated edges for loop nodes so error/success edges can be re-evaluated
if (this.edgeManager) {
this.edgeManager.clearDeactivatedEdgesForNodes(allLoopNodeIds)
}
@@ -263,7 +272,7 @@ export class LoopOrchestrator {
*
* @returns true if the loop should execute, false if it should be skipped
*/
evaluateInitialCondition(ctx: ExecutionContext, loopId: string): boolean {
async evaluateInitialCondition(ctx: ExecutionContext, loopId: string): Promise<boolean> {
const scope = ctx.loopExecutions?.get(loopId)
if (!scope) {
logger.warn('Loop scope not found for initial condition evaluation', { loopId })
@@ -300,7 +309,7 @@ export class LoopOrchestrator {
return false
}
const result = this.evaluateWhileCondition(ctx, scope.condition, scope)
const result = await this.evaluateWhileCondition(ctx, scope.condition, scope)
logger.info('While loop initial condition evaluation', {
loopId,
condition: scope.condition,
@@ -327,11 +336,11 @@ export class LoopOrchestrator {
return undefined
}
private evaluateWhileCondition(
private async evaluateWhileCondition(
ctx: ExecutionContext,
condition: string,
scope: LoopScope
): boolean {
): Promise<boolean> {
if (!condition) {
return false
}
@@ -343,7 +352,6 @@ export class LoopOrchestrator {
workflowVariables: ctx.workflowVariables,
})
// Use generic utility for smart variable reference replacement
const evaluatedCondition = replaceValidReferences(condition, (match) => {
const resolved = this.resolver.resolveSingleReference(ctx, '', match, scope)
logger.info('Resolved variable reference in loop condition', {
@@ -352,11 +360,9 @@ export class LoopOrchestrator {
resolvedType: typeof resolved,
})
if (resolved !== undefined) {
// For booleans and numbers, return as-is (no quotes)
if (typeof resolved === 'boolean' || typeof resolved === 'number') {
return String(resolved)
}
// For strings that represent booleans, return without quotes
if (typeof resolved === 'string') {
const lower = resolved.toLowerCase().trim()
if (lower === 'true' || lower === 'false') {
@@ -364,13 +370,33 @@ export class LoopOrchestrator {
}
return `"${resolved}"`
}
// For other types, stringify them
return JSON.stringify(resolved)
}
return match
})
const result = Boolean(new Function(`return (${evaluatedCondition})`)())
const requestId = generateRequestId()
const code = `return Boolean(${evaluatedCondition})`
const vmResult = await executeInIsolatedVM({
code,
params: {},
envVars: {},
contextVariables: {},
timeoutMs: LOOP_CONDITION_TIMEOUT_MS,
requestId,
})
if (vmResult.error) {
logger.error('Failed to evaluate loop condition', {
condition,
evaluatedCondition,
error: vmResult.error,
})
return false
}
const result = Boolean(vmResult.result)
logger.info('Loop condition evaluation result', {
originalCondition: condition,

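Worked through for a hypothetical while condition <loop.index> < 3 && <start.flag>, where loop.index resolves to the number 2 and start.flag to the string "true":

// numbers stay unquoted, the strings 'true'/'false' become bare boolean tokens, other strings are quoted
const evaluatedCondition = '2 < 3 && true'
const code = `return Boolean(${evaluatedCondition})` // sent to executeInIsolatedVM with LOOP_CONDITION_TIMEOUT_MS
// vmResult.result === true → the loop continues; any vmResult.error is logged and treated as false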
View File

@@ -68,7 +68,7 @@ export class NodeExecutionOrchestrator {
}
if (node.metadata.isSentinel) {
const output = this.handleSentinel(ctx, node)
const output = await this.handleSentinel(ctx, node)
const isFinalOutput = node.outgoingEdges.size === 0
return {
nodeId,
@@ -86,14 +86,17 @@ export class NodeExecutionOrchestrator {
}
}
private handleSentinel(ctx: ExecutionContext, node: DAGNode): NormalizedBlockOutput {
private async handleSentinel(
ctx: ExecutionContext,
node: DAGNode
): Promise<NormalizedBlockOutput> {
const sentinelType = node.metadata.sentinelType
const loopId = node.metadata.loopId
switch (sentinelType) {
case 'start': {
if (loopId) {
const shouldExecute = this.loopOrchestrator.evaluateInitialCondition(ctx, loopId)
const shouldExecute = await this.loopOrchestrator.evaluateInitialCondition(ctx, loopId)
if (!shouldExecute) {
logger.info('While loop initial condition false, skipping loop body', { loopId })
return {
@@ -112,7 +115,7 @@ export class NodeExecutionOrchestrator {
return { shouldExit: true, selectedRoute: EDGE.LOOP_EXIT }
}
const continuationResult = this.loopOrchestrator.evaluateLoopContinuation(ctx, loopId)
const continuationResult = await this.loopOrchestrator.evaluateLoopContinuation(ctx, loopId)
if (continuationResult.shouldContinue) {
return {

View File

@@ -1,20 +1,17 @@
import { keepPreviousData, useMutation, useQuery, useQueryClient } from '@tanstack/react-query'
import { createLogger } from '@/lib/logs/console/logger'
import type { McpServerStatusConfig } from '@/lib/mcp/types'
const logger = createLogger('McpQueries')
/**
* Query key factories for MCP-related queries
*/
export type { McpServerStatusConfig }
export const mcpKeys = {
all: ['mcp'] as const,
servers: (workspaceId: string) => [...mcpKeys.all, 'servers', workspaceId] as const,
tools: (workspaceId: string) => [...mcpKeys.all, 'tools', workspaceId] as const,
}
/**
* MCP Server Types
*/
export interface McpServer {
id: string
workspaceId: string
@@ -25,9 +22,11 @@ export interface McpServer {
headers?: Record<string, string>
enabled: boolean
connectionStatus?: 'connected' | 'disconnected' | 'error'
lastError?: string
lastError?: string | null
statusConfig?: McpServerStatusConfig
toolCount?: number
lastToolsRefresh?: string
lastConnected?: string
createdAt: string
updatedAt: string
deletedAt?: string
@@ -86,8 +85,13 @@ export function useMcpServers(workspaceId: string) {
/**
* Fetch MCP tools for a workspace
*/
async function fetchMcpTools(workspaceId: string): Promise<McpTool[]> {
const response = await fetch(`/api/mcp/tools/discover?workspaceId=${workspaceId}`)
async function fetchMcpTools(workspaceId: string, forceRefresh = false): Promise<McpTool[]> {
const params = new URLSearchParams({ workspaceId })
if (forceRefresh) {
params.set('refresh', 'true')
}
const response = await fetch(`/api/mcp/tools/discover?${params.toString()}`)
// Treat 404 as "no tools available" - return empty array
if (response.status === 404) {
@@ -159,14 +163,43 @@ export function useCreateMcpServer() {
return {
...serverData,
id: serverId,
connectionStatus: 'disconnected' as const,
connectionStatus: 'connected' as const,
serverId,
updated: wasUpdated,
}
},
onSuccess: (_data, variables) => {
onSuccess: async (data, variables) => {
const freshTools = await fetchMcpTools(variables.workspaceId, true)
const previousServers = queryClient.getQueryData<McpServer[]>(
mcpKeys.servers(variables.workspaceId)
)
if (previousServers) {
const newServer: McpServer = {
id: data.id,
workspaceId: variables.workspaceId,
name: variables.config.name,
transport: variables.config.transport,
url: variables.config.url,
timeout: variables.config.timeout || 30000,
headers: variables.config.headers,
enabled: variables.config.enabled,
connectionStatus: 'connected',
createdAt: new Date().toISOString(),
updatedAt: new Date().toISOString(),
}
const serverExists = previousServers.some((s) => s.id === data.id)
queryClient.setQueryData<McpServer[]>(
mcpKeys.servers(variables.workspaceId),
serverExists
? previousServers.map((s) => (s.id === data.id ? { ...s, ...newServer } : s))
: [...previousServers, newServer]
)
}
queryClient.setQueryData(mcpKeys.tools(variables.workspaceId), freshTools)
queryClient.invalidateQueries({ queryKey: mcpKeys.servers(variables.workspaceId) })
queryClient.invalidateQueries({ queryKey: mcpKeys.tools(variables.workspaceId) })
},
})
}
@@ -213,7 +246,7 @@ export function useDeleteMcpServer() {
interface UpdateMcpServerParams {
workspaceId: string
serverId: string
updates: Partial<McpServerConfig>
updates: Partial<McpServerConfig & { enabled?: boolean }>
}
export function useUpdateMcpServer() {
@@ -221,8 +254,20 @@ export function useUpdateMcpServer() {
return useMutation({
mutationFn: async ({ workspaceId, serverId, updates }: UpdateMcpServerParams) => {
const response = await fetch(`/api/mcp/servers/${serverId}?workspaceId=${workspaceId}`, {
method: 'PATCH',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify(updates),
})
const data = await response.json()
if (!response.ok) {
throw new Error(data.error || 'Failed to update MCP server')
}
logger.info(`Updated MCP server: ${serverId} in workspace: ${workspaceId}`)
return { serverId, updates }
return data.data?.server
},
onMutate: async ({ workspaceId, serverId, updates }) => {
await queryClient.cancelQueries({ queryKey: mcpKeys.servers(workspaceId) })
@@ -249,6 +294,7 @@ export function useUpdateMcpServer() {
},
onSettled: (_data, _error, variables) => {
queryClient.invalidateQueries({ queryKey: mcpKeys.servers(variables.workspaceId) })
queryClient.invalidateQueries({ queryKey: mcpKeys.tools(variables.workspaceId) })
},
})
}
@@ -292,9 +338,10 @@ export function useRefreshMcpServer() {
logger.info(`Refreshed MCP server: ${serverId}`)
return data.data
},
onSuccess: (_data, variables) => {
onSuccess: async (_data, variables) => {
queryClient.invalidateQueries({ queryKey: mcpKeys.servers(variables.workspaceId) })
queryClient.invalidateQueries({ queryKey: mcpKeys.tools(variables.workspaceId) })
const freshTools = await fetchMcpTools(variables.workspaceId, true)
queryClient.setQueryData(mcpKeys.tools(variables.workspaceId), freshTools)
},
})
}
@@ -349,3 +396,42 @@ export function useTestMcpServer() {
},
})
}
/**
* Stored MCP tool from workflow state
*/
export interface StoredMcpTool {
workflowId: string
workflowName: string
serverId: string
serverUrl?: string
toolName: string
schema?: Record<string, unknown>
}
/**
* Fetch stored MCP tools from all workflows in the workspace
*/
async function fetchStoredMcpTools(workspaceId: string): Promise<StoredMcpTool[]> {
const response = await fetch(`/api/mcp/tools/stored?workspaceId=${workspaceId}`)
if (!response.ok) {
const data = await response.json().catch(() => ({}))
throw new Error(data.error || 'Failed to fetch stored MCP tools')
}
const data = await response.json()
return data.data?.tools || []
}
/**
* Hook to fetch stored MCP tools from all workflows
*/
export function useStoredMcpTools(workspaceId: string) {
return useQuery({
queryKey: [...mcpKeys.all, workspaceId, 'stored'],
queryFn: () => fetchStoredMcpTools(workspaceId),
enabled: !!workspaceId,
staleTime: 60 * 1000, // 1 minute - workflows don't change frequently
})
}

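A minimal consumer of the new hook (the grouping is illustrative):

function useToolCountsByServer(workspaceId: string) {
  const { data: storedTools = [] } = useStoredMcpTools(workspaceId)
  // count stored tools per MCP server across all workflows
  return storedTools.reduce<Record<string, number>>((acc, tool) => {
    acc[tool.serverId] = (acc[tool.serverId] ?? 0) + 1
    return acc
  }, {})
}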
View File

@@ -142,6 +142,13 @@ export function useConnectOAuthService() {
return { success: true }
}
// ServiceNow requires a custom OAuth flow with instance URL input
if (providerId === 'servicenow') {
const returnUrl = encodeURIComponent(callbackURL)
window.location.href = `/api/auth/servicenow/authorize?returnUrl=${returnUrl}`
return { success: true }
}
await client.oauth2.link({
providerId,
callbackURL,

View File

@@ -1540,7 +1540,7 @@ export function useCollaborativeWorkflow() {
const config = {
id: nodeId,
nodes: childNodes,
iterations: Math.max(1, Math.min(100, count)), // Clamp between 1-100 for loops
iterations: Math.max(1, Math.min(1000, count)), // Clamp between 1-1000 for loops
loopType: currentLoopType,
forEachItems: currentCollection,
}

View File

@@ -34,14 +34,19 @@ export function useMcpServerTest() {
const [isTestingConnection, setIsTestingConnection] = useState(false)
const testConnection = useCallback(
async (config: McpServerTestConfig): Promise<McpServerTestResult> => {
async (
config: McpServerTestConfig,
options?: { silent?: boolean }
): Promise<McpServerTestResult> => {
const { silent = false } = options || {}
if (!config.name || !config.transport || !config.workspaceId) {
const result: McpServerTestResult = {
success: false,
message: 'Missing required configuration',
error: 'Please provide server name, transport method, and workspace ID',
}
setTestResult(result)
if (!silent) setTestResult(result)
return result
}
@@ -51,12 +56,14 @@ export function useMcpServerTest() {
message: 'Missing server URL',
error: 'Please provide a server URL for HTTP/SSE transport',
}
setTestResult(result)
if (!silent) setTestResult(result)
return result
}
setIsTestingConnection(true)
setTestResult(null)
if (!silent) {
setIsTestingConnection(true)
setTestResult(null)
}
try {
const cleanConfig = {
@@ -88,14 +95,14 @@ export function useMcpServerTest() {
error: result.data.error,
warnings: result.data.warnings,
}
setTestResult(testResult)
if (!silent) setTestResult(testResult)
logger.error('MCP server test failed:', result.data.error)
return testResult
}
throw new Error(result.error || 'Connection test failed')
}
setTestResult(result.data || result)
if (!silent) setTestResult(result.data || result)
logger.info(`MCP server test ${result.data?.success ? 'passed' : 'failed'}:`, config.name)
return result.data || result
} catch (error) {
@@ -105,11 +112,11 @@ export function useMcpServerTest() {
message: 'Connection failed',
error: errorMessage,
}
setTestResult(result)
if (!silent) setTestResult(result)
logger.error('MCP server test failed:', errorMessage)
return result
} finally {
setIsTestingConnection(false)
if (!silent) setIsTestingConnection(false)
}
},
[]

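The new silent option lets a caller health-check a server in the background without flashing isTestingConnection or persisting a result banner (usage sketch; serverConfig is assumed to be a valid McpServerTestConfig, and logger is the one already in scope in this file):

const { testConnection } = useMcpServerTest()
const result = await testConnection(serverConfig, { silent: true })
if (!result.success) logger.warn('Background MCP probe failed:', result.error)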
View File

@@ -148,7 +148,14 @@ export type CopilotProviderConfig =
endpoint?: string
}
| {
provider: Exclude<ProviderId, 'azure-openai'>
provider: 'vertex'
model: string
apiKey?: string
vertexProject?: string
vertexLocation?: string
}
| {
provider: Exclude<ProviderId, 'azure-openai' | 'vertex'>
model?: string
apiKey?: string
}

View File

@@ -98,6 +98,10 @@ export const env = createEnv({
OCR_AZURE_MODEL_NAME: z.string().optional(), // Azure Mistral OCR model name for document processing
OCR_AZURE_API_KEY: z.string().min(1).optional(), // Azure Mistral OCR API key
// Vertex AI Configuration
VERTEX_PROJECT: z.string().optional(), // Google Cloud project ID for Vertex AI
VERTEX_LOCATION: z.string().optional(), // Google Cloud location/region for Vertex AI (defaults to us-central1)
// Monitoring & Analytics
TELEMETRY_ENDPOINT: z.string().url().optional(), // Custom telemetry/analytics endpoint
COST_MULTIPLIER: z.number().optional(), // Multiplier for cost calculations
@@ -233,6 +237,8 @@ export const env = createEnv({
WORDPRESS_CLIENT_SECRET: z.string().optional(), // WordPress.com OAuth client secret
SPOTIFY_CLIENT_ID: z.string().optional(), // Spotify OAuth client ID
SPOTIFY_CLIENT_SECRET: z.string().optional(), // Spotify OAuth client secret
SERVICENOW_CLIENT_ID: z.string().optional(), // ServiceNow OAuth client ID
SERVICENOW_CLIENT_SECRET: z.string().optional(), // ServiceNow OAuth client secret
// E2B Remote Code Execution
E2B_ENABLED: z.string().optional(), // Enable E2B remote code execution

View File

@@ -204,12 +204,17 @@ async function ensureWorker(): Promise<void> {
import('node:child_process').then(({ spawn }) => {
worker = spawn('node', [workerPath], {
stdio: ['ignore', 'pipe', 'inherit', 'ipc'],
stdio: ['ignore', 'pipe', 'pipe', 'ipc'],
serialization: 'json',
})
worker.on('message', handleWorkerMessage)
let stderrData = ''
worker.stderr?.on('data', (data: Buffer) => {
stderrData += data.toString()
})
const startTimeout = setTimeout(() => {
worker?.kill()
worker = null
@@ -232,20 +237,42 @@ async function ensureWorker(): Promise<void> {
}
worker.on('message', readyHandler)
worker.on('exit', () => {
worker.on('exit', (code) => {
if (workerIdleTimeout) {
clearTimeout(workerIdleTimeout)
workerIdleTimeout = null
}
const wasStartupFailure = !workerReady && workerReadyPromise
worker = null
workerReady = false
workerReadyPromise = null
let errorMessage = 'Worker process exited unexpectedly'
if (stderrData.includes('isolated_vm') || stderrData.includes('MODULE_NOT_FOUND')) {
errorMessage =
'Code execution requires the isolated-vm native module which failed to load. ' +
'This usually means the module needs to be rebuilt for your Node.js version. ' +
'Please run: cd node_modules/isolated-vm && npm rebuild'
logger.error('isolated-vm module failed to load', { stderr: stderrData })
} else if (stderrData) {
errorMessage = `Worker process failed: ${stderrData.slice(0, 500)}`
logger.error('Worker process failed', { stderr: stderrData })
}
if (wasStartupFailure) {
clearTimeout(startTimeout)
reject(new Error(errorMessage))
return
}
for (const [id, pending] of pendingExecutions) {
clearTimeout(pending.timeout)
pending.resolve({
result: null,
stdout: '',
error: { message: 'Worker process exited unexpectedly', name: 'WorkerError' },
error: { message: errorMessage, name: 'WorkerError' },
})
pendingExecutions.delete(id)
}

View File

@@ -23,6 +23,8 @@ const FILTER_FIELDS = {
workflow: 'string',
trigger: 'string',
execution: 'string',
executionId: 'string',
workflowId: 'string',
id: 'string',
cost: 'number',
duration: 'number',
@@ -215,11 +217,13 @@ export function queryToApiParams(parsedQuery: ParsedQuery): Record<string, strin
break
case 'cost':
params[`cost_${filter.operator}_${filter.value}`] = 'true'
params.costOperator = filter.operator
params.costValue = String(filter.value)
break
case 'duration':
params[`duration_${filter.operator}_${filter.value}`] = 'true'
params.durationOperator = filter.operator
params.durationValue = String(filter.value)
break
}
}

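So a search such as cost:>0.5 duration:<=2000 now serializes to explicit operator/value pairs the API can parse directly, instead of the old opaque boolean keys:

// before: { 'cost_>_0.5': 'true', 'duration_<=_2000': 'true' }
// after:
const params = {
  costOperator: '>',
  costValue: '0.5',
  durationOperator: '<=',
  durationValue: '2000',
}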
View File

@@ -38,8 +38,6 @@ export const FILTER_DEFINITIONS: FilterDefinition[] = [
{ value: 'info', label: 'Info', description: 'Info logs only' },
],
},
// Note: Trigger options are now dynamically populated from active logs
// Core types are included by default, integration triggers are added from actual log data
{
key: 'cost',
label: 'Cost',
@@ -82,14 +80,6 @@ export const FILTER_DEFINITIONS: FilterDefinition[] = [
},
]
const CORE_TRIGGERS: TriggerData[] = [
{ value: 'api', label: 'API', color: '#3b82f6' },
{ value: 'manual', label: 'Manual', color: '#6b7280' },
{ value: 'webhook', label: 'Webhook', color: '#f97316' },
{ value: 'chat', label: 'Chat', color: '#8b5cf6' },
{ value: 'schedule', label: 'Schedule', color: '#10b981' },
]
export class SearchSuggestions {
private workflowsData: WorkflowData[]
private foldersData: FolderData[]
@@ -116,10 +106,10 @@ export class SearchSuggestions {
}
/**
* Get all triggers (core + integrations)
* Get all triggers from registry data
*/
private getAllTriggers(): TriggerData[] {
return [...CORE_TRIGGERS, ...this.triggersData]
return this.triggersData
}
/**
@@ -128,24 +118,20 @@ export class SearchSuggestions {
getSuggestions(input: string): SuggestionGroup | null {
const trimmed = input.trim()
// Empty input → show all filter keys
if (!trimmed) {
return this.getFilterKeysList()
}
// Input ends with ':' → show values for that key
if (trimmed.endsWith(':')) {
const key = trimmed.slice(0, -1)
return this.getFilterValues(key)
}
// Input contains ':' → filter value context
if (trimmed.includes(':')) {
const [key, partial] = trimmed.split(':')
return this.getFilterValues(key, partial)
}
// Plain text → multi-section results
return this.getMultiSectionResults(trimmed)
}
@@ -155,7 +141,6 @@ export class SearchSuggestions {
private getFilterKeysList(): SuggestionGroup {
const suggestions: Suggestion[] = []
// Add all filter keys
for (const filter of FILTER_DEFINITIONS) {
suggestions.push({
id: `filter-key-${filter.key}`,
@@ -166,7 +151,6 @@ export class SearchSuggestions {
})
}
// Add trigger key (always available - core types + integrations)
suggestions.push({
id: 'filter-key-trigger',
value: 'trigger:',
@@ -175,7 +159,6 @@ export class SearchSuggestions {
category: 'filters',
})
// Add workflow and folder keys
if (this.workflowsData.length > 0) {
suggestions.push({
id: 'filter-key-workflow',
@@ -249,12 +232,10 @@ export class SearchSuggestions {
: null
}
// Trigger filter values (core + integrations)
if (key === 'trigger') {
const allTriggers = this.getAllTriggers()
const suggestions = allTriggers
.filter((t) => !partial || t.label.toLowerCase().includes(partial.toLowerCase()))
.slice(0, 15) // Show more since we have core + integrations
.map((t) => ({
id: `filter-value-trigger-${t.value}`,
value: `trigger:${t.value}`,
@@ -273,11 +254,9 @@ export class SearchSuggestions {
: null
}
// Workflow filter values
if (key === 'workflow') {
const suggestions = this.workflowsData
.filter((w) => !partial || w.name.toLowerCase().includes(partial.toLowerCase()))
.slice(0, 8)
.map((w) => ({
id: `filter-value-workflow-${w.id}`,
value: `workflow:"${w.name}"`,
@@ -295,11 +274,9 @@ export class SearchSuggestions {
: null
}
// Folder filter values
if (key === 'folder') {
const suggestions = this.foldersData
.filter((f) => !partial || f.name.toLowerCase().includes(partial.toLowerCase()))
.slice(0, 8)
.map((f) => ({
id: `filter-value-folder-${f.id}`,
value: `folder:"${f.name}"`,
@@ -326,7 +303,6 @@ export class SearchSuggestions {
const sections: Array<{ title: string; suggestions: Suggestion[] }> = []
const allSuggestions: Suggestion[] = []
// Show all results option
const showAllSuggestion: Suggestion = {
id: 'show-all',
value: query,
@@ -335,7 +311,6 @@ export class SearchSuggestions {
}
allSuggestions.push(showAllSuggestion)
// Match filter values (e.g., "info" → "Status: Info")
const matchingFilterValues = this.getMatchingFilterValues(query)
if (matchingFilterValues.length > 0) {
sections.push({
@@ -345,7 +320,6 @@ export class SearchSuggestions {
allSuggestions.push(...matchingFilterValues)
}
// Match triggers
const matchingTriggers = this.getMatchingTriggers(query)
if (matchingTriggers.length > 0) {
sections.push({
@@ -355,7 +329,6 @@ export class SearchSuggestions {
allSuggestions.push(...matchingTriggers)
}
// Match workflows
const matchingWorkflows = this.getMatchingWorkflows(query)
if (matchingWorkflows.length > 0) {
sections.push({
@@ -365,7 +338,6 @@ export class SearchSuggestions {
allSuggestions.push(...matchingWorkflows)
}
// Match folders
const matchingFolders = this.getMatchingFolders(query)
if (matchingFolders.length > 0) {
sections.push({
@@ -375,7 +347,6 @@ export class SearchSuggestions {
allSuggestions.push(...matchingFolders)
}
// Add filter keys if no specific matches
if (
matchingFilterValues.length === 0 &&
matchingTriggers.length === 0 &&

View File

@@ -108,7 +108,7 @@ export class McpClient {
this.connectionStatus.lastError = errorMessage
this.isConnected = false
logger.error(`Failed to connect to MCP server ${this.config.name}:`, error)
throw new McpConnectionError(errorMessage, this.config.id)
throw new McpConnectionError(errorMessage, this.config.name)
}
}
@@ -141,7 +141,7 @@ export class McpClient {
*/
async listTools(): Promise<McpTool[]> {
if (!this.isConnected) {
throw new McpConnectionError('Not connected to server', this.config.id)
throw new McpConnectionError('Not connected to server', this.config.name)
}
try {
@@ -170,7 +170,7 @@ export class McpClient {
*/
async callTool(toolCall: McpToolCall): Promise<McpToolResult> {
if (!this.isConnected) {
throw new McpConnectionError('Not connected to server', this.config.id)
throw new McpConnectionError('Not connected to server', this.config.name)
}
const consentRequest: McpConsentRequest = {
@@ -217,7 +217,7 @@ export class McpClient {
*/
async ping(): Promise<{ _meta?: Record<string, any> }> {
if (!this.isConnected) {
throw new McpConnectionError('Not connected to server', this.config.id)
throw new McpConnectionError('Not connected to server', this.config.name)
}
try {

View File

@@ -10,8 +10,14 @@ import { generateRequestId } from '@/lib/core/utils/request'
import { getEffectiveDecryptedEnv } from '@/lib/environment/utils'
import { createLogger } from '@/lib/logs/console/logger'
import { McpClient } from '@/lib/mcp/client'
import {
createMcpCacheAdapter,
getMcpCacheType,
type McpCacheStorageAdapter,
} from '@/lib/mcp/storage'
import type {
McpServerConfig,
McpServerStatusConfig,
McpServerSummary,
McpTool,
McpToolCall,
@@ -22,154 +28,21 @@ import { MCP_CONSTANTS } from '@/lib/mcp/utils'
const logger = createLogger('McpService')
interface ToolCache {
tools: McpTool[]
expiry: Date
lastAccessed: Date
}
interface CacheStats {
totalEntries: number
activeEntries: number
expiredEntries: number
maxCacheSize: number
cacheHitRate: number
memoryUsage: {
approximateBytes: number
entriesEvicted: number
}
}
class McpService {
private toolCache = new Map<string, ToolCache>()
private readonly cacheTimeout = MCP_CONSTANTS.CACHE_TIMEOUT // 30 seconds
private readonly maxCacheSize = MCP_CONSTANTS.MAX_CACHE_SIZE // 1000
private cleanupInterval: NodeJS.Timeout | null = null
private cacheHits = 0
private cacheMisses = 0
private entriesEvicted = 0
private cacheAdapter: McpCacheStorageAdapter
private readonly cacheTimeout = MCP_CONSTANTS.CACHE_TIMEOUT // 5 minutes
constructor() {
this.startPeriodicCleanup()
}
/**
* Start periodic cleanup of expired cache entries
*/
private startPeriodicCleanup(): void {
this.cleanupInterval = setInterval(
() => {
this.cleanupExpiredEntries()
},
5 * 60 * 1000
)
}
/**
* Stop periodic cleanup
*/
private stopPeriodicCleanup(): void {
if (this.cleanupInterval) {
clearInterval(this.cleanupInterval)
this.cleanupInterval = null
}
}
/**
* Cleanup expired cache entries
*/
private cleanupExpiredEntries(): void {
const now = new Date()
const expiredKeys: string[] = []
this.toolCache.forEach((cache, key) => {
if (cache.expiry <= now) {
expiredKeys.push(key)
}
})
expiredKeys.forEach((key) => this.toolCache.delete(key))
if (expiredKeys.length > 0) {
logger.debug(`Cleaned up ${expiredKeys.length} expired cache entries`)
}
}
/**
* Evict least recently used entries when cache exceeds max size
*/
private evictLRUEntries(): void {
if (this.toolCache.size <= this.maxCacheSize) {
return
}
const entries: { key: string; cache: ToolCache }[] = []
this.toolCache.forEach((cache, key) => {
entries.push({ key, cache })
})
entries.sort((a, b) => a.cache.lastAccessed.getTime() - b.cache.lastAccessed.getTime())
const entriesToRemove = this.toolCache.size - this.maxCacheSize + 1
for (let i = 0; i < entriesToRemove && i < entries.length; i++) {
this.toolCache.delete(entries[i].key)
this.entriesEvicted++
}
logger.debug(`Evicted ${entriesToRemove} LRU cache entries to maintain size limit`)
}
/**
* Get cache entry and update last accessed time
*/
private getCacheEntry(key: string): ToolCache | undefined {
const entry = this.toolCache.get(key)
if (entry) {
entry.lastAccessed = new Date()
this.cacheHits++
return entry
}
this.cacheMisses++
return undefined
}
/**
* Set cache entry with LRU eviction
*/
private setCacheEntry(key: string, tools: McpTool[]): void {
const now = new Date()
const cache: ToolCache = {
tools,
expiry: new Date(now.getTime() + this.cacheTimeout),
lastAccessed: now,
}
this.toolCache.set(key, cache)
this.evictLRUEntries()
}
/**
* Calculate approximate memory usage of cache
*/
private calculateMemoryUsage(): number {
let totalBytes = 0
this.toolCache.forEach((cache, key) => {
totalBytes += key.length * 2 // UTF-16 encoding
totalBytes += JSON.stringify(cache.tools).length * 2
totalBytes += 64
})
return totalBytes
this.cacheAdapter = createMcpCacheAdapter()
logger.info(`MCP Service initialized with ${getMcpCacheType()} cache`)
}
/**
* Dispose of the service and cleanup resources
*/
dispose(): void {
this.stopPeriodicCleanup()
this.toolCache.clear()
logger.info('MCP Service disposed and cleanup stopped')
this.cacheAdapter.dispose()
logger.info('MCP Service disposed')
}
/**
@@ -385,6 +258,81 @@ class McpService {
)
}
/**
* Update server connection status after discovery attempt
*/
private async updateServerStatus(
serverId: string,
workspaceId: string,
success: boolean,
error?: string,
toolCount?: number
): Promise<void> {
try {
const [currentServer] = await db
.select({ statusConfig: mcpServers.statusConfig })
.from(mcpServers)
.where(
and(
eq(mcpServers.id, serverId),
eq(mcpServers.workspaceId, workspaceId),
isNull(mcpServers.deletedAt)
)
)
.limit(1)
const currentConfig: McpServerStatusConfig =
(currentServer?.statusConfig as McpServerStatusConfig | null) ?? {
consecutiveFailures: 0,
lastSuccessfulDiscovery: null,
}
const now = new Date()
if (success) {
await db
.update(mcpServers)
.set({
connectionStatus: 'connected',
lastConnected: now,
lastError: null,
toolCount: toolCount ?? 0,
lastToolsRefresh: now,
statusConfig: {
consecutiveFailures: 0,
lastSuccessfulDiscovery: now.toISOString(),
},
updatedAt: now,
})
.where(eq(mcpServers.id, serverId))
} else {
const newFailures = currentConfig.consecutiveFailures + 1
const isErrorState = newFailures >= MCP_CONSTANTS.MAX_CONSECUTIVE_FAILURES
await db
.update(mcpServers)
.set({
connectionStatus: isErrorState ? 'error' : 'disconnected',
lastError: error || 'Unknown error',
statusConfig: {
consecutiveFailures: newFailures,
lastSuccessfulDiscovery: currentConfig.lastSuccessfulDiscovery,
},
updatedAt: now,
})
.where(eq(mcpServers.id, serverId))
if (isErrorState) {
logger.warn(
`Server ${serverId} marked as error after ${newFailures} consecutive failures`
)
}
}
} catch (err) {
logger.error(`Failed to update server status for ${serverId}:`, err)
}
}
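// Worked example of the failure counter (MAX_CONSECUTIVE_FAILURES = 3):
//   fail    → status 'disconnected', consecutiveFailures 1
//   fail    → status 'disconnected', consecutiveFailures 2
//   fail    → status 'error',        consecutiveFailures 3 (warning logged)
//   success → status 'connected',    counter reset, lastSuccessfulDiscovery updated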
/**
* Discover tools from all workspace servers
*/
@@ -399,10 +347,14 @@ class McpService {
try {
if (!forceRefresh) {
const cached = this.getCacheEntry(cacheKey)
if (cached && cached.expiry > new Date()) {
logger.debug(`[${requestId}] Using cached tools for user ${userId}`)
return cached.tools
try {
const cached = await this.cacheAdapter.get(cacheKey)
if (cached) {
logger.debug(`[${requestId}] Using cached tools for user ${userId}`)
return cached.tools
}
} catch (error) {
logger.warn(`[${requestId}] Cache read failed, proceeding with discovery:`, error)
}
}
@@ -425,7 +377,7 @@ class McpService {
logger.debug(
`[${requestId}] Discovered ${tools.length} tools from server ${config.name}`
)
return tools
return { serverId: config.id, tools }
} finally {
await client.disconnect()
}
@@ -433,20 +385,40 @@ class McpService {
)
let failedCount = 0
const statusUpdates: Promise<void>[] = []
results.forEach((result, index) => {
const server = servers[index]
if (result.status === 'fulfilled') {
allTools.push(...result.value)
allTools.push(...result.value.tools)
statusUpdates.push(
this.updateServerStatus(
server.id!,
workspaceId,
true,
undefined,
result.value.tools.length
)
)
} else {
failedCount++
logger.warn(
`[${requestId}] Failed to discover tools from server ${servers[index].name}:`,
result.reason
)
const errorMessage =
result.reason instanceof Error ? result.reason.message : 'Unknown error'
logger.warn(`[${requestId}] Failed to discover tools from server ${server.name}:`, errorMessage)
statusUpdates.push(this.updateServerStatus(server.id!, workspaceId, false, errorMessage))
}
})
Promise.allSettled(statusUpdates).catch((err) => {
logger.error(`[${requestId}] Error updating server statuses:`, err)
})
if (failedCount === 0) {
this.setCacheEntry(cacheKey, allTools)
try {
await this.cacheAdapter.set(cacheKey, allTools, this.cacheTimeout)
} catch (error) {
logger.warn(`[${requestId}] Cache write failed:`, error)
}
} else {
logger.warn(
`[${requestId}] Skipping cache due to ${failedCount} failed server(s) - will retry on next request`
@@ -565,44 +537,18 @@ class McpService {
/**
* Clear tool cache for a workspace or all workspaces
*/
clearCache(workspaceId?: string): void {
if (workspaceId) {
const workspaceCacheKey = `workspace:${workspaceId}`
this.toolCache.delete(workspaceCacheKey)
logger.debug(`Cleared MCP tool cache for workspace ${workspaceId}`)
} else {
this.toolCache.clear()
this.cacheHits = 0
this.cacheMisses = 0
this.entriesEvicted = 0
logger.debug('Cleared all MCP tool cache and reset statistics')
}
}
/**
* Get comprehensive cache statistics
*/
getCacheStats(): CacheStats {
const entries: { key: string; cache: ToolCache }[] = []
this.toolCache.forEach((cache, key) => {
entries.push({ key, cache })
})
const now = new Date()
const activeEntries = entries.filter(({ cache }) => cache.expiry > now)
const totalRequests = this.cacheHits + this.cacheMisses
const hitRate = totalRequests > 0 ? this.cacheHits / totalRequests : 0
return {
totalEntries: entries.length,
activeEntries: activeEntries.length,
expiredEntries: entries.length - activeEntries.length,
maxCacheSize: this.maxCacheSize,
cacheHitRate: Math.round(hitRate * 100) / 100,
memoryUsage: {
approximateBytes: this.calculateMemoryUsage(),
entriesEvicted: this.entriesEvicted,
},
async clearCache(workspaceId?: string): Promise<void> {
try {
if (workspaceId) {
const workspaceCacheKey = `workspace:${workspaceId}`
await this.cacheAdapter.delete(workspaceCacheKey)
logger.debug(`Cleared MCP tool cache for workspace ${workspaceId}`)
} else {
await this.cacheAdapter.clear()
logger.debug('Cleared all MCP tool cache')
}
} catch (error) {
logger.warn('Failed to clear cache:', error)
}
}
}

View File

@@ -0,0 +1,14 @@
import type { McpTool } from '@/lib/mcp/types'
export interface McpCacheEntry {
tools: McpTool[]
expiry: number // Unix timestamp ms
}
export interface McpCacheStorageAdapter {
get(key: string): Promise<McpCacheEntry | null>
set(key: string, tools: McpTool[], ttlMs: number): Promise<void>
delete(key: string): Promise<void>
clear(): Promise<void>
dispose(): void
}
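A minimal caller-side sketch of the adapter contract (the `getToolsWithCache` helper and its arguments are hypothetical, not part of this commit): a hit returns the cached tools, a miss runs discovery and writes back with a TTL.

import type { McpCacheStorageAdapter } from '@/lib/mcp/storage'
import type { McpTool } from '@/lib/mcp/types'

// Read-through pattern: serve from cache when possible, otherwise
// discover and persist with a TTL so entries expire on their own.
async function getToolsWithCache(
  adapter: McpCacheStorageAdapter,
  key: string,
  discover: () => Promise<McpTool[]>,
  ttlMs: number
): Promise<McpTool[]> {
  const cached = await adapter.get(key)
  if (cached) return cached.tools
  const tools = await discover()
  await adapter.set(key, tools, ttlMs)
  return tools
}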

View File

@@ -0,0 +1,53 @@
import { getRedisClient } from '@/lib/core/config/redis'
import { createLogger } from '@/lib/logs/console/logger'
import type { McpCacheStorageAdapter } from './adapter'
import { MemoryMcpCache } from './memory-cache'
import { RedisMcpCache } from './redis-cache'
const logger = createLogger('McpCacheFactory')
let cachedAdapter: McpCacheStorageAdapter | null = null
/**
* Create MCP cache storage adapter.
* Uses Redis if available, falls back to in-memory cache.
*
* Unlike rate-limiting (which fails if Redis is configured but unavailable),
* MCP caching gracefully falls back to memory since it's an optimization.
*/
export function createMcpCacheAdapter(): McpCacheStorageAdapter {
if (cachedAdapter) {
return cachedAdapter
}
const redis = getRedisClient()
if (redis) {
logger.info('MCP cache: Using Redis')
cachedAdapter = new RedisMcpCache(redis)
} else {
logger.info('MCP cache: Using in-memory (Redis not configured)')
cachedAdapter = new MemoryMcpCache()
}
return cachedAdapter
}
/**
* Get the current adapter type for logging/debugging
*/
export function getMcpCacheType(): 'redis' | 'memory' {
const redis = getRedisClient()
return redis ? 'redis' : 'memory'
}
/**
* Reset the cached adapter.
* Only use for testing purposes.
*/
export function resetMcpCacheAdapter(): void {
if (cachedAdapter) {
cachedAdapter.dispose()
cachedAdapter = null
}
}
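Usage sketch for the memoized factory (illustrative only):

import { createMcpCacheAdapter, resetMcpCacheAdapter } from '@/lib/mcp/storage'

// The factory caches one adapter per process, so repeated calls are cheap
// and every caller shares the same Redis or memory backend.
const a = createMcpCacheAdapter()
const b = createMcpCacheAdapter()
console.log(a === b) // true

// Tests can dispose the singleton and force re-selection on the next call.
resetMcpCacheAdapter()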

View File

@@ -0,0 +1,4 @@
export type { McpCacheEntry, McpCacheStorageAdapter } from './adapter'
export { createMcpCacheAdapter, getMcpCacheType, resetMcpCacheAdapter } from './factory'
export { MemoryMcpCache } from './memory-cache'
export { RedisMcpCache } from './redis-cache'

View File

@@ -0,0 +1,103 @@
import { createLogger } from '@/lib/logs/console/logger'
import type { McpTool } from '@/lib/mcp/types'
import { MCP_CONSTANTS } from '@/lib/mcp/utils'
import type { McpCacheEntry, McpCacheStorageAdapter } from './adapter'
const logger = createLogger('McpMemoryCache')
export class MemoryMcpCache implements McpCacheStorageAdapter {
private cache = new Map<string, McpCacheEntry>()
private readonly maxCacheSize = MCP_CONSTANTS.MAX_CACHE_SIZE
private cleanupInterval: NodeJS.Timeout | null = null
constructor() {
this.startPeriodicCleanup()
}
private startPeriodicCleanup(): void {
this.cleanupInterval = setInterval(
() => {
this.cleanupExpiredEntries()
},
5 * 60 * 1000 // 5 minutes
)
// Don't keep Node process alive just for cache cleanup
this.cleanupInterval.unref()
}
private cleanupExpiredEntries(): void {
const now = Date.now()
const expiredKeys: string[] = []
this.cache.forEach((entry, key) => {
if (entry.expiry <= now) {
expiredKeys.push(key)
}
})
expiredKeys.forEach((key) => this.cache.delete(key))
if (expiredKeys.length > 0) {
logger.debug(`Cleaned up ${expiredKeys.length} expired cache entries`)
}
}
private evictIfNeeded(): void {
if (this.cache.size <= this.maxCacheSize) {
return
}
// Evict oldest entries (by insertion order - Map maintains order)
const entriesToRemove = this.cache.size - this.maxCacheSize
const keys = Array.from(this.cache.keys()).slice(0, entriesToRemove)
keys.forEach((key) => this.cache.delete(key))
logger.debug(`Evicted ${entriesToRemove} cache entries`)
}
async get(key: string): Promise<McpCacheEntry | null> {
const entry = this.cache.get(key)
const now = Date.now()
if (!entry || entry.expiry <= now) {
if (entry) {
this.cache.delete(key)
}
return null
}
// Return a copy so callers can't mutate the cached tools array
return {
tools: [...entry.tools],
expiry: entry.expiry,
}
}
async set(key: string, tools: McpTool[], ttlMs: number): Promise<void> {
const now = Date.now()
const entry: McpCacheEntry = {
tools,
expiry: now + ttlMs,
}
this.cache.set(key, entry)
this.evictIfNeeded()
}
async delete(key: string): Promise<void> {
this.cache.delete(key)
}
async clear(): Promise<void> {
this.cache.clear()
}
dispose(): void {
if (this.cleanupInterval) {
clearInterval(this.cleanupInterval)
this.cleanupInterval = null
}
this.cache.clear()
logger.info('Memory cache disposed')
}
}
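A quick exercise of the memory adapter's TTL semantics (key and TTL values are hypothetical):

import { MemoryMcpCache } from '@/lib/mcp/storage'

const cache = new MemoryMcpCache()
await cache.set('workspace:ws1', [], 5 * 60 * 1000)
await cache.get('workspace:ws1') // { tools: [], expiry: <now + 5 min> }
// After the TTL elapses, get() deletes the entry lazily and returns null;
// the periodic sweep also clears expired keys every 5 minutes. Note that
// evictIfNeeded drops oldest-inserted entries (FIFO), simpler than the
// LRU scheme it replaces.
cache.dispose()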

View File

@@ -0,0 +1,96 @@
import type Redis from 'ioredis'
import { createLogger } from '@/lib/logs/console/logger'
import type { McpTool } from '@/lib/mcp/types'
import type { McpCacheEntry, McpCacheStorageAdapter } from './adapter'
const logger = createLogger('McpRedisCache')
const REDIS_KEY_PREFIX = 'mcp:tools:'
export class RedisMcpCache implements McpCacheStorageAdapter {
constructor(private redis: Redis) {}
private getKey(key: string): string {
return `${REDIS_KEY_PREFIX}${key}`
}
async get(key: string): Promise<McpCacheEntry | null> {
try {
const redisKey = this.getKey(key)
const data = await this.redis.get(redisKey)
if (!data) {
return null
}
try {
return JSON.parse(data) as McpCacheEntry
} catch {
// Corrupted data - delete and treat as miss
logger.warn('Corrupted cache entry, deleting:', redisKey)
await this.redis.del(redisKey)
return null
}
} catch (error) {
logger.error('Redis cache get error:', error)
throw error
}
}
async set(key: string, tools: McpTool[], ttlMs: number): Promise<void> {
try {
const now = Date.now()
const entry: McpCacheEntry = {
tools,
expiry: now + ttlMs,
}
await this.redis.set(this.getKey(key), JSON.stringify(entry), 'PX', ttlMs)
} catch (error) {
logger.error('Redis cache set error:', error)
throw error
}
}
async delete(key: string): Promise<void> {
try {
await this.redis.del(this.getKey(key))
} catch (error) {
logger.error('Redis cache delete error:', error)
throw error
}
}
async clear(): Promise<void> {
try {
let cursor = '0'
let deletedCount = 0
do {
const [nextCursor, keys] = await this.redis.scan(
cursor,
'MATCH',
`${REDIS_KEY_PREFIX}*`,
'COUNT',
100
)
cursor = nextCursor
if (keys.length > 0) {
await this.redis.del(...keys)
deletedCount += keys.length
}
} while (cursor !== '0')
logger.debug(`Cleared ${deletedCount} MCP cache entries from Redis`)
} catch (error) {
logger.error('Redis cache clear error:', error)
throw error
}
}
dispose(): void {
// Redis client is managed externally, nothing to dispose
logger.info('Redis cache adapter disposed')
}
}
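And the Redis adapter, sketched with an ioredis client (the REDIS_URL env var is an assumption for illustration; the app wires the client through getRedisClient):

import Redis from 'ioredis'
import { RedisMcpCache } from '@/lib/mcp/storage'

const cache = new RedisMcpCache(new Redis(process.env.REDIS_URL!))
await cache.set('workspace:ws1', [], 5 * 60 * 1000)
// Stored as 'mcp:tools:workspace:ws1' with a server-side PX TTL, so Redis
// expires the key even if no process ever calls delete(); clear() walks
// the 'mcp:tools:*' namespace with SCAN to avoid blocking Redis with KEYS.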

View File

@@ -0,0 +1,129 @@
/**
* MCP Tool Validation
*
* Shared logic for detecting issues with MCP tools across the platform.
* Used by both tool-input.tsx (workflow context) and MCP modal (workspace context).
*/
import isEqual from 'lodash/isEqual'
import omit from 'lodash/omit'
export type McpToolIssueType =
| 'server_not_found'
| 'server_error'
| 'tool_not_found'
| 'schema_changed'
| 'url_changed'
export interface McpToolIssue {
type: McpToolIssueType
message: string
}
export interface StoredMcpTool {
serverId: string
serverUrl?: string
toolName: string
schema?: Record<string, unknown>
}
export interface ServerState {
id: string
url?: string
connectionStatus?: 'connected' | 'disconnected' | 'error'
lastError?: string
}
export interface DiscoveredTool {
serverId: string
name: string
inputSchema?: Record<string, unknown>
}
/**
* Compares two schemas to detect changes.
* Uses lodash isEqual for deep, key-order-independent comparison.
* Ignores description field which may be backfilled.
*/
export function hasSchemaChanged(
storedSchema: Record<string, unknown> | undefined,
serverSchema: Record<string, unknown> | undefined
): boolean {
if (!storedSchema || !serverSchema) return false
const storedWithoutDesc = omit(storedSchema, 'description')
const serverWithoutDesc = omit(serverSchema, 'description')
return !isEqual(storedWithoutDesc, serverWithoutDesc)
}
/**
* Detects issues with a stored MCP tool by comparing against current server/tool state.
*/
export function getMcpToolIssue(
storedTool: StoredMcpTool,
servers: ServerState[],
discoveredTools: DiscoveredTool[]
): McpToolIssue | null {
const { serverId, serverUrl, toolName, schema } = storedTool
// Check server exists
const server = servers.find((s) => s.id === serverId)
if (!server) {
return { type: 'server_not_found', message: 'Server not found' }
}
// Check server connection status
if (server.connectionStatus === 'error') {
return { type: 'server_error', message: server.lastError || 'Server connection error' }
}
if (server.connectionStatus !== 'connected') {
return { type: 'server_error', message: 'Server not connected' }
}
// Check server URL changed (if we have stored URL)
if (serverUrl && server.url && serverUrl !== server.url) {
return { type: 'url_changed', message: 'Server URL changed - tools may be different' }
}
// Check tool exists on server
const serverTool = discoveredTools.find((t) => t.serverId === serverId && t.name === toolName)
if (!serverTool) {
return { type: 'tool_not_found', message: 'Tool not found on server' }
}
// Check schema changed
if (schema && serverTool.inputSchema) {
if (hasSchemaChanged(schema, serverTool.inputSchema)) {
return { type: 'schema_changed', message: 'Tool schema changed' }
}
}
return null
}
/**
* Returns a user-friendly label for the issue badge
*/
export function getIssueBadgeLabel(issue: McpToolIssue): string {
switch (issue.type) {
case 'schema_changed':
return 'stale'
case 'url_changed':
return 'stale'
default:
return 'unavailable'
}
}
/**
* Checks if an issue means the tool cannot be used (vs just being stale)
*/
export function isToolUnavailable(issue: McpToolIssue | null): boolean {
if (!issue) return false
return (
issue.type === 'server_not_found' ||
issue.type === 'server_error' ||
issue.type === 'tool_not_found'
)
}
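A worked example of the detection flow (all data below is hypothetical):

const issue = getMcpToolIssue(
  { serverId: 's1', toolName: 'search', schema: { type: 'object' } },
  [{ id: 's1', connectionStatus: 'connected' }],
  [{ serverId: 's1', name: 'search', inputSchema: { type: 'object', required: [] } }]
)
// Schemas differ beyond 'description' → { type: 'schema_changed', ... }
issue && getIssueBadgeLabel(issue) // 'stale'
isToolUnavailable(issue) // false — stale tools are flagged but still callable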

View File

@@ -6,6 +6,11 @@
// Modern MCP uses Streamable HTTP which handles both HTTP POST and SSE responses
export type McpTransport = 'streamable-http'
export interface McpServerStatusConfig {
consecutiveFailures: number
lastSuccessfulDiscovery: string | null
}
export interface McpServerConfig {
id: string
name: string
@@ -20,6 +25,7 @@ export interface McpServerConfig {
timeout?: number
retries?: number
enabled?: boolean
statusConfig?: McpServerStatusConfig
createdAt?: string
updatedAt?: string
}
@@ -113,8 +119,8 @@ export class McpError extends Error {
}
export class McpConnectionError extends McpError {
constructor(message: string, serverId: string) {
super(`MCP Connection Error for server ${serverId}: ${message}`)
constructor(message: string, serverName: string) {
super(`Failed to connect to "${serverName}": ${message}`)
this.name = 'McpConnectionError'
}
}

View File

@@ -6,10 +6,11 @@ import type { McpApiResponse } from '@/lib/mcp/types'
*/
export const MCP_CONSTANTS = {
EXECUTION_TIMEOUT: 60000,
CACHE_TIMEOUT: 30 * 1000,
CACHE_TIMEOUT: 5 * 60 * 1000, // 5 minutes
DEFAULT_RETRIES: 3,
DEFAULT_CONNECTION_TIMEOUT: 30000,
MAX_CACHE_SIZE: 1000,
MAX_CONSECUTIVE_FAILURES: 3,
} as const
/**

View File

@@ -5,7 +5,7 @@ import {
workspaceNotificationDelivery,
workspaceNotificationSubscription,
} from '@sim/db/schema'
import { and, eq, gte, sql } from 'drizzle-orm'
import { and, eq, gte, inArray, sql } from 'drizzle-orm'
import { v4 as uuidv4 } from 'uuid'
import { isTriggerDevEnabled } from '@/lib/core/config/feature-flags'
import { createLogger } from '@/lib/logs/console/logger'
@@ -45,6 +45,8 @@ async function checkWorkflowInactivity(
}
const windowStart = new Date(Date.now() - (alertConfig.inactivityHours || 24) * 60 * 60 * 1000)
const triggerFilter = subscription.triggerFilter
const levelFilter = subscription.levelFilter
const recentLogs = await db
.select({ id: workflowExecutionLogs.id })
@@ -52,7 +54,9 @@ async function checkWorkflowInactivity(
.where(
and(
eq(workflowExecutionLogs.workflowId, workflowId),
gte(workflowExecutionLogs.createdAt, windowStart)
gte(workflowExecutionLogs.createdAt, windowStart),
inArray(workflowExecutionLogs.trigger, triggerFilter),
inArray(workflowExecutionLogs.level, levelFilter)
)
)
.limit(1)

View File

@@ -29,6 +29,7 @@ import {
PipedriveIcon,
RedditIcon,
SalesforceIcon,
ServiceNowIcon,
ShopifyIcon,
SlackIcon,
SpotifyIcon,
@@ -69,6 +70,7 @@ export type OAuthProvider =
| 'salesforce'
| 'linkedin'
| 'shopify'
| 'servicenow'
| 'zoom'
| 'wordpress'
| 'spotify'
@@ -111,6 +113,7 @@ export type OAuthService =
| 'salesforce'
| 'linkedin'
| 'shopify'
| 'servicenow'
| 'zoom'
| 'wordpress'
| 'spotify'
@@ -618,6 +621,23 @@ export const OAUTH_PROVIDERS: Record<string, OAuthProviderConfig> = {
},
defaultService: 'shopify',
},
servicenow: {
id: 'servicenow',
name: 'ServiceNow',
icon: (props) => ServiceNowIcon(props),
services: {
servicenow: {
id: 'servicenow',
name: 'ServiceNow',
description: 'Manage incidents, tasks, and records in your ServiceNow instance.',
providerId: 'servicenow',
icon: (props) => ServiceNowIcon(props),
baseProviderIcon: (props) => ServiceNowIcon(props),
scopes: ['useraccount'],
},
},
defaultService: 'servicenow',
},
slack: {
id: 'slack',
name: 'Slack',
@@ -1487,6 +1507,21 @@ function getProviderAuthConfig(provider: string): ProviderAuthConfig {
supportsRefreshTokenRotation: false,
}
}
case 'servicenow': {
// ServiceNow OAuth - token endpoint is instance-specific
// This is a placeholder; actual token endpoint is set during authorization
const { clientId, clientSecret } = getCredentials(
env.SERVICENOW_CLIENT_ID,
env.SERVICENOW_CLIENT_SECRET
)
return {
tokenEndpoint: '', // Instance-specific, set during authorization
clientId,
clientSecret,
useBasicAuth: false,
supportsRefreshTokenRotation: true,
}
}
case 'zoom': {
const { clientId, clientSecret } = getCredentials(env.ZOOM_CLIENT_ID, env.ZOOM_CLIENT_SECRET)
return {
@@ -1565,11 +1600,13 @@ function buildAuthRequest(
* This is a server-side utility function to refresh OAuth tokens
* @param providerId The provider ID (e.g., 'google-drive')
* @param refreshToken The refresh token to use
* @param instanceUrl Optional instance URL for providers with instance-specific endpoints (e.g., ServiceNow)
* @returns Object containing the new access token and expiration time in seconds, or null if refresh failed
*/
export async function refreshOAuthToken(
providerId: string,
refreshToken: string
refreshToken: string,
instanceUrl?: string
): Promise<{ accessToken: string; expiresIn: number; refreshToken: string } | null> {
try {
// Get the provider from the providerId (e.g., 'google-drive' -> 'google')
@@ -1578,11 +1615,21 @@ export async function refreshOAuthToken(
// Get provider configuration
const config = getProviderAuthConfig(provider)
// For ServiceNow, the token endpoint is instance-specific
let tokenEndpoint = config.tokenEndpoint
if (provider === 'servicenow') {
if (!instanceUrl) {
logger.error('ServiceNow token refresh requires instance URL')
return null
}
tokenEndpoint = `${instanceUrl.replace(/\/$/, '')}/oauth_token.do`
}
// Build authentication request
const { headers, bodyParams } = buildAuthRequest(config, refreshToken)
// Refresh the token
const response = await fetch(config.tokenEndpoint, {
const response = await fetch(tokenEndpoint, {
method: 'POST',
headers,
body: new URLSearchParams(bodyParams).toString(),
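A hedged call-site sketch for the new parameter (variable names are illustrative):

// For ServiceNow the instance URL is required; other providers ignore it.
const refreshed = await refreshOAuthToken(
  'servicenow',
  storedRefreshToken, // hypothetical: loaded from the credential record
  'https://acme.service-now.com' // derives https://acme.service-now.com/oauth_token.do
)
if (refreshed) {
  const { accessToken, expiresIn, refreshToken } = refreshed
  // persist the rotated refresh token (supportsRefreshTokenRotation: true)
}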

View File

@@ -24,6 +24,10 @@ export const CONTAINER_DIMENSIONS = {
MIN_WIDTH: 400,
MIN_HEIGHT: 200,
HEADER_HEIGHT: 50,
LEFT_PADDING: 16,
RIGHT_PADDING: 80,
TOP_PADDING: 16,
BOTTOM_PADDING: 16,
} as const
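A hedged sketch of how these paddings presumably combine when fitting a loop/parallel container around its children (childrenRight/childrenBottom are hypothetical extents relative to the container origin):

declare const childrenRight: number // rightmost child edge inside the container
declare const childrenBottom: number // bottom-most child edge, below the header

const width = Math.max(
  CONTAINER_DIMENSIONS.MIN_WIDTH,
  childrenRight + CONTAINER_DIMENSIONS.LEFT_PADDING + CONTAINER_DIMENSIONS.RIGHT_PADDING
)
const height = Math.max(
  CONTAINER_DIMENSIONS.MIN_HEIGHT,
  childrenBottom +
    CONTAINER_DIMENSIONS.HEADER_HEIGHT +
    CONTAINER_DIMENSIONS.TOP_PADDING +
    CONTAINER_DIMENSIONS.BOTTOM_PADDING
)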
/**

View File

@@ -8,7 +8,7 @@
"node": ">=20.0.0"
},
"scripts": {
"dev": "next dev --port 3000",
"dev": "next dev --port 7321",
"dev:webpack": "next dev --webpack",
"dev:sockets": "bun run socket-server/index.ts",
"dev:full": "concurrently -n \"App,Realtime\" -c \"cyan,magenta\" \"bun run dev\" \"bun run dev:sockets\"",

View File

@@ -1,35 +1,24 @@
import Anthropic from '@anthropic-ai/sdk'
import { createLogger } from '@/lib/logs/console/logger'
import type { StreamingExecution } from '@/executor/types'
import { MAX_TOOL_ITERATIONS } from '@/providers'
import {
checkForForcedToolUsage,
createReadableStreamFromAnthropicStream,
generateToolUseId,
} from '@/providers/anthropic/utils'
import { getProviderDefaultModel, getProviderModels } from '@/providers/models'
import type {
ProviderConfig,
ProviderRequest,
ProviderResponse,
TimeSegment,
} from '@/providers/types'
import { prepareToolExecution, prepareToolsWithUsageControl } from '@/providers/utils'
import { executeTool } from '@/tools'
import { getProviderDefaultModel, getProviderModels } from '../models'
import type { ProviderConfig, ProviderRequest, ProviderResponse, TimeSegment } from '../types'
import { prepareToolExecution, prepareToolsWithUsageControl, trackForcedToolUsage } from '../utils'
const logger = createLogger('AnthropicProvider')
/**
* Helper to wrap Anthropic streaming into a browser-friendly ReadableStream
*/
function createReadableStreamFromAnthropicStream(
anthropicStream: AsyncIterable<any>
): ReadableStream {
return new ReadableStream({
async start(controller) {
try {
for await (const event of anthropicStream) {
if (event.type === 'content_block_delta' && event.delta?.text) {
controller.enqueue(new TextEncoder().encode(event.delta.text))
}
}
controller.close()
} catch (err) {
controller.error(err)
}
},
})
}
export const anthropicProvider: ProviderConfig = {
id: 'anthropic',
name: 'Anthropic',
@@ -47,11 +36,6 @@ export const anthropicProvider: ProviderConfig = {
const anthropic = new Anthropic({ apiKey: request.apiKey })
// Helper function to generate a simple unique ID for tool uses
const generateToolUseId = (toolName: string) => {
return `${toolName}-${Date.now()}-${Math.random().toString(36).substring(2, 7)}`
}
// Transform messages to Anthropic format
const messages: any[] = []
@@ -373,7 +357,6 @@ ${fieldDescriptions}
const toolResults = []
const currentMessages = [...messages]
let iterationCount = 0
const MAX_ITERATIONS = 10 // Prevent infinite loops
// Track if a forced tool has been used
let hasUsedForcedTool = false
@@ -393,47 +376,20 @@ ${fieldDescriptions}
},
]
// Helper function to check for forced tool usage in Anthropic responses
const checkForForcedToolUsage = (response: any, toolChoice: any) => {
if (
typeof toolChoice === 'object' &&
toolChoice !== null &&
Array.isArray(response.content)
) {
const toolUses = response.content.filter((item: any) => item.type === 'tool_use')
if (toolUses.length > 0) {
// Convert Anthropic tool_use format to a format trackForcedToolUsage can understand
const adaptedToolCalls = toolUses.map((tool: any) => ({
name: tool.name,
}))
// Convert Anthropic tool_choice format to match OpenAI format for tracking
const adaptedToolChoice =
toolChoice.type === 'tool' ? { function: { name: toolChoice.name } } : toolChoice
const result = trackForcedToolUsage(
adaptedToolCalls,
adaptedToolChoice,
logger,
'anthropic',
forcedTools,
usedForcedTools
)
// Make the behavior consistent with the initial check
hasUsedForcedTool = result.hasUsedForcedTool
usedForcedTools = result.usedForcedTools
return result
}
}
return null
// Check if a forced tool was used in the first response
const firstCheckResult = checkForForcedToolUsage(
currentResponse,
originalToolChoice,
forcedTools,
usedForcedTools
)
if (firstCheckResult) {
hasUsedForcedTool = firstCheckResult.hasUsedForcedTool
usedForcedTools = firstCheckResult.usedForcedTools
}
// Check if a forced tool was used in the first response
checkForForcedToolUsage(currentResponse, originalToolChoice)
try {
while (iterationCount < MAX_ITERATIONS) {
while (iterationCount < MAX_TOOL_ITERATIONS) {
// Check for tool calls
const toolUses = currentResponse.content.filter((item) => item.type === 'tool_use')
if (!toolUses || toolUses.length === 0) {
@@ -576,7 +532,16 @@ ${fieldDescriptions}
currentResponse = await anthropic.messages.create(nextPayload)
// Check if any forced tools were used in this response
checkForForcedToolUsage(currentResponse, nextPayload.tool_choice)
const nextCheckResult = checkForForcedToolUsage(
currentResponse,
nextPayload.tool_choice,
forcedTools,
usedForcedTools
)
if (nextCheckResult) {
hasUsedForcedTool = nextCheckResult.hasUsedForcedTool
usedForcedTools = nextCheckResult.usedForcedTools
}
const nextModelEndTime = Date.now()
const thisModelTime = nextModelEndTime - nextModelStartTime
@@ -727,7 +692,6 @@ ${fieldDescriptions}
const toolResults = []
const currentMessages = [...messages]
let iterationCount = 0
const MAX_ITERATIONS = 10 // Prevent infinite loops
// Track if a forced tool has been used
let hasUsedForcedTool = false
@@ -747,47 +711,20 @@ ${fieldDescriptions}
},
]
// Helper function to check for forced tool usage in Anthropic responses
const checkForForcedToolUsage = (response: any, toolChoice: any) => {
if (
typeof toolChoice === 'object' &&
toolChoice !== null &&
Array.isArray(response.content)
) {
const toolUses = response.content.filter((item: any) => item.type === 'tool_use')
if (toolUses.length > 0) {
// Convert Anthropic tool_use format to a format trackForcedToolUsage can understand
const adaptedToolCalls = toolUses.map((tool: any) => ({
name: tool.name,
}))
// Convert Anthropic tool_choice format to match OpenAI format for tracking
const adaptedToolChoice =
toolChoice.type === 'tool' ? { function: { name: toolChoice.name } } : toolChoice
const result = trackForcedToolUsage(
adaptedToolCalls,
adaptedToolChoice,
logger,
'anthropic',
forcedTools,
usedForcedTools
)
// Make the behavior consistent with the initial check
hasUsedForcedTool = result.hasUsedForcedTool
usedForcedTools = result.usedForcedTools
return result
}
}
return null
// Check if a forced tool was used in the first response
const firstCheckResult = checkForForcedToolUsage(
currentResponse,
originalToolChoice,
forcedTools,
usedForcedTools
)
if (firstCheckResult) {
hasUsedForcedTool = firstCheckResult.hasUsedForcedTool
usedForcedTools = firstCheckResult.usedForcedTools
}
// Check if a forced tool was used in the first response
checkForForcedToolUsage(currentResponse, originalToolChoice)
try {
while (iterationCount < MAX_ITERATIONS) {
while (iterationCount < MAX_TOOL_ITERATIONS) {
// Check for tool calls
const toolUses = currentResponse.content.filter((item) => item.type === 'tool_use')
if (!toolUses || toolUses.length === 0) {
@@ -926,7 +863,16 @@ ${fieldDescriptions}
currentResponse = await anthropic.messages.create(nextPayload)
// Check if any forced tools were used in this response
checkForForcedToolUsage(currentResponse, nextPayload.tool_choice)
const nextCheckResult = checkForForcedToolUsage(
currentResponse,
nextPayload.tool_choice,
forcedTools,
usedForcedTools
)
if (nextCheckResult) {
hasUsedForcedTool = nextCheckResult.hasUsedForcedTool
usedForcedTools = nextCheckResult.usedForcedTools
}
const nextModelEndTime = Date.now()
const thisModelTime = nextModelEndTime - nextModelStartTime

View File

@@ -0,0 +1,70 @@
import { createLogger } from '@/lib/logs/console/logger'
import { trackForcedToolUsage } from '@/providers/utils'
const logger = createLogger('AnthropicUtils')
/**
* Helper to wrap Anthropic streaming into a browser-friendly ReadableStream
*/
export function createReadableStreamFromAnthropicStream(
anthropicStream: AsyncIterable<any>
): ReadableStream {
return new ReadableStream({
async start(controller) {
try {
for await (const event of anthropicStream) {
if (event.type === 'content_block_delta' && event.delta?.text) {
controller.enqueue(new TextEncoder().encode(event.delta.text))
}
}
controller.close()
} catch (err) {
controller.error(err)
}
},
})
}
/**
* Helper function to generate a simple unique ID for tool uses
*/
export function generateToolUseId(toolName: string): string {
return `${toolName}-${Date.now()}-${Math.random().toString(36).substring(2, 7)}`
}
/**
* Helper function to check for forced tool usage in Anthropic responses
*/
export function checkForForcedToolUsage(
response: any,
toolChoice: any,
forcedTools: string[],
usedForcedTools: string[]
): { hasUsedForcedTool: boolean; usedForcedTools: string[] } | null {
if (typeof toolChoice === 'object' && toolChoice !== null && Array.isArray(response.content)) {
const toolUses = response.content.filter((item: any) => item.type === 'tool_use')
if (toolUses.length > 0) {
// Convert Anthropic tool_use format to a format trackForcedToolUsage can understand
const adaptedToolCalls = toolUses.map((tool: any) => ({
name: tool.name,
}))
// Convert Anthropic tool_choice format to match OpenAI format for tracking
const adaptedToolChoice =
toolChoice.type === 'tool' ? { function: { name: toolChoice.name } } : toolChoice
const result = trackForcedToolUsage(
adaptedToolCalls,
adaptedToolChoice,
logger,
'anthropic',
forcedTools,
usedForcedTools
)
return result
}
}
return null
}
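A sketch of the adaptation the helper performs (response and toolChoice shapes simplified; the outcome assumes trackForcedToolUsage marks a matching forced tool as used):

const check = checkForForcedToolUsage(
  { content: [{ type: 'tool_use', name: 'get_weather' }] }, // minimal fake response
  { type: 'tool', name: 'get_weather' }, // Anthropic tool_choice
  ['get_weather'],
  []
)
// Internally adapted to OpenAI's { function: { name: 'get_weather' } } shape;
// expected: check?.hasUsedForcedTool === true, usedForcedTools = ['get_weather']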

View File

@@ -2,6 +2,11 @@ import { AzureOpenAI } from 'openai'
import { env } from '@/lib/core/config/env'
import { createLogger } from '@/lib/logs/console/logger'
import type { StreamingExecution } from '@/executor/types'
import { MAX_TOOL_ITERATIONS } from '@/providers'
import {
checkForForcedToolUsage,
createReadableStreamFromAzureOpenAIStream,
} from '@/providers/azure-openai/utils'
import { getProviderDefaultModel, getProviderModels } from '@/providers/models'
import type {
ProviderConfig,
@@ -9,55 +14,11 @@ import type {
ProviderResponse,
TimeSegment,
} from '@/providers/types'
import {
prepareToolExecution,
prepareToolsWithUsageControl,
trackForcedToolUsage,
} from '@/providers/utils'
import { prepareToolExecution, prepareToolsWithUsageControl } from '@/providers/utils'
import { executeTool } from '@/tools'
const logger = createLogger('AzureOpenAIProvider')
/**
* Helper function to convert an Azure OpenAI stream to a standard ReadableStream
* and collect completion metrics
*/
function createReadableStreamFromAzureOpenAIStream(
azureOpenAIStream: any,
onComplete?: (content: string, usage?: any) => void
): ReadableStream {
let fullContent = ''
let usageData: any = null
return new ReadableStream({
async start(controller) {
try {
for await (const chunk of azureOpenAIStream) {
// Check for usage data in the final chunk
if (chunk.usage) {
usageData = chunk.usage
}
const content = chunk.choices[0]?.delta?.content || ''
if (content) {
fullContent += content
controller.enqueue(new TextEncoder().encode(content))
}
}
// Once stream is complete, call the completion callback with the final content and usage
if (onComplete) {
onComplete(fullContent, usageData)
}
controller.close()
} catch (error) {
controller.error(error)
}
},
})
}
/**
* Azure OpenAI provider configuration
*/
@@ -303,26 +264,6 @@ export const azureOpenAIProvider: ProviderConfig = {
const forcedTools = preparedTools?.forcedTools || []
let usedForcedTools: string[] = []
// Helper function to check for forced tool usage in responses
const checkForForcedToolUsage = (
response: any,
toolChoice: string | { type: string; function?: { name: string }; name?: string; any?: any }
) => {
if (typeof toolChoice === 'object' && response.choices[0]?.message?.tool_calls) {
const toolCallsResponse = response.choices[0].message.tool_calls
const result = trackForcedToolUsage(
toolCallsResponse,
toolChoice,
logger,
'azure-openai',
forcedTools,
usedForcedTools
)
hasUsedForcedTool = result.hasUsedForcedTool
usedForcedTools = result.usedForcedTools
}
}
let currentResponse = await azureOpenAI.chat.completions.create(payload)
const firstResponseTime = Date.now() - initialCallTime
@@ -337,7 +278,6 @@ export const azureOpenAIProvider: ProviderConfig = {
const toolResults = []
const currentMessages = [...allMessages]
let iterationCount = 0
const MAX_ITERATIONS = 10 // Prevent infinite loops
// Track time spent in model vs tools
let modelTime = firstResponseTime
@@ -358,9 +298,17 @@ export const azureOpenAIProvider: ProviderConfig = {
]
// Check if a forced tool was used in the first response
checkForForcedToolUsage(currentResponse, originalToolChoice)
const firstCheckResult = checkForForcedToolUsage(
currentResponse,
originalToolChoice,
logger,
forcedTools,
usedForcedTools
)
hasUsedForcedTool = firstCheckResult.hasUsedForcedTool
usedForcedTools = firstCheckResult.usedForcedTools
while (iterationCount < MAX_ITERATIONS) {
while (iterationCount < MAX_TOOL_ITERATIONS) {
// Check for tool calls
const toolCallsInResponse = currentResponse.choices[0]?.message?.tool_calls
if (!toolCallsInResponse || toolCallsInResponse.length === 0) {
@@ -368,7 +316,7 @@ export const azureOpenAIProvider: ProviderConfig = {
}
logger.info(
`Processing ${toolCallsInResponse.length} tool calls (iteration ${iterationCount + 1}/${MAX_ITERATIONS})`
`Processing ${toolCallsInResponse.length} tool calls (iteration ${iterationCount + 1}/${MAX_TOOL_ITERATIONS})`
)
// Track time for tool calls in this batch
@@ -491,7 +439,15 @@ export const azureOpenAIProvider: ProviderConfig = {
currentResponse = await azureOpenAI.chat.completions.create(nextPayload)
// Check if any forced tools were used in this response
checkForForcedToolUsage(currentResponse, nextPayload.tool_choice)
const nextCheckResult = checkForForcedToolUsage(
currentResponse,
nextPayload.tool_choice,
logger,
forcedTools,
usedForcedTools
)
hasUsedForcedTool = nextCheckResult.hasUsedForcedTool
usedForcedTools = nextCheckResult.usedForcedTools
const nextModelEndTime = Date.now()
const thisModelTime = nextModelEndTime - nextModelStartTime

View File

@@ -0,0 +1,70 @@
import type { Logger } from '@/lib/logs/console/logger'
import { trackForcedToolUsage } from '@/providers/utils'
/**
* Helper function to convert an Azure OpenAI stream to a standard ReadableStream
* and collect completion metrics
*/
export function createReadableStreamFromAzureOpenAIStream(
azureOpenAIStream: any,
onComplete?: (content: string, usage?: any) => void
): ReadableStream {
let fullContent = ''
let usageData: any = null
return new ReadableStream({
async start(controller) {
try {
for await (const chunk of azureOpenAIStream) {
if (chunk.usage) {
usageData = chunk.usage
}
const content = chunk.choices[0]?.delta?.content || ''
if (content) {
fullContent += content
controller.enqueue(new TextEncoder().encode(content))
}
}
if (onComplete) {
onComplete(fullContent, usageData)
}
controller.close()
} catch (error) {
controller.error(error)
}
},
})
}
/**
* Helper function to check for forced tool usage in responses
*/
export function checkForForcedToolUsage(
response: any,
toolChoice: string | { type: string; function?: { name: string }; name?: string; any?: any },
logger: Logger,
forcedTools: string[],
usedForcedTools: string[]
): { hasUsedForcedTool: boolean; usedForcedTools: string[] } {
let hasUsedForcedTool = false
let updatedUsedForcedTools = [...usedForcedTools]
if (typeof toolChoice === 'object' && response.choices[0]?.message?.tool_calls) {
const toolCallsResponse = response.choices[0].message.tool_calls
const result = trackForcedToolUsage(
toolCallsResponse,
toolChoice,
logger,
'azure-openai',
forcedTools,
updatedUsedForcedTools
)
hasUsedForcedTool = result.hasUsedForcedTool
updatedUsedForcedTools = result.usedForcedTools
}
return { hasUsedForcedTool, usedForcedTools: updatedUsedForcedTools }
}
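Usage sketch for the stream wrapper (azureStream is a hypothetical chat.completions stream created with stream: true):

const stream = createReadableStreamFromAzureOpenAIStream(azureStream, (content, usage) => {
  // Fires once the stream drains; usage arrives on the final chunk when the
  // request asks for it (e.g., stream_options: { include_usage: true }).
  console.log(`streamed ${content.length} chars`, usage)
})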

View File

@@ -1,6 +1,9 @@
import { Cerebras } from '@cerebras/cerebras_cloud_sdk'
import { createLogger } from '@/lib/logs/console/logger'
import type { StreamingExecution } from '@/executor/types'
import { MAX_TOOL_ITERATIONS } from '@/providers'
import type { CerebrasResponse } from '@/providers/cerebras/types'
import { createReadableStreamFromCerebrasStream } from '@/providers/cerebras/utils'
import { getProviderDefaultModel, getProviderModels } from '@/providers/models'
import type {
ProviderConfig,
@@ -14,35 +17,9 @@ import {
trackForcedToolUsage,
} from '@/providers/utils'
import { executeTool } from '@/tools'
import type { CerebrasResponse } from './types'
const logger = createLogger('CerebrasProvider')
/**
* Helper to convert a Cerebras streaming response (async iterable) into a ReadableStream.
* Enqueues only the model's text delta chunks as UTF-8 encoded bytes.
*/
function createReadableStreamFromCerebrasStream(
cerebrasStream: AsyncIterable<any>
): ReadableStream {
return new ReadableStream({
async start(controller) {
try {
for await (const chunk of cerebrasStream) {
// Expecting delta content similar to OpenAI: chunk.choices[0]?.delta?.content
const content = chunk.choices?.[0]?.delta?.content || ''
if (content) {
controller.enqueue(new TextEncoder().encode(content))
}
}
controller.close()
} catch (error) {
controller.error(error)
}
},
})
}
export const cerebrasProvider: ProviderConfig = {
id: 'cerebras',
name: 'Cerebras',
@@ -223,7 +200,6 @@ export const cerebrasProvider: ProviderConfig = {
const toolResults = []
const currentMessages = [...allMessages]
let iterationCount = 0
const MAX_ITERATIONS = 10 // Prevent infinite loops
// Track time spent in model vs tools
let modelTime = firstResponseTime
@@ -246,7 +222,7 @@ export const cerebrasProvider: ProviderConfig = {
const toolCallSignatures = new Set()
try {
while (iterationCount < MAX_ITERATIONS) {
while (iterationCount < MAX_TOOL_ITERATIONS) {
// Check for tool calls
const toolCallsInResponse = currentResponse.choices[0]?.message?.tool_calls

View File

@@ -0,0 +1,23 @@
/**
* Helper to convert a Cerebras streaming response (async iterable) into a ReadableStream.
* Enqueues only the model's text delta chunks as UTF-8 encoded bytes.
*/
export function createReadableStreamFromCerebrasStream(
cerebrasStream: AsyncIterable<any>
): ReadableStream {
return new ReadableStream({
async start(controller) {
try {
for await (const chunk of cerebrasStream) {
const content = chunk.choices?.[0]?.delta?.content || ''
if (content) {
controller.enqueue(new TextEncoder().encode(content))
}
}
controller.close()
} catch (error) {
controller.error(error)
}
},
})
}
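A consumer-side sketch using the standard Web Streams reader (cerebrasStream is hypothetical):

const reader = createReadableStreamFromCerebrasStream(cerebrasStream).getReader()
const decoder = new TextDecoder()
while (true) {
  const { done, value } = await reader.read()
  if (done) break
  process.stdout.write(decoder.decode(value)) // UTF-8 text deltas only
}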

View File

@@ -1,6 +1,8 @@
import OpenAI from 'openai'
import { createLogger } from '@/lib/logs/console/logger'
import type { StreamingExecution } from '@/executor/types'
import { MAX_TOOL_ITERATIONS } from '@/providers'
import { createReadableStreamFromDeepseekStream } from '@/providers/deepseek/utils'
import { getProviderDefaultModel, getProviderModels } from '@/providers/models'
import type {
ProviderConfig,
@@ -17,28 +19,6 @@ import { executeTool } from '@/tools'
const logger = createLogger('DeepseekProvider')
/**
* Helper function to convert a DeepSeek (OpenAI-compatible) stream to a ReadableStream
* of text chunks that can be consumed by the browser.
*/
function createReadableStreamFromDeepseekStream(deepseekStream: any): ReadableStream {
return new ReadableStream({
async start(controller) {
try {
for await (const chunk of deepseekStream) {
const content = chunk.choices[0]?.delta?.content || ''
if (content) {
controller.enqueue(new TextEncoder().encode(content))
}
}
controller.close()
} catch (error) {
controller.error(error)
}
},
})
}
export const deepseekProvider: ProviderConfig = {
id: 'deepseek',
name: 'Deepseek',
@@ -231,7 +211,6 @@ export const deepseekProvider: ProviderConfig = {
const toolResults = []
const currentMessages = [...allMessages]
let iterationCount = 0
const MAX_ITERATIONS = 10 // Prevent infinite loops
// Track if a forced tool has been used
let hasUsedForcedTool = false
@@ -270,7 +249,7 @@ export const deepseekProvider: ProviderConfig = {
}
try {
while (iterationCount < MAX_ITERATIONS) {
while (iterationCount < MAX_TOOL_ITERATIONS) {
// Check for tool calls
const toolCallsInResponse = currentResponse.choices[0]?.message?.tool_calls
if (!toolCallsInResponse || toolCallsInResponse.length === 0) {

View File

@@ -0,0 +1,21 @@
/**
* Helper function to convert a DeepSeek (OpenAI-compatible) stream to a ReadableStream
* of text chunks that can be consumed by the browser.
*/
export function createReadableStreamFromDeepseekStream(deepseekStream: any): ReadableStream {
return new ReadableStream({
async start(controller) {
try {
for await (const chunk of deepseekStream) {
const content = chunk.choices[0]?.delta?.content || ''
if (content) {
controller.enqueue(new TextEncoder().encode(content))
}
}
controller.close()
} catch (error) {
controller.error(error)
}
},
})
}

View File

@@ -1,5 +1,12 @@
import { createLogger } from '@/lib/logs/console/logger'
import type { StreamingExecution } from '@/executor/types'
import { MAX_TOOL_ITERATIONS } from '@/providers'
import {
cleanSchemaForGemini,
convertToGeminiFormat,
extractFunctionCall,
extractTextContent,
} from '@/providers/google/utils'
import { getProviderDefaultModel, getProviderModels } from '@/providers/models'
import type {
ProviderConfig,
@@ -19,7 +26,13 @@ const logger = createLogger('GoogleProvider')
/**
* Creates a ReadableStream from Google's Gemini stream response
*/
function createReadableStreamFromGeminiStream(response: Response): ReadableStream<Uint8Array> {
function createReadableStreamFromGeminiStream(
response: Response,
onComplete?: (
content: string,
usage?: { promptTokenCount?: number; candidatesTokenCount?: number; totalTokenCount?: number }
) => void
): ReadableStream<Uint8Array> {
const reader = response.body?.getReader()
if (!reader) {
throw new Error('Failed to get reader from response body')
@@ -29,18 +42,24 @@ function createReadableStreamFromGeminiStream(response: Response): ReadableStrea
async start(controller) {
try {
let buffer = ''
let fullContent = ''
let usageData: {
promptTokenCount?: number
candidatesTokenCount?: number
totalTokenCount?: number
} | null = null
while (true) {
const { done, value } = await reader.read()
if (done) {
// Try to parse any remaining buffer as complete JSON
if (buffer.trim()) {
// Processing final buffer
try {
const data = JSON.parse(buffer.trim())
if (data.usageMetadata) {
usageData = data.usageMetadata
}
const candidate = data.candidates?.[0]
if (candidate?.content?.parts) {
// Check if this is a function call
const functionCall = extractFunctionCall(candidate)
if (functionCall) {
logger.debug(
@@ -49,26 +68,27 @@ function createReadableStreamFromGeminiStream(response: Response): ReadableStrea
functionName: functionCall.name,
}
)
// Function calls should not be streamed - end the stream early
if (onComplete) onComplete(fullContent, usageData || undefined)
controller.close()
return
}
const content = extractTextContent(candidate)
if (content) {
fullContent += content
controller.enqueue(new TextEncoder().encode(content))
}
}
} catch (e) {
// Final buffer not valid JSON, checking if it contains JSON array
// Try parsing as JSON array if it starts with [
if (buffer.trim().startsWith('[')) {
try {
const dataArray = JSON.parse(buffer.trim())
if (Array.isArray(dataArray)) {
for (const item of dataArray) {
if (item.usageMetadata) {
usageData = item.usageMetadata
}
const candidate = item.candidates?.[0]
if (candidate?.content?.parts) {
// Check if this is a function call
const functionCall = extractFunctionCall(candidate)
if (functionCall) {
logger.debug(
@@ -77,11 +97,13 @@ function createReadableStreamFromGeminiStream(response: Response): ReadableStrea
functionName: functionCall.name,
}
)
if (onComplete) onComplete(fullContent, usageData || undefined)
controller.close()
return
}
const content = extractTextContent(candidate)
if (content) {
fullContent += content
controller.enqueue(new TextEncoder().encode(content))
}
}
@@ -93,6 +115,7 @@ function createReadableStreamFromGeminiStream(response: Response): ReadableStrea
}
}
}
if (onComplete) onComplete(fullContent, usageData || undefined)
controller.close()
break
}
@@ -100,14 +123,11 @@ function createReadableStreamFromGeminiStream(response: Response): ReadableStrea
const text = new TextDecoder().decode(value)
buffer += text
// Try to find complete JSON objects in buffer
// Look for patterns like: {...}\n{...} or just a single {...}
let searchIndex = 0
while (searchIndex < buffer.length) {
const openBrace = buffer.indexOf('{', searchIndex)
if (openBrace === -1) break
// Try to find the matching closing brace
let braceCount = 0
let inString = false
let escaped = false
@@ -138,28 +158,34 @@ function createReadableStreamFromGeminiStream(response: Response): ReadableStrea
}
if (closeBrace !== -1) {
// Found a complete JSON object
const jsonStr = buffer.substring(openBrace, closeBrace + 1)
try {
const data = JSON.parse(jsonStr)
// JSON parsed successfully from stream
if (data.usageMetadata) {
usageData = data.usageMetadata
}
const candidate = data.candidates?.[0]
// Handle specific finish reasons
if (candidate?.finishReason === 'UNEXPECTED_TOOL_CALL') {
logger.warn('Gemini returned UNEXPECTED_TOOL_CALL in streaming mode', {
finishReason: candidate.finishReason,
hasContent: !!candidate?.content,
hasParts: !!candidate?.content?.parts,
})
// This indicates a configuration issue - tools might be improperly configured for streaming
continue
const textContent = extractTextContent(candidate)
if (textContent) {
fullContent += textContent
controller.enqueue(new TextEncoder().encode(textContent))
}
if (onComplete) onComplete(fullContent, usageData || undefined)
controller.close()
return
}
if (candidate?.content?.parts) {
// Check if this is a function call
const functionCall = extractFunctionCall(candidate)
if (functionCall) {
logger.debug(
@@ -168,13 +194,13 @@ function createReadableStreamFromGeminiStream(response: Response): ReadableStrea
functionName: functionCall.name,
}
)
// Function calls should not be streamed - we need to end the stream
// and let the non-streaming tool execution flow handle this
if (onComplete) onComplete(fullContent, usageData || undefined)
controller.close()
return
}
const content = extractTextContent(candidate)
if (content) {
fullContent += content
controller.enqueue(new TextEncoder().encode(content))
}
}
@@ -185,7 +211,6 @@ function createReadableStreamFromGeminiStream(response: Response): ReadableStrea
})
}
// Remove processed JSON from buffer and continue searching
buffer = buffer.substring(closeBrace + 1)
searchIndex = 0
} else {
@@ -232,45 +257,36 @@ export const googleProvider: ProviderConfig = {
streaming: !!request.stream,
})
// Start execution timer for the entire provider execution
const providerStartTime = Date.now()
const providerStartTimeISO = new Date(providerStartTime).toISOString()
try {
// Convert messages to Gemini format
const { contents, tools, systemInstruction } = convertToGeminiFormat(request)
const requestedModel = request.model || 'gemini-2.5-pro'
// Build request payload
const payload: any = {
contents,
generationConfig: {},
}
// Add temperature if specified
if (request.temperature !== undefined && request.temperature !== null) {
payload.generationConfig.temperature = request.temperature
}
// Add max tokens if specified
if (request.maxTokens !== undefined) {
payload.generationConfig.maxOutputTokens = request.maxTokens
}
// Add system instruction if provided
if (systemInstruction) {
payload.systemInstruction = systemInstruction
}
// Add structured output format if requested (but not when tools are present)
if (request.responseFormat && !tools?.length) {
const responseFormatSchema = request.responseFormat.schema || request.responseFormat
// Clean the schema using our helper function
const cleanSchema = cleanSchemaForGemini(responseFormatSchema)
// Use Gemini's native structured output approach
payload.generationConfig.responseMimeType = 'application/json'
payload.generationConfig.responseSchema = cleanSchema
@@ -284,7 +300,6 @@ export const googleProvider: ProviderConfig = {
)
}
// Handle tools and tool usage control
let preparedTools: ReturnType<typeof prepareToolsWithUsageControl> | null = null
if (tools?.length) {
@@ -298,7 +313,6 @@ export const googleProvider: ProviderConfig = {
},
]
// Add Google-specific tool configuration
if (toolConfig) {
payload.toolConfig = toolConfig
}
@@ -313,14 +327,10 @@ export const googleProvider: ProviderConfig = {
}
}
// Make the API request
const initialCallTime = Date.now()
// Disable streaming for initial requests when tools are present to avoid function calls in streams
// Only enable streaming for the final response after tool execution
const shouldStream = request.stream && !tools?.length
// Use streamGenerateContent for streaming requests
const endpoint = shouldStream
? `https://generativelanguage.googleapis.com/v1beta/models/${requestedModel}:streamGenerateContent?key=${request.apiKey}`
: `https://generativelanguage.googleapis.com/v1beta/models/${requestedModel}:generateContent?key=${request.apiKey}`
@@ -352,16 +362,11 @@ export const googleProvider: ProviderConfig = {
const firstResponseTime = Date.now() - initialCallTime
// Handle streaming response
if (shouldStream) {
logger.info('Handling Google Gemini streaming response')
// Create a ReadableStream from the Google Gemini stream
const stream = createReadableStreamFromGeminiStream(response)
// Create an object that combines the stream with execution metadata
const streamingExecution: StreamingExecution = {
stream,
const streamingResult: StreamingExecution = {
stream: null as any,
execution: {
success: true,
output: {
@@ -389,7 +394,6 @@ export const googleProvider: ProviderConfig = {
duration: firstResponseTime,
},
],
// Cost will be calculated in logger
},
},
logs: [],
@@ -402,18 +406,49 @@ export const googleProvider: ProviderConfig = {
},
}
return streamingExecution
streamingResult.stream = createReadableStreamFromGeminiStream(
response,
(content, usage) => {
streamingResult.execution.output.content = content
const streamEndTime = Date.now()
const streamEndTimeISO = new Date(streamEndTime).toISOString()
if (streamingResult.execution.output.providerTiming) {
streamingResult.execution.output.providerTiming.endTime = streamEndTimeISO
streamingResult.execution.output.providerTiming.duration =
streamEndTime - providerStartTime
if (streamingResult.execution.output.providerTiming.timeSegments?.[0]) {
streamingResult.execution.output.providerTiming.timeSegments[0].endTime =
streamEndTime
streamingResult.execution.output.providerTiming.timeSegments[0].duration =
streamEndTime - providerStartTime
}
}
if (usage) {
streamingResult.execution.output.tokens = {
prompt: usage.promptTokenCount || 0,
completion: usage.candidatesTokenCount || 0,
total:
usage.totalTokenCount ||
(usage.promptTokenCount || 0) + (usage.candidatesTokenCount || 0),
}
}
}
)
return streamingResult
}
let geminiResponse = await response.json()
// Check structured output format
if (payload.generationConfig?.responseSchema) {
const candidate = geminiResponse.candidates?.[0]
if (candidate?.content?.parts?.[0]?.text) {
const text = candidate.content.parts[0].text
try {
// Validate JSON structure
JSON.parse(text)
logger.info('Successfully received structured JSON output')
} catch (_e) {
@@ -422,7 +457,6 @@ export const googleProvider: ProviderConfig = {
}
}
// Initialize response tracking variables
let content = ''
let tokens = {
prompt: 0,
@@ -432,16 +466,13 @@ export const googleProvider: ProviderConfig = {
const toolCalls = []
const toolResults = []
let iterationCount = 0
const MAX_ITERATIONS = 10 // Prevent infinite loops
// Track forced tools and their usage (similar to OpenAI pattern)
const originalToolConfig = preparedTools?.toolConfig
const forcedTools = preparedTools?.forcedTools || []
let usedForcedTools: string[] = []
let hasUsedForcedTool = false
let currentToolConfig = originalToolConfig
// Helper function to check for forced tool usage in responses
const checkForForcedToolUsage = (functionCall: { name: string; args: any }) => {
if (currentToolConfig && forcedTools.length > 0) {
const toolCallsForTracking = [{ name: functionCall.name, arguments: functionCall.args }]
@@ -466,11 +497,9 @@ export const googleProvider: ProviderConfig = {
}
}
// Track time spent in model vs tools
let modelTime = firstResponseTime
let toolsTime = 0
// Track each model and tool call segment with timestamps
const timeSegments: TimeSegment[] = [
{
type: 'model',
@@ -482,46 +511,50 @@ export const googleProvider: ProviderConfig = {
]
try {
// Extract content or function calls from initial response
const candidate = geminiResponse.candidates?.[0]
// Check if response contains function calls
if (candidate?.finishReason === 'UNEXPECTED_TOOL_CALL') {
logger.warn(
'Gemini returned UNEXPECTED_TOOL_CALL - model attempted to call a tool that was not provided',
{
finishReason: candidate.finishReason,
hasContent: !!candidate?.content,
hasParts: !!candidate?.content?.parts,
}
)
content = extractTextContent(candidate)
}
const functionCall = extractFunctionCall(candidate)
if (functionCall) {
logger.info(`Received function call from Gemini: ${functionCall.name}`)
// Process function calls in a loop
while (iterationCount < MAX_ITERATIONS) {
// Get the latest function calls
while (iterationCount < MAX_TOOL_ITERATIONS) {
const latestResponse = geminiResponse.candidates?.[0]
const latestFunctionCall = extractFunctionCall(latestResponse)
if (!latestFunctionCall) {
// No more function calls - extract final text content
content = extractTextContent(latestResponse)
break
}
logger.info(
`Processing function call: ${latestFunctionCall.name} (iteration ${iterationCount + 1}/${MAX_ITERATIONS})`
`Processing function call: ${latestFunctionCall.name} (iteration ${iterationCount + 1}/${MAX_TOOL_ITERATIONS})`
)
// Track time for tool calls
const toolsStartTime = Date.now()
try {
const toolName = latestFunctionCall.name
const toolArgs = latestFunctionCall.args || {}
// Get the tool from the tools registry
const tool = request.tools?.find((t) => t.id === toolName)
if (!tool) {
logger.warn(`Tool ${toolName} not found in registry, skipping`)
break
}
// Execute the tool
const toolCallStartTime = Date.now()
const { toolParams, executionParams } = prepareToolExecution(tool, toolArgs, request)
@@ -529,7 +562,6 @@ export const googleProvider: ProviderConfig = {
const toolCallEndTime = Date.now()
const toolCallDuration = toolCallEndTime - toolCallStartTime
// Add to time segments for both success and failure
timeSegments.push({
type: 'tool',
name: toolName,
@@ -538,13 +570,11 @@ export const googleProvider: ProviderConfig = {
duration: toolCallDuration,
})
// Prepare result content for the LLM
let resultContent: any
if (result.success) {
toolResults.push(result.output)
resultContent = result.output
} else {
// Include error information so LLM can respond appropriately
resultContent = {
error: true,
message: result.error || 'Tool execution failed',
@@ -562,14 +592,10 @@ export const googleProvider: ProviderConfig = {
success: result.success,
})
// Prepare for next request with simplified messages
// Use simple format: original query + most recent function call + result
const simplifiedMessages = [
// Original user request - find the first user request
...(contents.filter((m) => m.role === 'user').length > 0
? [contents.filter((m) => m.role === 'user')[0]]
: [contents[0]]),
// Function call from model
{
role: 'model',
parts: [
@@ -581,7 +607,6 @@ export const googleProvider: ProviderConfig = {
},
],
},
// Function response - but use USER role since Gemini only accepts user or model
{
role: 'user',
parts: [
@@ -592,35 +617,27 @@ export const googleProvider: ProviderConfig = {
},
]
// Calculate tool call time
const thisToolsTime = Date.now() - toolsStartTime
toolsTime += thisToolsTime
// Check for forced tool usage and update configuration
checkForForcedToolUsage(latestFunctionCall)
// Make the next request with updated messages
const nextModelStartTime = Date.now()
try {
// Check if we should stream the final response after tool calls
if (request.stream) {
// Create a payload for the streaming response after tool calls
const streamingPayload = {
...payload,
contents: simplifiedMessages,
}
// Check if we should remove tools and enable structured output for final response
const allForcedToolsUsed =
forcedTools.length > 0 && usedForcedTools.length === forcedTools.length
if (allForcedToolsUsed && request.responseFormat) {
// All forced tools have been used, we can now remove tools and enable structured output
streamingPayload.tools = undefined
streamingPayload.toolConfig = undefined
// Add structured output format for final response
const responseFormatSchema =
request.responseFormat.schema || request.responseFormat
const cleanSchema = cleanSchemaForGemini(responseFormatSchema)
@@ -633,7 +650,6 @@ export const googleProvider: ProviderConfig = {
logger.info('Using structured output for final response after tool execution')
} else {
// Use updated tool configuration if available, otherwise default to AUTO
if (currentToolConfig) {
streamingPayload.toolConfig = currentToolConfig
} else {
@@ -641,11 +657,8 @@ export const googleProvider: ProviderConfig = {
}
}
// Check if we should handle this as a potential forced tool call
// First make a non-streaming request to see if we get a function call
const checkPayload = {
...streamingPayload,
// Remove stream property to get non-streaming response
}
checkPayload.stream = undefined
@@ -677,7 +690,6 @@ export const googleProvider: ProviderConfig = {
const checkFunctionCall = extractFunctionCall(checkCandidate)
if (checkFunctionCall) {
// We have a function call - handle it in non-streaming mode
logger.info(
'Function call detected in follow-up, handling in non-streaming mode',
{
@@ -685,10 +697,8 @@ export const googleProvider: ProviderConfig = {
}
)
// Update geminiResponse to continue the tool execution loop
geminiResponse = checkResult
// Update token counts if available
if (checkResult.usageMetadata) {
tokens.prompt += checkResult.usageMetadata.promptTokenCount || 0
tokens.completion += checkResult.usageMetadata.candidatesTokenCount || 0
@@ -697,12 +707,10 @@ export const googleProvider: ProviderConfig = {
(checkResult.usageMetadata.candidatesTokenCount || 0)
}
// Calculate timing for this model call
const nextModelEndTime = Date.now()
const thisModelTime = nextModelEndTime - nextModelStartTime
modelTime += thisModelTime
// Add to time segments
timeSegments.push({
type: 'model',
name: `Model response (iteration ${iterationCount + 1})`,
@@ -711,14 +719,32 @@ export const googleProvider: ProviderConfig = {
duration: thisModelTime,
})
// Continue the loop to handle the function call
iterationCount++
continue
}
// No function call - proceed with streaming
logger.info('No function call detected, proceeding with streaming response')
// Make the streaming request with streamGenerateContent endpoint
// Apply structured output for the final response if responseFormat is specified
// This works regardless of whether tools were forced or auto
if (request.responseFormat) {
streamingPayload.tools = undefined
streamingPayload.toolConfig = undefined
const responseFormatSchema =
request.responseFormat.schema || request.responseFormat
const cleanSchema = cleanSchemaForGemini(responseFormatSchema)
if (!streamingPayload.generationConfig) {
streamingPayload.generationConfig = {}
}
streamingPayload.generationConfig.responseMimeType = 'application/json'
streamingPayload.generationConfig.responseSchema = cleanSchema
logger.info(
'Using structured output for final streaming response after tool execution'
)
}
const streamingResponse = await fetch(
`https://generativelanguage.googleapis.com/v1beta/models/${requestedModel}:streamGenerateContent?key=${request.apiKey}`,
{
@@ -742,15 +768,10 @@ export const googleProvider: ProviderConfig = {
)
}
// Create a stream from the response
const stream = createReadableStreamFromGeminiStream(streamingResponse)
// Calculate timing information
const nextModelEndTime = Date.now()
const thisModelTime = nextModelEndTime - nextModelStartTime
modelTime += thisModelTime
// Add to time segments
timeSegments.push({
type: 'model',
name: 'Final streaming response after tool calls',
@@ -759,9 +780,8 @@ export const googleProvider: ProviderConfig = {
duration: thisModelTime,
})
// Return a streaming execution with tool call information
const streamingExecution: StreamingExecution = {
stream,
stream: null as any,
execution: {
success: true,
output: {
@@ -786,7 +806,6 @@ export const googleProvider: ProviderConfig = {
iterations: iterationCount + 1,
timeSegments,
},
// Cost will be calculated in logger
},
logs: [],
metadata: {
@@ -798,25 +817,55 @@ export const googleProvider: ProviderConfig = {
},
}
streamingExecution.stream = createReadableStreamFromGeminiStream(
streamingResponse,
(content, usage) => {
streamingExecution.execution.output.content = content
const streamEndTime = Date.now()
const streamEndTimeISO = new Date(streamEndTime).toISOString()
if (streamingExecution.execution.output.providerTiming) {
streamingExecution.execution.output.providerTiming.endTime =
streamEndTimeISO
streamingExecution.execution.output.providerTiming.duration =
streamEndTime - providerStartTime
}
if (usage) {
const existingTokens = streamingExecution.execution.output.tokens || {
prompt: 0,
completion: 0,
total: 0,
}
streamingExecution.execution.output.tokens = {
prompt: (existingTokens.prompt || 0) + (usage.promptTokenCount || 0),
completion:
(existingTokens.completion || 0) + (usage.candidatesTokenCount || 0),
total:
(existingTokens.total || 0) +
(usage.totalTokenCount ||
(usage.promptTokenCount || 0) + (usage.candidatesTokenCount || 0)),
}
}
}
)
return streamingExecution
}
// Make the next request for non-streaming response
const nextPayload = {
...payload,
contents: simplifiedMessages,
}
// Check if we should remove tools and enable structured output for final response
const allForcedToolsUsed =
forcedTools.length > 0 && usedForcedTools.length === forcedTools.length
if (allForcedToolsUsed && request.responseFormat) {
// All forced tools have been used, we can now remove tools and enable structured output
nextPayload.tools = undefined
nextPayload.toolConfig = undefined
// Add structured output format for final response
const responseFormatSchema =
request.responseFormat.schema || request.responseFormat
const cleanSchema = cleanSchemaForGemini(responseFormatSchema)
@@ -831,7 +880,6 @@ export const googleProvider: ProviderConfig = {
'Using structured output for final non-streaming response after tool execution'
)
} else {
// Add updated tool configuration if available
if (currentToolConfig) {
nextPayload.toolConfig = currentToolConfig
}
@@ -864,7 +912,6 @@ export const googleProvider: ProviderConfig = {
const nextModelEndTime = Date.now()
const thisModelTime = nextModelEndTime - nextModelStartTime
// Add to time segments
timeSegments.push({
type: 'model',
name: `Model response (iteration ${iterationCount + 1})`,
@@ -873,15 +920,65 @@ export const googleProvider: ProviderConfig = {
duration: thisModelTime,
})
// Add to model time
modelTime += thisModelTime
// Check if we need to continue or break
const nextCandidate = geminiResponse.candidates?.[0]
const nextFunctionCall = extractFunctionCall(nextCandidate)
if (!nextFunctionCall) {
content = extractTextContent(nextCandidate)
// If responseFormat is specified, make one final request with structured output
if (request.responseFormat) {
const finalPayload = {
...payload,
contents: nextPayload.contents,
tools: undefined,
toolConfig: undefined,
}
const responseFormatSchema =
request.responseFormat.schema || request.responseFormat
const cleanSchema = cleanSchemaForGemini(responseFormatSchema)
if (!finalPayload.generationConfig) {
finalPayload.generationConfig = {}
}
finalPayload.generationConfig.responseMimeType = 'application/json'
finalPayload.generationConfig.responseSchema = cleanSchema
logger.info('Making final request with structured output after tool execution')
const finalResponse = await fetch(
`https://generativelanguage.googleapis.com/v1beta/models/${requestedModel}:generateContent?key=${request.apiKey}`,
{
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify(finalPayload),
}
)
if (finalResponse.ok) {
const finalResult = await finalResponse.json()
const finalCandidate = finalResult.candidates?.[0]
content = extractTextContent(finalCandidate)
if (finalResult.usageMetadata) {
tokens.prompt += finalResult.usageMetadata.promptTokenCount || 0
tokens.completion += finalResult.usageMetadata.candidatesTokenCount || 0
tokens.total +=
(finalResult.usageMetadata.promptTokenCount || 0) +
(finalResult.usageMetadata.candidatesTokenCount || 0)
}
} else {
logger.warn(
'Failed to get structured output, falling back to regular response'
)
content = extractTextContent(nextCandidate)
}
} else {
content = extractTextContent(nextCandidate)
}
break
}
@@ -902,7 +999,6 @@ export const googleProvider: ProviderConfig = {
}
}
} else {
// Regular text response
content = extractTextContent(candidate)
}
} catch (error) {
@@ -911,18 +1007,15 @@ export const googleProvider: ProviderConfig = {
iterationCount,
})
// Don't rethrow, so we can still return partial results
if (!content && toolCalls.length > 0) {
content = `Tool call(s) executed: ${toolCalls.map((t) => t.name).join(', ')}. Results are available in the tool results.`
}
}
// Calculate overall timing
const providerEndTime = Date.now()
const providerEndTimeISO = new Date(providerEndTime).toISOString()
const totalDuration = providerEndTime - providerStartTime
// Extract token usage if available
if (geminiResponse.usageMetadata) {
tokens = {
prompt: geminiResponse.usageMetadata.promptTokenCount || 0,
@@ -949,10 +1042,8 @@ export const googleProvider: ProviderConfig = {
iterations: iterationCount + 1,
timeSegments: timeSegments,
},
// Cost will be calculated in logger
}
} catch (error) {
// Include timing information even for errors
const providerEndTime = Date.now()
const providerEndTimeISO = new Date(providerEndTime).toISOString()
const totalDuration = providerEndTime - providerStartTime
@@ -962,7 +1053,6 @@ export const googleProvider: ProviderConfig = {
duration: totalDuration,
})
// Create a new error with timing information
const enhancedError = new Error(error instanceof Error ? error.message : String(error))
// @ts-ignore - Adding timing property to the error
enhancedError.timing = {
@@ -975,200 +1065,3 @@ export const googleProvider: ProviderConfig = {
}
},
}
/**
* Helper function to remove additionalProperties from a schema object
* and perform a deep copy of the schema to avoid modifying the original
*/
function cleanSchemaForGemini(schema: any): any {
// Handle base cases
if (schema === null || schema === undefined) return schema
if (typeof schema !== 'object') return schema
if (Array.isArray(schema)) {
return schema.map((item) => cleanSchemaForGemini(item))
}
// Create a new object for the deep copy
const cleanedSchema: any = {}
// Process each property in the schema
for (const key in schema) {
// Skip additionalProperties
if (key === 'additionalProperties') continue
// Deep copy nested objects
cleanedSchema[key] = cleanSchemaForGemini(schema[key])
}
return cleanedSchema
}
/**
* Helper function to extract content from a Gemini response, handling structured output
*/
function extractTextContent(candidate: any): string {
if (!candidate?.content?.parts) return ''
// Check for JSON response (typically from structured output)
if (candidate.content.parts?.length === 1 && candidate.content.parts[0].text) {
const text = candidate.content.parts[0].text
if (text && (text.trim().startsWith('{') || text.trim().startsWith('['))) {
try {
JSON.parse(text) // Validate JSON
return text // Return valid JSON as-is
} catch (_e) {
/* Not valid JSON, continue with normal extraction */
}
}
}
// Standard text extraction
return candidate.content.parts
.filter((part: any) => part.text)
.map((part: any) => part.text)
.join('\n')
}
/**
* Helper function to extract a function call from a Gemini response
*/
function extractFunctionCall(candidate: any): { name: string; args: any } | null {
if (!candidate?.content?.parts) return null
// Check for functionCall in parts
for (const part of candidate.content.parts) {
if (part.functionCall) {
const args = part.functionCall.args || {}
// Parse string args if they look like JSON
if (
typeof part.functionCall.args === 'string' &&
part.functionCall.args.trim().startsWith('{')
) {
try {
return { name: part.functionCall.name, args: JSON.parse(part.functionCall.args) }
} catch (_e) {
return { name: part.functionCall.name, args: part.functionCall.args }
}
}
return { name: part.functionCall.name, args }
}
}
// Check for alternative function_call format
if (candidate.content.function_call) {
const args =
typeof candidate.content.function_call.arguments === 'string'
? JSON.parse(candidate.content.function_call.arguments || '{}')
: candidate.content.function_call.arguments || {}
return { name: candidate.content.function_call.name, args }
}
return null
}
/**
* Convert OpenAI-style request format to Gemini format
*/
function convertToGeminiFormat(request: ProviderRequest): {
contents: any[]
tools: any[] | undefined
systemInstruction: any | undefined
} {
const contents = []
let systemInstruction
// Handle system prompt
if (request.systemPrompt) {
systemInstruction = { parts: [{ text: request.systemPrompt }] }
}
// Add context as user message if present
if (request.context) {
contents.push({ role: 'user', parts: [{ text: request.context }] })
}
// Process messages
if (request.messages && request.messages.length > 0) {
for (const message of request.messages) {
if (message.role === 'system') {
// Add to system instruction
if (!systemInstruction) {
systemInstruction = { parts: [{ text: message.content }] }
} else {
// Append to existing system instruction
systemInstruction.parts[0].text = `${systemInstruction.parts[0].text || ''}\n${message.content}`
}
} else if (message.role === 'user' || message.role === 'assistant') {
// Convert to Gemini role format
const geminiRole = message.role === 'user' ? 'user' : 'model'
// Add text content
if (message.content) {
contents.push({ role: geminiRole, parts: [{ text: message.content }] })
}
// Handle tool calls
if (message.role === 'assistant' && message.tool_calls && message.tool_calls.length > 0) {
const functionCalls = message.tool_calls.map((toolCall) => ({
functionCall: {
name: toolCall.function?.name,
args: JSON.parse(toolCall.function?.arguments || '{}'),
},
}))
contents.push({ role: 'model', parts: functionCalls })
}
} else if (message.role === 'tool') {
// Convert tool response (Gemini only accepts user/model roles)
contents.push({
role: 'user',
parts: [{ text: `Function result: ${message.content}` }],
})
}
}
}
// Convert tools to Gemini function declarations
const tools = request.tools?.map((tool) => {
const toolParameters = { ...(tool.parameters || {}) }
// Process schema properties
if (toolParameters.properties) {
const properties = { ...toolParameters.properties }
const required = toolParameters.required ? [...toolParameters.required] : []
// Remove defaults and optional parameters
for (const key in properties) {
const prop = properties[key] as any
if (prop.default !== undefined) {
const { default: _, ...cleanProp } = prop
properties[key] = cleanProp
}
}
// Build Gemini-compatible parameters schema
const parameters = {
type: toolParameters.type || 'object',
properties,
...(required.length > 0 ? { required } : {}),
}
// Clean schema for Gemini
return {
name: tool.id,
description: tool.description || `Execute the ${tool.id} function`,
parameters: cleanSchemaForGemini(parameters),
}
}
// Simple schema case
return {
name: tool.id,
description: tool.description || `Execute the ${tool.id} function`,
parameters: cleanSchemaForGemini(toolParameters),
}
})
return { contents, tools, systemInstruction }
}
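For orientation, a minimal sketch of the native structured-output request this provider builds, assuming hypothetical apiKey, model, schema, and prompt inputs; the real payload above additionally carries conversation contents, tools, and the rest of generationConfig:

// Minimal sketch (hypothetical apiKey/model/schema/prompt inputs) of the
// Gemini structured-output call: responseMimeType + responseSchema set on
// generationConfig, with the schema cleaned for Gemini first.
async function fetchStructuredJson(apiKey: string, model: string, schema: any, prompt: string) {
  const payload = {
    contents: [{ role: 'user', parts: [{ text: prompt }] }],
    generationConfig: {
      responseMimeType: 'application/json',
      responseSchema: cleanSchemaForGemini(schema),
    },
  }
  const res = await fetch(
    `https://generativelanguage.googleapis.com/v1beta/models/${model}:generateContent?key=${apiKey}`,
    { method: 'POST', headers: { 'Content-Type': 'application/json' }, body: JSON.stringify(payload) }
  )
  const json = await res.json()
  return JSON.parse(json.candidates?.[0]?.content?.parts?.[0]?.text ?? 'null')
}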

View File

@@ -0,0 +1,171 @@
import type { ProviderRequest } from '@/providers/types'
/**
* Removes additionalProperties from a schema object (not supported by Gemini)
*/
export function cleanSchemaForGemini(schema: any): any {
if (schema === null || schema === undefined) return schema
if (typeof schema !== 'object') return schema
if (Array.isArray(schema)) {
return schema.map((item) => cleanSchemaForGemini(item))
}
const cleanedSchema: any = {}
for (const key in schema) {
if (key === 'additionalProperties') continue
cleanedSchema[key] = cleanSchemaForGemini(schema[key])
}
return cleanedSchema
}
/**
* Extracts text content from a Gemini response candidate, handling structured output
*/
export function extractTextContent(candidate: any): string {
if (!candidate?.content?.parts) return ''
if (candidate.content.parts?.length === 1 && candidate.content.parts[0].text) {
const text = candidate.content.parts[0].text
if (text && (text.trim().startsWith('{') || text.trim().startsWith('['))) {
try {
JSON.parse(text)
return text
} catch (_e) {
/* Not valid JSON, continue with normal extraction */
}
}
}
return candidate.content.parts
.filter((part: any) => part.text)
.map((part: any) => part.text)
.join('\n')
}
/**
* Extracts a function call from a Gemini response candidate
*/
export function extractFunctionCall(candidate: any): { name: string; args: any } | null {
if (!candidate?.content?.parts) return null
for (const part of candidate.content.parts) {
if (part.functionCall) {
const args = part.functionCall.args || {}
if (
typeof part.functionCall.args === 'string' &&
part.functionCall.args.trim().startsWith('{')
) {
try {
return { name: part.functionCall.name, args: JSON.parse(part.functionCall.args) }
} catch (_e) {
return { name: part.functionCall.name, args: part.functionCall.args }
}
}
return { name: part.functionCall.name, args }
}
}
if (candidate.content.function_call) {
const args =
typeof candidate.content.function_call.arguments === 'string'
? JSON.parse(candidate.content.function_call.arguments || '{}')
: candidate.content.function_call.arguments || {}
return { name: candidate.content.function_call.name, args }
}
return null
}
/**
* Converts OpenAI-style request format to Gemini format
*/
export function convertToGeminiFormat(request: ProviderRequest): {
contents: any[]
tools: any[] | undefined
systemInstruction: any | undefined
} {
const contents: any[] = []
let systemInstruction
if (request.systemPrompt) {
systemInstruction = { parts: [{ text: request.systemPrompt }] }
}
if (request.context) {
contents.push({ role: 'user', parts: [{ text: request.context }] })
}
if (request.messages && request.messages.length > 0) {
for (const message of request.messages) {
if (message.role === 'system') {
if (!systemInstruction) {
systemInstruction = { parts: [{ text: message.content }] }
} else {
systemInstruction.parts[0].text = `${systemInstruction.parts[0].text || ''}\n${message.content}`
}
} else if (message.role === 'user' || message.role === 'assistant') {
const geminiRole = message.role === 'user' ? 'user' : 'model'
if (message.content) {
contents.push({ role: geminiRole, parts: [{ text: message.content }] })
}
if (message.role === 'assistant' && message.tool_calls && message.tool_calls.length > 0) {
const functionCalls = message.tool_calls.map((toolCall) => ({
functionCall: {
name: toolCall.function?.name,
args: JSON.parse(toolCall.function?.arguments || '{}'),
},
}))
contents.push({ role: 'model', parts: functionCalls })
}
} else if (message.role === 'tool') {
contents.push({
role: 'user',
parts: [{ text: `Function result: ${message.content}` }],
})
}
}
}
const tools = request.tools?.map((tool) => {
const toolParameters = { ...(tool.parameters || {}) }
if (toolParameters.properties) {
const properties = { ...toolParameters.properties }
const required = toolParameters.required ? [...toolParameters.required] : []
for (const key in properties) {
const prop = properties[key] as any
if (prop.default !== undefined) {
const { default: _, ...cleanProp } = prop
properties[key] = cleanProp
}
}
const parameters = {
type: toolParameters.type || 'object',
properties,
...(required.length > 0 ? { required } : {}),
}
return {
name: tool.id,
description: tool.description || `Execute the ${tool.id} function`,
parameters: cleanSchemaForGemini(parameters),
}
}
return {
name: tool.id,
description: tool.description || `Execute the ${tool.id} function`,
parameters: cleanSchemaForGemini(toolParameters),
}
})
return { contents, tools, systemInstruction }
}
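A quick usage sketch for the helpers above; the candidate and schema objects are hand-built stand-ins mirroring the shapes extractFunctionCall and cleanSchemaForGemini expect:

// Hand-built stand-in for a Gemini response candidate carrying a function call.
const candidate = {
  content: { parts: [{ functionCall: { name: 'get_weather', args: { city: 'Berlin' } } }] },
}
const call = extractFunctionCall(candidate)
// -> { name: 'get_weather', args: { city: 'Berlin' } }

// cleanSchemaForGemini removes only additionalProperties; all other keys
// pass through untouched (tool-parameter defaults are stripped elsewhere).
const schema = {
  type: 'object',
  additionalProperties: false,
  properties: { city: { type: 'string' } },
}
const cleaned = cleanSchemaForGemini(schema)
// -> { type: 'object', properties: { city: { type: 'string' } } }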

View File

@@ -1,6 +1,8 @@
import { Groq } from 'groq-sdk'
import { createLogger } from '@/lib/logs/console/logger'
import type { StreamingExecution } from '@/executor/types'
import { MAX_TOOL_ITERATIONS } from '@/providers'
import { createReadableStreamFromGroqStream } from '@/providers/groq/utils'
import { getProviderDefaultModel, getProviderModels } from '@/providers/models'
import type {
ProviderConfig,
@@ -17,27 +19,6 @@ import { executeTool } from '@/tools'
const logger = createLogger('GroqProvider')
/**
* Helper to wrap Groq streaming into a browser-friendly ReadableStream
* of raw assistant text chunks.
*/
function createReadableStreamFromGroqStream(groqStream: any): ReadableStream {
return new ReadableStream({
async start(controller) {
try {
for await (const chunk of groqStream) {
if (chunk.choices[0]?.delta?.content) {
controller.enqueue(new TextEncoder().encode(chunk.choices[0].delta.content))
}
}
controller.close()
} catch (err) {
controller.error(err)
}
},
})
}
export const groqProvider: ProviderConfig = {
id: 'groq',
name: 'Groq',
@@ -225,7 +206,6 @@ export const groqProvider: ProviderConfig = {
const toolResults = []
const currentMessages = [...allMessages]
let iterationCount = 0
const MAX_ITERATIONS = 10 // Prevent infinite loops
// Track time spent in model vs tools
let modelTime = firstResponseTime
@@ -243,7 +223,7 @@ export const groqProvider: ProviderConfig = {
]
try {
while (iterationCount < MAX_ITERATIONS) {
while (iterationCount < MAX_TOOL_ITERATIONS) {
// Check for tool calls
const toolCallsInResponse = currentResponse.choices[0]?.message?.tool_calls
if (!toolCallsInResponse || toolCallsInResponse.length === 0) {

View File

@@ -0,0 +1,23 @@
/**
* Helper to wrap Groq streaming into a browser-friendly ReadableStream
* of raw assistant text chunks.
*
* @param groqStream - The Groq streaming response
* @returns A ReadableStream that emits text chunks
*/
export function createReadableStreamFromGroqStream(groqStream: any): ReadableStream {
return new ReadableStream({
async start(controller) {
try {
for await (const chunk of groqStream) {
if (chunk.choices[0]?.delta?.content) {
controller.enqueue(new TextEncoder().encode(chunk.choices[0].delta.content))
}
}
controller.close()
} catch (err) {
controller.error(err)
}
},
})
}
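A consumption sketch for the helper above, using the standard Web Streams reader loop; groqStream is assumed to be the SDK's streaming chat response:

// Assumes groqStream is a Groq SDK streaming response, e.g. from
// client.chat.completions.create({ ..., stream: true }).
async function collectStreamText(groqStream: any): Promise<string> {
  const reader = createReadableStreamFromGroqStream(groqStream).getReader()
  const decoder = new TextDecoder()
  let text = ''
  while (true) {
    const { done, value } = await reader.read()
    if (done) break
    text += decoder.decode(value, { stream: true }) // value is a UTF-8 Uint8Array chunk
  }
  return text
}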

View File

@@ -12,6 +12,12 @@ import {
const logger = createLogger('Providers')
/**
 * Maximum number of tool-call loop iterations, used across all providers
 * that support tool/function calling, to keep runaway loops bounded.
*/
export const MAX_TOOL_ITERATIONS = 20
function sanitizeRequest(request: ProviderRequest): ProviderRequest {
const sanitizedRequest = { ...request }
@@ -44,7 +50,6 @@ export async function executeProviderRequest(
}
const sanitizedRequest = sanitizeRequest(request)
// If responseFormat is provided, modify the system prompt to enforce structured output
if (sanitizedRequest.responseFormat) {
if (
typeof sanitizedRequest.responseFormat === 'string' &&
@@ -53,12 +58,10 @@ export async function executeProviderRequest(
logger.info('Empty response format provided, ignoring it')
sanitizedRequest.responseFormat = undefined
} else {
// Generate structured output instructions
const structuredOutputInstructions = generateStructuredOutputInstructions(
sanitizedRequest.responseFormat
)
// Only add additional instructions if they're not empty
if (structuredOutputInstructions.trim()) {
const originalPrompt = sanitizedRequest.systemPrompt || ''
sanitizedRequest.systemPrompt =
@@ -69,10 +72,8 @@ export async function executeProviderRequest(
}
}
// Execute the request using the provider's implementation
const response = await provider.executeRequest(sanitizedRequest)
// If we received a StreamingExecution or ReadableStream, just pass it through
if (isStreamingExecution(response)) {
logger.info('Provider returned StreamingExecution')
return response

View File

@@ -1,6 +1,8 @@
import OpenAI from 'openai'
import { createLogger } from '@/lib/logs/console/logger'
import type { StreamingExecution } from '@/executor/types'
import { MAX_TOOL_ITERATIONS } from '@/providers'
import { createReadableStreamFromMistralStream } from '@/providers/mistral/utils'
import { getProviderDefaultModel, getProviderModels } from '@/providers/models'
import type {
ProviderConfig,
@@ -17,40 +19,6 @@ import { executeTool } from '@/tools'
const logger = createLogger('MistralProvider')
function createReadableStreamFromMistralStream(
mistralStream: any,
onComplete?: (content: string, usage?: any) => void
): ReadableStream {
let fullContent = ''
let usageData: any = null
return new ReadableStream({
async start(controller) {
try {
for await (const chunk of mistralStream) {
if (chunk.usage) {
usageData = chunk.usage
}
const content = chunk.choices[0]?.delta?.content || ''
if (content) {
fullContent += content
controller.enqueue(new TextEncoder().encode(content))
}
}
if (onComplete) {
onComplete(fullContent, usageData)
}
controller.close()
} catch (error) {
controller.error(error)
}
},
})
}
/**
* Mistral AI provider configuration
*/
@@ -288,7 +256,6 @@ export const mistralProvider: ProviderConfig = {
const toolResults = []
const currentMessages = [...allMessages]
let iterationCount = 0
const MAX_ITERATIONS = 10
let modelTime = firstResponseTime
let toolsTime = 0
@@ -307,14 +274,14 @@ export const mistralProvider: ProviderConfig = {
checkForForcedToolUsage(currentResponse, originalToolChoice)
while (iterationCount < MAX_ITERATIONS) {
while (iterationCount < MAX_TOOL_ITERATIONS) {
const toolCallsInResponse = currentResponse.choices[0]?.message?.tool_calls
if (!toolCallsInResponse || toolCallsInResponse.length === 0) {
break
}
logger.info(
`Processing ${toolCallsInResponse.length} tool calls (iteration ${iterationCount + 1}/${MAX_ITERATIONS})`
`Processing ${toolCallsInResponse.length} tool calls (iteration ${iterationCount + 1}/${MAX_TOOL_ITERATIONS})`
)
const toolsStartTime = Date.now()

View File

@@ -0,0 +1,39 @@
/**
* Creates a ReadableStream from a Mistral AI streaming response
* @param mistralStream - The Mistral AI stream object
* @param onComplete - Optional callback when streaming completes
* @returns A ReadableStream that yields text chunks
*/
export function createReadableStreamFromMistralStream(
mistralStream: any,
onComplete?: (content: string, usage?: any) => void
): ReadableStream {
let fullContent = ''
let usageData: any = null
return new ReadableStream({
async start(controller) {
try {
for await (const chunk of mistralStream) {
if (chunk.usage) {
usageData = chunk.usage
}
const content = chunk.choices[0]?.delta?.content || ''
if (content) {
fullContent += content
controller.enqueue(new TextEncoder().encode(content))
}
}
if (onComplete) {
onComplete(fullContent, usageData)
}
controller.close()
} catch (error) {
controller.error(error)
}
},
})
}
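The onComplete hook lets callers backfill final content and token usage once the stream drains, mirroring how the providers patch their StreamingExecution objects; a sketch, with the usage field names assumed to follow the OpenAI-compatible chunk shape:

// Sketch: capture final content and token usage via onComplete.
// Usage field names (prompt_tokens etc.) are an assumption based on the
// OpenAI-compatible client this provider uses.
function streamWithUsage(mistralStream: any) {
  const result: { content: string; tokens?: { prompt: number; completion: number; total: number } } = {
    content: '',
  }
  const stream = createReadableStreamFromMistralStream(mistralStream, (content, usage) => {
    result.content = content
    if (usage) {
      result.tokens = {
        prompt: usage.prompt_tokens ?? 0,
        completion: usage.completion_tokens ?? 0,
        total: usage.total_tokens ?? 0,
      }
    }
  })
  return { stream, result } // result is fully populated only after the stream closes
}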

View File

@@ -19,6 +19,7 @@ import {
OllamaIcon,
OpenAIIcon,
OpenRouterIcon,
VertexIcon,
VllmIcon,
xAIIcon,
} from '@/components/icons'
@@ -130,7 +131,7 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
},
capabilities: {
reasoningEffort: {
values: ['none', 'low', 'medium', 'high'],
values: ['none', 'minimal', 'low', 'medium', 'high', 'xhigh'],
},
verbosity: {
values: ['low', 'medium', 'high'],
@@ -283,7 +284,11 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
output: 60,
updatedAt: '2025-06-17',
},
capabilities: {},
capabilities: {
reasoningEffort: {
values: ['low', 'medium', 'high'],
},
},
contextWindow: 200000,
},
{
@@ -294,7 +299,11 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
output: 8,
updatedAt: '2025-06-17',
},
capabilities: {},
capabilities: {
reasoningEffort: {
values: ['low', 'medium', 'high'],
},
},
contextWindow: 128000,
},
{
@@ -305,7 +314,11 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
output: 4.4,
updatedAt: '2025-06-17',
},
capabilities: {},
capabilities: {
reasoningEffort: {
values: ['low', 'medium', 'high'],
},
},
contextWindow: 128000,
},
{
@@ -383,7 +396,7 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
},
capabilities: {
reasoningEffort: {
values: ['none', 'low', 'medium', 'high'],
values: ['none', 'minimal', 'low', 'medium', 'high', 'xhigh'],
},
verbosity: {
values: ['low', 'medium', 'high'],
@@ -536,7 +549,11 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
output: 40,
updatedAt: '2025-06-15',
},
capabilities: {},
capabilities: {
reasoningEffort: {
values: ['low', 'medium', 'high'],
},
},
contextWindow: 128000,
},
{
@@ -547,7 +564,11 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
output: 4.4,
updatedAt: '2025-06-15',
},
capabilities: {},
capabilities: {
reasoningEffort: {
values: ['low', 'medium', 'high'],
},
},
contextWindow: 128000,
},
{
@@ -708,9 +729,22 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
id: 'gemini-3-pro-preview',
pricing: {
input: 2.0,
cachedInput: 1.0,
cachedInput: 0.2,
output: 12.0,
updatedAt: '2025-11-18',
updatedAt: '2025-12-17',
},
capabilities: {
temperature: { min: 0, max: 2 },
},
contextWindow: 1000000,
},
{
id: 'gemini-3-flash-preview',
pricing: {
input: 0.5,
cachedInput: 0.05,
output: 3.0,
updatedAt: '2025-12-17',
},
capabilities: {
temperature: { min: 0, max: 2 },
@@ -756,6 +790,132 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
},
contextWindow: 1048576,
},
{
id: 'gemini-2.0-flash',
pricing: {
input: 0.1,
output: 0.4,
updatedAt: '2025-12-17',
},
capabilities: {
temperature: { min: 0, max: 2 },
},
contextWindow: 1000000,
},
{
id: 'gemini-2.0-flash-lite',
pricing: {
input: 0.075,
output: 0.3,
updatedAt: '2025-12-17',
},
capabilities: {
temperature: { min: 0, max: 2 },
},
contextWindow: 1000000,
},
],
},
vertex: {
id: 'vertex',
name: 'Vertex AI',
description: "Google's Vertex AI platform for Gemini models",
defaultModel: 'vertex/gemini-2.5-pro',
modelPatterns: [/^vertex\//],
icon: VertexIcon,
capabilities: {
toolUsageControl: true,
},
models: [
{
id: 'vertex/gemini-3-pro-preview',
pricing: {
input: 2.0,
cachedInput: 0.2,
output: 12.0,
updatedAt: '2025-12-17',
},
capabilities: {
temperature: { min: 0, max: 2 },
},
contextWindow: 1000000,
},
{
id: 'vertex/gemini-3-flash-preview',
pricing: {
input: 0.5,
cachedInput: 0.05,
output: 3.0,
updatedAt: '2025-12-17',
},
capabilities: {
temperature: { min: 0, max: 2 },
},
contextWindow: 1000000,
},
{
id: 'vertex/gemini-2.5-pro',
pricing: {
input: 1.25,
cachedInput: 0.125,
output: 10.0,
updatedAt: '2025-12-02',
},
capabilities: {
temperature: { min: 0, max: 2 },
},
contextWindow: 1048576,
},
{
id: 'vertex/gemini-2.5-flash',
pricing: {
input: 0.3,
cachedInput: 0.03,
output: 2.5,
updatedAt: '2025-12-02',
},
capabilities: {
temperature: { min: 0, max: 2 },
},
contextWindow: 1048576,
},
{
id: 'vertex/gemini-2.5-flash-lite',
pricing: {
input: 0.1,
cachedInput: 0.01,
output: 0.4,
updatedAt: '2025-12-02',
},
capabilities: {
temperature: { min: 0, max: 2 },
},
contextWindow: 1048576,
},
{
id: 'vertex/gemini-2.0-flash',
pricing: {
input: 0.1,
output: 0.4,
updatedAt: '2025-12-17',
},
capabilities: {
temperature: { min: 0, max: 2 },
},
contextWindow: 1000000,
},
{
id: 'vertex/gemini-2.0-flash-lite',
pricing: {
input: 0.075,
output: 0.3,
updatedAt: '2025-12-17',
},
capabilities: {
temperature: { min: 0, max: 2 },
},
contextWindow: 1000000,
},
],
},
deepseek: {
@@ -1708,6 +1868,20 @@ export function getModelsWithReasoningEffort(): string[] {
return models
}
/**
* Get the reasoning effort values for a specific model
* Returns the valid options for that model, or null if the model doesn't support reasoning effort
*/
export function getReasoningEffortValuesForModel(modelId: string): string[] | null {
for (const provider of Object.values(PROVIDER_DEFINITIONS)) {
const model = provider.models.find((m) => m.id.toLowerCase() === modelId.toLowerCase())
if (model?.capabilities.reasoningEffort) {
return model.capabilities.reasoningEffort.values
}
}
return null
}
/**
* Get all models that support verbosity
*/
@@ -1722,3 +1896,17 @@ export function getModelsWithVerbosity(): string[] {
}
return models
}
/**
* Get the verbosity values for a specific model
* Returns the valid options for that model, or null if the model doesn't support verbosity
*/
export function getVerbosityValuesForModel(modelId: string): string[] | null {
for (const provider of Object.values(PROVIDER_DEFINITIONS)) {
const model = provider.models.find((m) => m.id.toLowerCase() === modelId.toLowerCase())
if (model?.capabilities.verbosity) {
return model.capabilities.verbosity.values
}
}
return null
}
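A usage sketch for the two per-model lookups added above, validating a requested value against a model's declared capability (the inputs are hypothetical):

// Returns the requested effort if the model declares it, otherwise undefined.
function validateReasoningEffort(modelId: string, requested: string): string | undefined {
  const allowed = getReasoningEffortValuesForModel(modelId)
  if (!allowed) return undefined // model does not support reasoning effort
  return allowed.includes(requested) ? requested : undefined
}
// getVerbosityValuesForModel supports the same pattern for verbosity.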

View File

@@ -2,7 +2,9 @@ import OpenAI from 'openai'
import { env } from '@/lib/core/config/env'
import { createLogger } from '@/lib/logs/console/logger'
import type { StreamingExecution } from '@/executor/types'
import { MAX_TOOL_ITERATIONS } from '@/providers'
import type { ModelsObject } from '@/providers/ollama/types'
import { createReadableStreamFromOllamaStream } from '@/providers/ollama/utils'
import type {
ProviderConfig,
ProviderRequest,
@@ -16,46 +18,6 @@ import { executeTool } from '@/tools'
const logger = createLogger('OllamaProvider')
const OLLAMA_HOST = env.OLLAMA_URL || 'http://localhost:11434'
/**
* Helper function to convert an Ollama stream to a standard ReadableStream
* and collect completion metrics
*/
function createReadableStreamFromOllamaStream(
ollamaStream: any,
onComplete?: (content: string, usage?: any) => void
): ReadableStream {
let fullContent = ''
let usageData: any = null
return new ReadableStream({
async start(controller) {
try {
for await (const chunk of ollamaStream) {
// Check for usage data in the final chunk
if (chunk.usage) {
usageData = chunk.usage
}
const content = chunk.choices[0]?.delta?.content || ''
if (content) {
fullContent += content
controller.enqueue(new TextEncoder().encode(content))
}
}
// Once stream is complete, call the completion callback with the final content and usage
if (onComplete) {
onComplete(fullContent, usageData)
}
controller.close()
} catch (error) {
controller.error(error)
}
},
})
}
export const ollamaProvider: ProviderConfig = {
id: 'ollama',
name: 'Ollama',
@@ -334,7 +296,6 @@ export const ollamaProvider: ProviderConfig = {
const toolResults = []
const currentMessages = [...allMessages]
let iterationCount = 0
const MAX_ITERATIONS = 10 // Prevent infinite loops
// Track time spent in model vs tools
let modelTime = firstResponseTime
@@ -351,7 +312,7 @@ export const ollamaProvider: ProviderConfig = {
},
]
while (iterationCount < MAX_ITERATIONS) {
while (iterationCount < MAX_TOOL_ITERATIONS) {
// Check for tool calls
const toolCallsInResponse = currentResponse.choices[0]?.message?.tool_calls
if (!toolCallsInResponse || toolCallsInResponse.length === 0) {
@@ -359,7 +320,7 @@ export const ollamaProvider: ProviderConfig = {
}
logger.info(
`Processing ${toolCallsInResponse.length} tool calls (iteration ${iterationCount + 1}/${MAX_ITERATIONS})`
`Processing ${toolCallsInResponse.length} tool calls (iteration ${iterationCount + 1}/${MAX_TOOL_ITERATIONS})`
)
// Track time for tool calls in this batch

View File

@@ -0,0 +1,37 @@
/**
* Helper function to convert an Ollama stream to a standard ReadableStream
* and collect completion metrics
*/
export function createReadableStreamFromOllamaStream(
ollamaStream: any,
onComplete?: (content: string, usage?: any) => void
): ReadableStream {
let fullContent = ''
let usageData: any = null
return new ReadableStream({
async start(controller) {
try {
for await (const chunk of ollamaStream) {
if (chunk.usage) {
usageData = chunk.usage
}
const content = chunk.choices[0]?.delta?.content || ''
if (content) {
fullContent += content
controller.enqueue(new TextEncoder().encode(content))
}
}
if (onComplete) {
onComplete(fullContent, usageData)
}
controller.close()
} catch (error) {
controller.error(error)
}
},
})
}

View File

@@ -1,7 +1,9 @@
import OpenAI from 'openai'
import { createLogger } from '@/lib/logs/console/logger'
import type { StreamingExecution } from '@/executor/types'
import { MAX_TOOL_ITERATIONS } from '@/providers'
import { getProviderDefaultModel, getProviderModels } from '@/providers/models'
import { createReadableStreamFromOpenAIStream } from '@/providers/openai/utils'
import type {
ProviderConfig,
ProviderRequest,
@@ -17,46 +19,6 @@ import { executeTool } from '@/tools'
const logger = createLogger('OpenAIProvider')
/**
* Helper function to convert an OpenAI stream to a standard ReadableStream
* and collect completion metrics
*/
function createReadableStreamFromOpenAIStream(
openaiStream: any,
onComplete?: (content: string, usage?: any) => void
): ReadableStream {
let fullContent = ''
let usageData: any = null
return new ReadableStream({
async start(controller) {
try {
for await (const chunk of openaiStream) {
// Check for usage data in the final chunk
if (chunk.usage) {
usageData = chunk.usage
}
const content = chunk.choices[0]?.delta?.content || ''
if (content) {
fullContent += content
controller.enqueue(new TextEncoder().encode(content))
}
}
// Once stream is complete, call the completion callback with the final content and usage
if (onComplete) {
onComplete(fullContent, usageData)
}
controller.close()
} catch (error) {
controller.error(error)
}
},
})
}
/**
* OpenAI provider configuration
*/
@@ -319,7 +281,6 @@ export const openaiProvider: ProviderConfig = {
const toolResults = []
const currentMessages = [...allMessages]
let iterationCount = 0
const MAX_ITERATIONS = 10 // Prevent infinite loops
// Track time spent in model vs tools
let modelTime = firstResponseTime
@@ -342,7 +303,7 @@ export const openaiProvider: ProviderConfig = {
// Check if a forced tool was used in the first response
checkForForcedToolUsage(currentResponse, originalToolChoice)
while (iterationCount < MAX_ITERATIONS) {
while (iterationCount < MAX_TOOL_ITERATIONS) {
// Check for tool calls
const toolCallsInResponse = currentResponse.choices[0]?.message?.tool_calls
if (!toolCallsInResponse || toolCallsInResponse.length === 0) {
@@ -350,7 +311,7 @@ export const openaiProvider: ProviderConfig = {
}
logger.info(
`Processing ${toolCallsInResponse.length} tool calls (iteration ${iterationCount + 1}/${MAX_ITERATIONS})`
`Processing ${toolCallsInResponse.length} tool calls (iteration ${iterationCount + 1}/${MAX_TOOL_ITERATIONS})`
)
// Track time for tool calls in this batch

View File

@@ -0,0 +1,37 @@
/**
* Helper function to convert an OpenAI stream to a standard ReadableStream
* and collect completion metrics
*/
export function createReadableStreamFromOpenAIStream(
openaiStream: any,
onComplete?: (content: string, usage?: any) => void
): ReadableStream {
let fullContent = ''
let usageData: any = null
return new ReadableStream({
async start(controller) {
try {
for await (const chunk of openaiStream) {
if (chunk.usage) {
usageData = chunk.usage
}
const content = chunk.choices[0]?.delta?.content || ''
if (content) {
fullContent += content
controller.enqueue(new TextEncoder().encode(content))
}
}
if (onComplete) {
onComplete(fullContent, usageData)
}
controller.close()
} catch (error) {
controller.error(error)
}
},
})
}
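Since the helper returns a standard web ReadableStream, it can be handed directly to a Response for an HTTP route; a sketch assuming openaiStream comes from a streaming chat completion call:

// Sketch: serve the converted stream over HTTP.
// Assumes openaiStream is from client.chat.completions.create({ ..., stream: true }).
function toHttpResponse(openaiStream: any): Response {
  const stream = createReadableStreamFromOpenAIStream(openaiStream)
  return new Response(stream, {
    headers: { 'Content-Type': 'text/plain; charset=utf-8' },
  })
}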

View File

@@ -1,56 +1,23 @@
import OpenAI from 'openai'
import { createLogger } from '@/lib/logs/console/logger'
import type { StreamingExecution } from '@/executor/types'
import { MAX_TOOL_ITERATIONS } from '@/providers'
import { getProviderDefaultModel, getProviderModels } from '@/providers/models'
import {
checkForForcedToolUsage,
createReadableStreamFromOpenAIStream,
} from '@/providers/openrouter/utils'
import type {
ProviderConfig,
ProviderRequest,
ProviderResponse,
TimeSegment,
} from '@/providers/types'
import {
prepareToolExecution,
prepareToolsWithUsageControl,
trackForcedToolUsage,
} from '@/providers/utils'
import { prepareToolExecution, prepareToolsWithUsageControl } from '@/providers/utils'
import { executeTool } from '@/tools'
const logger = createLogger('OpenRouterProvider')
function createReadableStreamFromOpenAIStream(
openaiStream: any,
onComplete?: (content: string, usage?: any) => void
): ReadableStream {
let fullContent = ''
let usageData: any = null
return new ReadableStream({
async start(controller) {
try {
for await (const chunk of openaiStream) {
if (chunk.usage) {
usageData = chunk.usage
}
const content = chunk.choices[0]?.delta?.content || ''
if (content) {
fullContent += content
controller.enqueue(new TextEncoder().encode(content))
}
}
if (onComplete) {
onComplete(fullContent, usageData)
}
controller.close()
} catch (error) {
controller.error(error)
}
},
})
}
export const openRouterProvider: ProviderConfig = {
id: 'openrouter',
name: 'OpenRouter',
@@ -227,7 +194,6 @@ export const openRouterProvider: ProviderConfig = {
const toolResults = [] as any[]
const currentMessages = [...allMessages]
let iterationCount = 0
const MAX_ITERATIONS = 10
let modelTime = firstResponseTime
let toolsTime = 0
let hasUsedForcedTool = false
@@ -241,28 +207,16 @@ export const openRouterProvider: ProviderConfig = {
},
]
const checkForForcedToolUsage = (
response: any,
toolChoice: string | { type: string; function?: { name: string }; name?: string; any?: any }
) => {
if (typeof toolChoice === 'object' && response.choices[0]?.message?.tool_calls) {
const toolCallsResponse = response.choices[0].message.tool_calls
const result = trackForcedToolUsage(
toolCallsResponse,
toolChoice,
logger,
'openrouter',
forcedTools,
usedForcedTools
)
hasUsedForcedTool = result.hasUsedForcedTool
usedForcedTools = result.usedForcedTools
}
}
const forcedToolResult = checkForForcedToolUsage(
currentResponse,
originalToolChoice,
forcedTools,
usedForcedTools
)
hasUsedForcedTool = forcedToolResult.hasUsedForcedTool
usedForcedTools = forcedToolResult.usedForcedTools
checkForForcedToolUsage(currentResponse, originalToolChoice)
while (iterationCount < MAX_ITERATIONS) {
while (iterationCount < MAX_TOOL_ITERATIONS) {
const toolCallsInResponse = currentResponse.choices[0]?.message?.tool_calls
if (!toolCallsInResponse || toolCallsInResponse.length === 0) {
break
@@ -359,7 +313,14 @@ export const openRouterProvider: ProviderConfig = {
const nextModelStartTime = Date.now()
currentResponse = await client.chat.completions.create(nextPayload)
checkForForcedToolUsage(currentResponse, nextPayload.tool_choice)
const nextForcedToolResult = checkForForcedToolUsage(
currentResponse,
nextPayload.tool_choice,
forcedTools,
usedForcedTools
)
hasUsedForcedTool = nextForcedToolResult.hasUsedForcedTool
usedForcedTools = nextForcedToolResult.usedForcedTools
const nextModelEndTime = Date.now()
const thisModelTime = nextModelEndTime - nextModelStartTime
timeSegments.push({

View File

@@ -0,0 +1,78 @@
import { createLogger } from '@/lib/logs/console/logger'
import { trackForcedToolUsage } from '@/providers/utils'
const logger = createLogger('OpenRouterProvider')
/**
* Creates a ReadableStream from an OpenAI-compatible stream response
* @param openaiStream - The OpenAI stream to convert
* @param onComplete - Optional callback when streaming is complete with content and usage data
* @returns ReadableStream that emits text chunks
*/
export function createReadableStreamFromOpenAIStream(
openaiStream: any,
onComplete?: (content: string, usage?: any) => void
): ReadableStream {
let fullContent = ''
let usageData: any = null
return new ReadableStream({
async start(controller) {
try {
for await (const chunk of openaiStream) {
if (chunk.usage) {
usageData = chunk.usage
}
const content = chunk.choices[0]?.delta?.content || ''
if (content) {
fullContent += content
controller.enqueue(new TextEncoder().encode(content))
}
}
if (onComplete) {
onComplete(fullContent, usageData)
}
controller.close()
} catch (error) {
controller.error(error)
}
},
})
}
/**
* Checks if a forced tool was used in the response and updates tracking
* @param response - The API response containing tool calls
* @param toolChoice - The tool choice configuration (string or object)
* @param forcedTools - Array of forced tool names
* @param usedForcedTools - Array of already used forced tools
* @returns Object with hasUsedForcedTool flag and updated usedForcedTools array
*/
export function checkForForcedToolUsage(
response: any,
toolChoice: string | { type: string; function?: { name: string }; name?: string; any?: any },
forcedTools: string[],
usedForcedTools: string[]
): { hasUsedForcedTool: boolean; usedForcedTools: string[] } {
let hasUsedForcedTool = false
let updatedUsedForcedTools = usedForcedTools
if (typeof toolChoice === 'object' && response.choices[0]?.message?.tool_calls) {
const toolCallsResponse = response.choices[0].message.tool_calls
const result = trackForcedToolUsage(
toolCallsResponse,
toolChoice,
logger,
'openrouter',
forcedTools,
updatedUsedForcedTools
)
hasUsedForcedTool = result.hasUsedForcedTool
updatedUsedForcedTools = result.usedForcedTools
}
return { hasUsedForcedTool, usedForcedTools: updatedUsedForcedTools }
}
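Unlike the removed closure-based version, the extracted helper returns its tracking state, so callers fold the result back after each model turn, as the provider diff above shows; a minimal sketch:

// Fold-back pattern: the helper is pure with respect to its inputs,
// so the caller owns and updates the tracking variables.
let hasUsedForcedTool = false
let usedForcedTools: string[] = []

function onModelResponse(response: any, toolChoice: any, forcedTools: string[]) {
  const result = checkForForcedToolUsage(response, toolChoice, forcedTools, usedForcedTools)
  hasUsedForcedTool = result.hasUsedForcedTool
  usedForcedTools = result.usedForcedTools
}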

Some files were not shown because too many files have changed in this diff Show More