mirror of
https://github.com/simstudioai/sim.git
synced 2026-01-09 23:17:59 -05:00
Compare commits
60 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
aeef2b7e2b | ||
|
|
6ec5cf46e2 | ||
|
|
4880d34786 | ||
|
|
2bba20129f | ||
|
|
64cd60d63a | ||
|
|
696ef12c80 | ||
|
|
063734f02c | ||
|
|
41b1357afb | ||
|
|
221a473ccc | ||
|
|
48b32a346c | ||
|
|
6ab9fa76a1 | ||
|
|
b613010a26 | ||
|
|
1035aca71e | ||
|
|
eb51d6d3f5 | ||
|
|
f3880ad8ed | ||
|
|
936705f04c | ||
|
|
4819b88ac1 | ||
|
|
e71a736400 | ||
|
|
58e764c1dd | ||
|
|
dc5cccdee3 | ||
|
|
cb48174f96 | ||
|
|
f9312c5855 | ||
|
|
6c12104a2e | ||
|
|
9f0673b285 | ||
|
|
18332b9dc4 | ||
|
|
f1d6b9ca1b | ||
|
|
1e0df7466d | ||
|
|
e8b05ae420 | ||
|
|
38e1747130 | ||
|
|
ab85c1a215 | ||
|
|
9c3bcbabf9 | ||
|
|
6161bb8dbc | ||
|
|
eb79986b69 | ||
|
|
9f810e8c29 | ||
|
|
63b4a81acc | ||
|
|
545ec791df | ||
|
|
3bd7a6c402 | ||
|
|
2e2be9bf38 | ||
|
|
7d45306999 | ||
|
|
63f3871f60 | ||
|
|
9a565f48b1 | ||
|
|
f327d0479a | ||
|
|
fae123754d | ||
|
|
bab4b9f041 | ||
|
|
608964a8b3 | ||
|
|
5b53cc2be6 | ||
|
|
914f1cdd47 | ||
|
|
fb6f5553bb | ||
|
|
84f095d40d | ||
|
|
bc1c1d1751 | ||
|
|
1c68523aa7 | ||
|
|
b253454723 | ||
|
|
03607bbc8b | ||
|
|
12bb0b4589 | ||
|
|
1b929c72a5 | ||
|
|
27e49217cc | ||
|
|
b4faf08c20 | ||
|
|
578129c6e6 | ||
|
|
c2593900d4 | ||
|
|
8d7f3a50d1 |
59
.github/CONTRIBUTING.md
vendored
59
.github/CONTRIBUTING.md
vendored
@@ -301,8 +301,8 @@ In addition, you will need to update the registries:
|
||||
|
||||
```typescript:/apps/sim/blocks/blocks/pinecone.ts
|
||||
import { PineconeIcon } from '@/components/icons'
|
||||
import { PineconeResponse } from '@/tools/pinecone/types'
|
||||
import { BlockConfig } from '../types'
|
||||
import type { BlockConfig } from '@/blocks/types'
|
||||
import type { PineconeResponse } from '@/tools/pinecone/types'
|
||||
|
||||
export const PineconeBlock: BlockConfig<PineconeResponse> = {
|
||||
type: 'pinecone',
|
||||
@@ -313,13 +313,58 @@ In addition, you will need to update the registries:
|
||||
bgColor: '#123456',
|
||||
icon: PineconeIcon,
|
||||
|
||||
// If this block requires OAuth authentication
|
||||
provider: 'pinecone',
|
||||
|
||||
// Define subBlocks for the UI configuration
|
||||
subBlocks: [
|
||||
// Block configuration options
|
||||
{
|
||||
id: 'operation',
|
||||
title: 'Operation',
|
||||
type: 'dropdown',
|
||||
layout: 'full',
|
||||
required: true,
|
||||
options: [
|
||||
{ label: 'Generate Embeddings', id: 'generate' },
|
||||
{ label: 'Search Text', id: 'search_text' },
|
||||
],
|
||||
value: () => 'generate',
|
||||
},
|
||||
{
|
||||
id: 'apiKey',
|
||||
title: 'API Key',
|
||||
type: 'short-input',
|
||||
layout: 'full',
|
||||
placeholder: 'Your Pinecone API key',
|
||||
password: true,
|
||||
required: true,
|
||||
},
|
||||
],
|
||||
|
||||
tools: {
|
||||
access: ['pinecone_generate_embeddings', 'pinecone_search_text'],
|
||||
config: {
|
||||
tool: (params: Record<string, any>) => {
|
||||
switch (params.operation) {
|
||||
case 'generate':
|
||||
return 'pinecone_generate_embeddings'
|
||||
case 'search_text':
|
||||
return 'pinecone_search_text'
|
||||
default:
|
||||
throw new Error('Invalid operation selected')
|
||||
}
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
inputs: {
|
||||
operation: { type: 'string', description: 'Operation to perform' },
|
||||
apiKey: { type: 'string', description: 'Pinecone API key' },
|
||||
searchQuery: { type: 'string', description: 'Search query text' },
|
||||
topK: { type: 'string', description: 'Number of results to return' },
|
||||
},
|
||||
|
||||
outputs: {
|
||||
matches: { type: 'any', description: 'Search results or generated embeddings' },
|
||||
data: { type: 'any', description: 'Response data from Pinecone' },
|
||||
usage: { type: 'any', description: 'API usage statistics' },
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
55
.github/PULL_REQUEST_TEMPLATE.md
vendored
55
.github/PULL_REQUEST_TEMPLATE.md
vendored
@@ -1,42 +1,25 @@
|
||||
## Description
|
||||
## Summary
|
||||
Brief description of what this PR does and why.
|
||||
|
||||
Please include a summary of the change and which issue is fixed. Please also include relevant motivation and context.
|
||||
Fixes #(issue)
|
||||
|
||||
Fixes # (issue)
|
||||
## Type of Change
|
||||
- [ ] Bug fix
|
||||
- [ ] New feature
|
||||
- [ ] Breaking change
|
||||
- [ ] Documentation
|
||||
- [ ] Other: ___________
|
||||
|
||||
## Type of change
|
||||
## Testing
|
||||
How has this been tested? What should reviewers focus on?
|
||||
|
||||
Please delete options that are not relevant.
|
||||
|
||||
- [ ] Bug fix (non-breaking change which fixes an issue)
|
||||
- [ ] New feature (non-breaking change which adds functionality)
|
||||
- [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected)
|
||||
- [ ] Documentation update
|
||||
- [ ] Security enhancement
|
||||
- [ ] Performance improvement
|
||||
- [ ] Code refactoring (no functional changes)
|
||||
|
||||
## How Has This Been Tested?
|
||||
|
||||
Please describe the tests that you ran to verify your changes. Provide instructions so we can reproduce. Please also list any relevant details for your test configuration.
|
||||
|
||||
## Checklist:
|
||||
|
||||
- [ ] My code follows the style guidelines of this project
|
||||
- [ ] I have performed a self-review of my own code
|
||||
- [ ] I have commented my code, particularly in hard-to-understand areas
|
||||
- [ ] I have added tests that prove my fix is effective or that my feature works
|
||||
- [ ] All tests pass locally and in CI (`bun run test`)
|
||||
- [ ] My changes generate no new warnings
|
||||
- [ ] Any dependent changes have been merged and published in downstream modules
|
||||
- [ ] I have updated version numbers as needed (if needed)
|
||||
## Checklist
|
||||
- [ ] Code follows project style guidelines
|
||||
- [ ] Self-reviewed my changes
|
||||
- [ ] Tests added/updated and passing
|
||||
- [ ] No new warnings introduced
|
||||
- [ ] I confirm that I have read and agree to the terms outlined in the [Contributor License Agreement (CLA)](./CONTRIBUTING.md#contributor-license-agreement-cla)
|
||||
|
||||
## Security Considerations:
|
||||
|
||||
- [ ] My changes do not introduce any new security vulnerabilities
|
||||
- [ ] I have considered the security implications of my changes
|
||||
|
||||
## Additional Information:
|
||||
|
||||
Any additional information, configuration or data that might be necessary to reproduce the issue or use the feature.
|
||||
## Screenshots/Videos
|
||||
<!-- If applicable, add screenshots or videos to help explain your changes -->
|
||||
<!-- For UI changes, before/after screenshots are especially helpful -->
|
||||
|
||||
38
.github/workflows/docs-embeddings.yml
vendored
Normal file
38
.github/workflows/docs-embeddings.yml
vendored
Normal file
@@ -0,0 +1,38 @@
|
||||
name: Process Docs Embeddings
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main, staging]
|
||||
paths:
|
||||
- 'apps/docs/**'
|
||||
workflow_dispatch: # Allow manual triggering
|
||||
|
||||
jobs:
|
||||
process-docs-embeddings:
|
||||
name: Process Documentation Embeddings
|
||||
runs-on: ubuntu-latest
|
||||
if: github.ref == 'refs/heads/main' || github.ref == 'refs/heads/staging'
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Setup Bun
|
||||
uses: oven-sh/setup-bun@v2
|
||||
with:
|
||||
bun-version: latest
|
||||
|
||||
- name: Setup Node
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: latest
|
||||
|
||||
- name: Install dependencies
|
||||
run: bun install
|
||||
|
||||
- name: Process docs embeddings
|
||||
working-directory: ./apps/sim
|
||||
env:
|
||||
DATABASE_URL: ${{ github.ref == 'refs/heads/main' && secrets.DATABASE_URL || secrets.STAGING_DATABASE_URL }}
|
||||
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
|
||||
run: bun run scripts/process-docs-embeddings.ts --clear
|
||||
@@ -5,7 +5,7 @@
|
||||
<p align="center">
|
||||
<a href="https://www.apache.org/licenses/LICENSE-2.0"><img src="https://img.shields.io/badge/License-Apache%202.0-blue.svg" alt="License: Apache-2.0"></a>
|
||||
<a href="https://discord.gg/Hr4UWYEcTT"><img src="https://img.shields.io/badge/Discord-Join%20Server-7289DA?logo=discord&logoColor=white" alt="Discord"></a>
|
||||
<a href="https://x.com/simstudioai"><img src="https://img.shields.io/twitter/follow/simstudioai?style=social" alt="Twitter"></a>
|
||||
<a href="https://x.com/simdotai"><img src="https://img.shields.io/twitter/follow/simstudioai?style=social" alt="Twitter"></a>
|
||||
<a href="https://github.com/simstudioai/sim/pulls"><img src="https://img.shields.io/badge/PRs-welcome-brightgreen.svg" alt="PRs welcome"></a>
|
||||
<a href="https://docs.sim.ai"><img src="https://img.shields.io/badge/Docs-visit%20documentation-blue.svg" alt="Documentation"></a>
|
||||
</p>
|
||||
|
||||
@@ -136,12 +136,18 @@ Sim automatically calculates costs for all AI model usage:
|
||||
|
||||
### How Costs Are Calculated
|
||||
|
||||
Every workflow execution includes two cost components:
|
||||
|
||||
**Base Execution Charge**: $0.001 per execution
|
||||
|
||||
**AI Model Usage**: Variable cost based on token consumption
|
||||
```javascript
|
||||
cost = (inputTokens × inputPrice + outputTokens × outputPrice) / 1,000,000
|
||||
modelCost = (inputTokens × inputPrice + outputTokens × outputPrice) / 1,000,000
|
||||
totalCost = baseExecutionCharge + modelCost
|
||||
```
|
||||
|
||||
<Callout type="info">
|
||||
Prices are per million tokens. The calculation divides by 1,000,000 to get the actual cost.
|
||||
AI model prices are per million tokens. The calculation divides by 1,000,000 to get the actual cost. Workflows without AI blocks only incur the base execution charge.
|
||||
</Callout>
|
||||
|
||||
### Pricing Options
|
||||
|
||||
@@ -79,11 +79,11 @@ Read records from an Airtable table
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `records` | string |
|
||||
| `metadata` | string |
|
||||
| `totalRecords` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `records` | json | Retrieved record data |
|
||||
| `record` | json | Single record data |
|
||||
| `metadata` | json | Operation metadata |
|
||||
|
||||
### `airtable_get_record`
|
||||
|
||||
@@ -100,10 +100,11 @@ Retrieve a single record from an Airtable table by its ID
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `record` | string |
|
||||
| `metadata` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `records` | json | Retrieved record data |
|
||||
| `record` | json | Single record data |
|
||||
| `metadata` | json | Operation metadata |
|
||||
|
||||
### `airtable_create_records`
|
||||
|
||||
@@ -119,10 +120,11 @@ Write new records to an Airtable table
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `records` | string |
|
||||
| `metadata` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `records` | json | Retrieved record data |
|
||||
| `record` | json | Single record data |
|
||||
| `metadata` | json | Operation metadata |
|
||||
|
||||
### `airtable_update_record`
|
||||
|
||||
@@ -140,11 +142,11 @@ Update an existing record in an Airtable table by ID
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `record` | string |
|
||||
| `metadata` | string |
|
||||
| `updatedFields` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `records` | json | Retrieved record data |
|
||||
| `record` | json | Single record data |
|
||||
| `metadata` | json | Operation metadata |
|
||||
|
||||
### `airtable_update_multiple_records`
|
||||
|
||||
@@ -160,33 +162,14 @@ Update multiple existing records in an Airtable table
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `records` | string |
|
||||
| `metadata` | string |
|
||||
| `updatedRecordIds` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `records` | json | Retrieved record data |
|
||||
| `record` | json | Single record data |
|
||||
| `metadata` | json | Operation metadata |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `operation` | string | Yes | Operation |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `records` | json | records output from the block |
|
||||
| `record` | json | record output from the block |
|
||||
| `metadata` | json | metadata output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -61,7 +61,7 @@ Search for academic papers on ArXiv by keywords, authors, titles, or other field
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `query` | string | Yes | The search query to execute |
|
||||
| `searchQuery` | string | Yes | The search query to execute |
|
||||
| `searchField` | string | No | Field to search in: all, ti \(title\), au \(author\), abs \(abstract\), co \(comment\), jr \(journal\), cat \(category\), rn \(report number\) |
|
||||
| `maxResults` | number | No | Maximum number of results to return \(default: 10, max: 2000\) |
|
||||
| `sortBy` | string | No | Sort by: relevance, lastUpdatedDate, submittedDate \(default: relevance\) |
|
||||
@@ -69,11 +69,12 @@ Search for academic papers on ArXiv by keywords, authors, titles, or other field
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `query` | string |
|
||||
| `papers` | string |
|
||||
| `totalResults` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `papers` | json | Found papers data |
|
||||
| `totalResults` | number | Total results count |
|
||||
| `paper` | json | Paper details |
|
||||
| `authorPapers` | json | Author papers list |
|
||||
|
||||
### `arxiv_get_paper`
|
||||
|
||||
@@ -87,9 +88,12 @@ Get detailed information about a specific ArXiv paper by its ID.
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `paper` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `papers` | json | Found papers data |
|
||||
| `totalResults` | number | Total results count |
|
||||
| `paper` | json | Paper details |
|
||||
| `authorPapers` | json | Author papers list |
|
||||
|
||||
### `arxiv_get_author_papers`
|
||||
|
||||
@@ -104,34 +108,15 @@ Search for papers by a specific author on ArXiv.
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `authorPapers` | string |
|
||||
| `authorName` | string |
|
||||
| `totalResults` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `papers` | json | Found papers data |
|
||||
| `totalResults` | number | Total results count |
|
||||
| `paper` | json | Paper details |
|
||||
| `authorPapers` | json | Author papers list |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `operation` | string | Yes | Operation |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `papers` | json | papers output from the block |
|
||||
| `totalResults` | number | totalResults output from the block |
|
||||
| `paper` | json | paper output from the block |
|
||||
| `authorPapers` | json | authorPapers output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -79,35 +79,15 @@ Runs a browser automation task using BrowserUse
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `id` | string |
|
||||
| `success` | string |
|
||||
| `output` | string |
|
||||
| `steps` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `id` | string | Task execution identifier |
|
||||
| `success` | boolean | Task completion status |
|
||||
| `output` | any | Task output data |
|
||||
| `steps` | json | Execution steps taken |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `task` | string | Yes | Task - Describe what the browser agent should do... |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `id` | string | id output from the block |
|
||||
| `success` | boolean | success output from the block |
|
||||
| `output` | any | output output from the block |
|
||||
| `steps` | json | steps output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -218,29 +218,12 @@ Populate Clay with data from a JSON file. Enables direct communication and notif
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `data` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `data` | any | Response data |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `authToken` | string | Yes | Auth Token - Enter your Clay Auth token |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `data` | any | data output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -64,12 +64,13 @@ Retrieve content from Confluence pages using the Confluence API.
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `ts` | string |
|
||||
| `pageId` | string |
|
||||
| `content` | string |
|
||||
| `title` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `ts` | string | Timestamp |
|
||||
| `pageId` | string | Page identifier |
|
||||
| `content` | string | Page content |
|
||||
| `title` | string | Page title |
|
||||
| `success` | boolean | Operation success status |
|
||||
|
||||
### `confluence_update`
|
||||
|
||||
@@ -89,37 +90,16 @@ Update a Confluence page using the Confluence API.
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `ts` | string |
|
||||
| `pageId` | string |
|
||||
| `title` | string |
|
||||
| `body` | string |
|
||||
| `success` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `ts` | string | Timestamp |
|
||||
| `pageId` | string | Page identifier |
|
||||
| `content` | string | Page content |
|
||||
| `title` | string | Page title |
|
||||
| `success` | boolean | Operation success status |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `operation` | string | Yes | Operation |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `ts` | string | ts output from the block |
|
||||
| `pageId` | string | pageId output from the block |
|
||||
| `content` | string | content output from the block |
|
||||
| `title` | string | title output from the block |
|
||||
| `success` | boolean | success output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -78,9 +78,10 @@ Send a message to a Discord channel
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `message` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `message` | string | Message content |
|
||||
| `data` | any | Response data |
|
||||
|
||||
### `discord_get_messages`
|
||||
|
||||
@@ -96,9 +97,10 @@ Retrieve messages from a Discord channel
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `message` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `message` | string | Message content |
|
||||
| `data` | any | Response data |
|
||||
|
||||
### `discord_get_server`
|
||||
|
||||
@@ -113,9 +115,10 @@ Retrieve information about a Discord server (guild)
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `message` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `message` | string | Message content |
|
||||
| `data` | any | Response data |
|
||||
|
||||
### `discord_get_user`
|
||||
|
||||
@@ -130,30 +133,13 @@ Retrieve information about a Discord user
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `message` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `message` | string | Message content |
|
||||
| `data` | any | Response data |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `operation` | string | Yes | Operation |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `message` | string | message output from the block |
|
||||
| `data` | any | data output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -60,29 +60,12 @@ Convert TTS using ElevenLabs voices
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `audioUrl` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `audioUrl` | string | Generated audio URL |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `text` | string | Yes | Text - Enter the text to convert to speech |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `audioUrl` | string | audioUrl output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -61,22 +61,18 @@ Search the web using Exa AI. Returns relevant search results with titles, URLs,
|
||||
| `query` | string | Yes | The search query to execute |
|
||||
| `numResults` | number | No | Number of results to return \(default: 10, max: 25\) |
|
||||
| `useAutoprompt` | boolean | No | Whether to use autoprompt to improve the query \(default: false\) |
|
||||
| `type` | string | No | Search type: neural, keyword, auto or magic \(default: auto\) |
|
||||
| `type` | string | No | Search type: neural, keyword, auto or fast \(default: auto\) |
|
||||
| `apiKey` | string | Yes | Exa AI API Key |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `results` | string |
|
||||
| `url` | string |
|
||||
| `publishedDate` | string |
|
||||
| `author` | string |
|
||||
| `summary` | string |
|
||||
| `favicon` | string |
|
||||
| `image` | string |
|
||||
| `text` | string |
|
||||
| `score` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `results` | json | Search results |
|
||||
| `similarLinks` | json | Similar links found |
|
||||
| `answer` | string | Generated answer |
|
||||
| `citations` | json | Answer citations |
|
||||
| `research` | json | Research findings |
|
||||
|
||||
### `exa_get_contents`
|
||||
|
||||
@@ -93,12 +89,13 @@ Retrieve the contents of webpages using Exa AI. Returns the title, text content,
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `results` | string |
|
||||
| `title` | string |
|
||||
| `text` | string |
|
||||
| `summary` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `results` | json | Search results |
|
||||
| `similarLinks` | json | Similar links found |
|
||||
| `answer` | string | Generated answer |
|
||||
| `citations` | json | Answer citations |
|
||||
| `research` | json | Research findings |
|
||||
|
||||
### `exa_find_similar_links`
|
||||
|
||||
@@ -115,12 +112,13 @@ Find webpages similar to a given URL using Exa AI. Returns a list of similar lin
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `similarLinks` | string |
|
||||
| `url` | string |
|
||||
| `text` | string |
|
||||
| `score` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `results` | json | Search results |
|
||||
| `similarLinks` | json | Similar links found |
|
||||
| `answer` | string | Generated answer |
|
||||
| `citations` | json | Answer citations |
|
||||
| `research` | json | Research findings |
|
||||
|
||||
### `exa_answer`
|
||||
|
||||
@@ -136,13 +134,13 @@ Get an AI-generated answer to a question with citations from the web using Exa A
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `query` | string |
|
||||
| `answer` | string |
|
||||
| `citations` | string |
|
||||
| `url` | string |
|
||||
| `text` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `results` | json | Search results |
|
||||
| `similarLinks` | json | Similar links found |
|
||||
| `answer` | string | Generated answer |
|
||||
| `citations` | json | Answer citations |
|
||||
| `research` | json | Research findings |
|
||||
|
||||
### `exa_research`
|
||||
|
||||
@@ -158,34 +156,16 @@ Perform comprehensive research using AI to generate detailed reports with citati
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `taskId` | string |
|
||||
| `research` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `results` | json | Search results |
|
||||
| `similarLinks` | json | Similar links found |
|
||||
| `answer` | string | Generated answer |
|
||||
| `citations` | json | Answer citations |
|
||||
| `research` | json | Research findings |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `operation` | string | Yes | Operation |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `results` | json | results output from the block |
|
||||
| `similarLinks` | json | similarLinks output from the block |
|
||||
| `answer` | string | answer output from the block |
|
||||
| `citations` | json | citations output from the block |
|
||||
| `research` | json | research output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -69,28 +69,13 @@ Parse one or more uploaded files or files from URLs (text, PDF, CSV, images, etc
|
||||
|
||||
#### Output
|
||||
|
||||
This tool does not produce any outputs.
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `files` | json | Parsed file data |
|
||||
| `combinedContent` | string | Combined file content |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `inputMethod` | string | No | |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `files` | json | files output from the block |
|
||||
| `combinedContent` | string | combinedContent output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -79,11 +79,16 @@ Extract structured content from web pages with comprehensive metadata support. C
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `markdown` | string |
|
||||
| `html` | string |
|
||||
| `metadata` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `markdown` | string | Page content markdown |
|
||||
| `html` | any | Raw HTML content |
|
||||
| `metadata` | json | Page metadata |
|
||||
| `data` | json | Search results data |
|
||||
| `warning` | any | Warning messages |
|
||||
| `pages` | json | Crawled pages data |
|
||||
| `total` | number | Total pages found |
|
||||
| `creditsUsed` | number | Credits consumed |
|
||||
|
||||
### `firecrawl_search`
|
||||
|
||||
@@ -98,10 +103,16 @@ Search for information on the web using Firecrawl
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `data` | string |
|
||||
| `warning` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `markdown` | string | Page content markdown |
|
||||
| `html` | any | Raw HTML content |
|
||||
| `metadata` | json | Page metadata |
|
||||
| `data` | json | Search results data |
|
||||
| `warning` | any | Warning messages |
|
||||
| `pages` | json | Crawled pages data |
|
||||
| `total` | number | Total pages found |
|
||||
| `creditsUsed` | number | Credits consumed |
|
||||
|
||||
### `firecrawl_crawl`
|
||||
|
||||
@@ -118,39 +129,19 @@ Crawl entire websites and extract structured content from all accessible pages
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `jobId` | string |
|
||||
| `pages` | string |
|
||||
| `total` | string |
|
||||
| `creditsUsed` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `markdown` | string | Page content markdown |
|
||||
| `html` | any | Raw HTML content |
|
||||
| `metadata` | json | Page metadata |
|
||||
| `data` | json | Search results data |
|
||||
| `warning` | any | Warning messages |
|
||||
| `pages` | json | Crawled pages data |
|
||||
| `total` | number | Total pages found |
|
||||
| `creditsUsed` | number | Credits consumed |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `apiKey` | string | Yes | |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `markdown` | string | markdown output from the block |
|
||||
| `html` | any | html output from the block |
|
||||
| `metadata` | json | metadata output from the block |
|
||||
| `data` | json | data output from the block |
|
||||
| `warning` | any | warning output from the block |
|
||||
| `pages` | json | pages output from the block |
|
||||
| `total` | number | total output from the block |
|
||||
| `creditsUsed` | number | creditsUsed output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -56,24 +56,10 @@ Fetch PR details including diff and files changed
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `metadata` | string |
|
||||
| `title` | string |
|
||||
| `state` | string |
|
||||
| `html_url` | string |
|
||||
| `diff_url` | string |
|
||||
| `created_at` | string |
|
||||
| `updated_at` | string |
|
||||
| `files` | string |
|
||||
| `additions` | string |
|
||||
| `deletions` | string |
|
||||
| `changes` | string |
|
||||
| `patch` | string |
|
||||
| `blob_url` | string |
|
||||
| `raw_url` | string |
|
||||
| `status` | string |
|
||||
| `content` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `content` | string | Response content |
|
||||
| `metadata` | json | Response metadata |
|
||||
|
||||
### `github_comment`
|
||||
|
||||
@@ -97,17 +83,10 @@ Create comments on GitHub PRs
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `metadata` | string |
|
||||
| `html_url` | string |
|
||||
| `created_at` | string |
|
||||
| `updated_at` | string |
|
||||
| `path` | string |
|
||||
| `line` | string |
|
||||
| `side` | string |
|
||||
| `commit_id` | string |
|
||||
| `content` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `content` | string | Response content |
|
||||
| `metadata` | json | Response metadata |
|
||||
|
||||
### `github_repo_info`
|
||||
|
||||
@@ -123,15 +102,10 @@ Retrieve comprehensive GitHub repository metadata including stars, forks, issues
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `metadata` | string |
|
||||
| `description` | string |
|
||||
| `stars` | string |
|
||||
| `forks` | string |
|
||||
| `openIssues` | string |
|
||||
| `language` | string |
|
||||
| `content` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `content` | string | Response content |
|
||||
| `metadata` | json | Response metadata |
|
||||
|
||||
### `github_latest_commit`
|
||||
|
||||
@@ -148,36 +122,13 @@ Retrieve the latest commit from a GitHub repository
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `metadata` | string |
|
||||
| `html_url` | string |
|
||||
| `commit_message` | string |
|
||||
| `author` | string |
|
||||
| `login` | string |
|
||||
| `avatar_url` | string |
|
||||
| `content` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `content` | string | Response content |
|
||||
| `metadata` | json | Response metadata |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `operation` | string | Yes | Operation |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `content` | string | content output from the block |
|
||||
| `metadata` | json | metadata output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -72,12 +72,10 @@ Send emails using Gmail
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `content` | string |
|
||||
| `metadata` | string |
|
||||
| `threadId` | string |
|
||||
| `labelIds` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `content` | string | Response content |
|
||||
| `metadata` | json | Email metadata |
|
||||
|
||||
### `gmail_draft`
|
||||
|
||||
@@ -94,13 +92,10 @@ Draft emails using Gmail
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `content` | string |
|
||||
| `metadata` | string |
|
||||
| `message` | string |
|
||||
| `threadId` | string |
|
||||
| `labelIds` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `content` | string | Response content |
|
||||
| `metadata` | json | Email metadata |
|
||||
|
||||
### `gmail_read`
|
||||
|
||||
@@ -118,10 +113,10 @@ Read emails from Gmail
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `content` | string |
|
||||
| `metadata` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `content` | string | Response content |
|
||||
| `metadata` | json | Email metadata |
|
||||
|
||||
### `gmail_search`
|
||||
|
||||
@@ -137,31 +132,13 @@ Search emails in Gmail
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `content` | string |
|
||||
| `metadata` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `content` | string | Response content |
|
||||
| `metadata` | json | Email metadata |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `operation` | string | Yes | Operation |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `content` | string | content output from the block |
|
||||
| `metadata` | json | metadata output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -117,9 +117,10 @@ Create a new event in Google Calendar
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `content` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `content` | string | Operation response content |
|
||||
| `metadata` | json | Event metadata |
|
||||
|
||||
### `google_calendar_list`
|
||||
|
||||
@@ -138,9 +139,10 @@ List events from Google Calendar
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `content` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `content` | string | Operation response content |
|
||||
| `metadata` | json | Event metadata |
|
||||
|
||||
### `google_calendar_get`
|
||||
|
||||
@@ -156,9 +158,10 @@ Get a specific event from Google Calendar
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `content` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `content` | string | Operation response content |
|
||||
| `metadata` | json | Event metadata |
|
||||
|
||||
### `google_calendar_quick_add`
|
||||
|
||||
@@ -176,9 +179,10 @@ Create events from natural language text
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `content` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `content` | string | Operation response content |
|
||||
| `metadata` | json | Event metadata |
|
||||
|
||||
### `google_calendar_invite`
|
||||
|
||||
@@ -197,41 +201,13 @@ Invite attendees to an existing Google Calendar event
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `metadata` | string |
|
||||
| `htmlLink` | string |
|
||||
| `status` | string |
|
||||
| `summary` | string |
|
||||
| `description` | string |
|
||||
| `location` | string |
|
||||
| `start` | string |
|
||||
| `end` | string |
|
||||
| `attendees` | string |
|
||||
| `creator` | string |
|
||||
| `organizer` | string |
|
||||
| `content` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `content` | string | Operation response content |
|
||||
| `metadata` | json | Event metadata |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `operation` | string | Yes | Operation |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `content` | string | content output from the block |
|
||||
| `metadata` | json | metadata output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -100,10 +100,11 @@ Read content from a Google Docs document
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `content` | string |
|
||||
| `metadata` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `content` | string | Document content |
|
||||
| `metadata` | json | Document metadata |
|
||||
| `updatedContent` | boolean | Content update status |
|
||||
|
||||
### `google_docs_write`
|
||||
|
||||
@@ -119,10 +120,11 @@ Write or update content in a Google Docs document
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `updatedContent` | string |
|
||||
| `metadata` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `content` | string | Document content |
|
||||
| `metadata` | json | Document metadata |
|
||||
| `updatedContent` | boolean | Content update status |
|
||||
|
||||
### `google_docs_create`
|
||||
|
||||
@@ -140,31 +142,14 @@ Create a new Google Docs document
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `metadata` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `content` | string | Document content |
|
||||
| `metadata` | json | Document metadata |
|
||||
| `updatedContent` | boolean | Content update status |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `operation` | string | Yes | Operation |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `content` | string | content output from the block |
|
||||
| `metadata` | json | metadata output from the block |
|
||||
| `updatedContent` | boolean | updatedContent output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -96,17 +96,10 @@ Upload a file to Google Drive
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `file` | string |
|
||||
| `name` | string |
|
||||
| `mimeType` | string |
|
||||
| `webViewLink` | string |
|
||||
| `webContentLink` | string |
|
||||
| `size` | string |
|
||||
| `createdTime` | string |
|
||||
| `modifiedTime` | string |
|
||||
| `parents` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `file` | json | File data |
|
||||
| `files` | json | Files list |
|
||||
|
||||
### `google_drive_create_folder`
|
||||
|
||||
@@ -123,17 +116,10 @@ Create a new folder in Google Drive
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `file` | string |
|
||||
| `name` | string |
|
||||
| `mimeType` | string |
|
||||
| `webViewLink` | string |
|
||||
| `webContentLink` | string |
|
||||
| `size` | string |
|
||||
| `createdTime` | string |
|
||||
| `modifiedTime` | string |
|
||||
| `parents` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `file` | json | File data |
|
||||
| `files` | json | Files list |
|
||||
|
||||
### `google_drive_list`
|
||||
|
||||
@@ -152,38 +138,13 @@ List files and folders in Google Drive
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `files` | string |
|
||||
| `name` | string |
|
||||
| `mimeType` | string |
|
||||
| `webViewLink` | string |
|
||||
| `webContentLink` | string |
|
||||
| `size` | string |
|
||||
| `createdTime` | string |
|
||||
| `modifiedTime` | string |
|
||||
| `parents` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `file` | json | File data |
|
||||
| `files` | json | Files list |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `operation` | string | Yes | Operation |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `file` | json | file output from the block |
|
||||
| `files` | json | files output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -79,34 +79,13 @@ Search the web with the Custom Search API
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `items` | string |
|
||||
| `searchInformation` | string |
|
||||
| `searchTime` | string |
|
||||
| `formattedSearchTime` | string |
|
||||
| `formattedTotalResults` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `items` | json | Search result items |
|
||||
| `searchInformation` | json | Search metadata |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `query` | string | Yes | Search Query - Enter your search query |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `items` | json | items output from the block |
|
||||
| `searchInformation` | json | searchInformation output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -116,9 +116,15 @@ Read data from a Google Sheets spreadsheet
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `data` | json |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `data` | json | Sheet data |
|
||||
| `metadata` | json | Operation metadata |
|
||||
| `updatedRange` | string | Updated range |
|
||||
| `updatedRows` | number | Updated rows count |
|
||||
| `updatedColumns` | number | Updated columns count |
|
||||
| `updatedCells` | number | Updated cells count |
|
||||
| `tableRange` | string | Table range |
|
||||
|
||||
### `google_sheets_write`
|
||||
|
||||
@@ -137,15 +143,15 @@ Write data to a Google Sheets spreadsheet
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `updatedRange` | string |
|
||||
| `updatedRows` | string |
|
||||
| `updatedColumns` | string |
|
||||
| `updatedCells` | string |
|
||||
| `metadata` | string |
|
||||
| `spreadsheetId` | string |
|
||||
| `spreadsheetUrl` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `data` | json | Sheet data |
|
||||
| `metadata` | json | Operation metadata |
|
||||
| `updatedRange` | string | Updated range |
|
||||
| `updatedRows` | number | Updated rows count |
|
||||
| `updatedColumns` | number | Updated columns count |
|
||||
| `updatedCells` | number | Updated cells count |
|
||||
| `tableRange` | string | Table range |
|
||||
|
||||
### `google_sheets_update`
|
||||
|
||||
@@ -164,15 +170,15 @@ Update data in a Google Sheets spreadsheet
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `updatedRange` | string |
|
||||
| `updatedRows` | string |
|
||||
| `updatedColumns` | string |
|
||||
| `updatedCells` | string |
|
||||
| `metadata` | string |
|
||||
| `spreadsheetId` | string |
|
||||
| `spreadsheetUrl` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `data` | json | Sheet data |
|
||||
| `metadata` | json | Operation metadata |
|
||||
| `updatedRange` | string | Updated range |
|
||||
| `updatedRows` | number | Updated rows count |
|
||||
| `updatedColumns` | number | Updated columns count |
|
||||
| `updatedCells` | number | Updated cells count |
|
||||
| `tableRange` | string | Table range |
|
||||
|
||||
### `google_sheets_append`
|
||||
|
||||
@@ -192,35 +198,18 @@ Append data to the end of a Google Sheets spreadsheet
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `data` | json |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `data` | json | Sheet data |
|
||||
| `metadata` | json | Operation metadata |
|
||||
| `updatedRange` | string | Updated range |
|
||||
| `updatedRows` | number | Updated rows count |
|
||||
| `updatedColumns` | number | Updated columns count |
|
||||
| `updatedCells` | number | Updated cells count |
|
||||
| `tableRange` | string | Table range |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `operation` | string | Yes | Operation |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `data` | json | data output from the block |
|
||||
| `metadata` | json | metadata output from the block |
|
||||
| `updatedRange` | string | updatedRange output from the block |
|
||||
| `updatedRows` | number | updatedRows output from the block |
|
||||
| `updatedColumns` | number | updatedColumns output from the block |
|
||||
| `updatedCells` | number | updatedCells output from the block |
|
||||
| `tableRange` | string | tableRange output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -90,35 +90,14 @@ Generate completions using Hugging Face Inference API
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `content` | string |
|
||||
| `model` | string |
|
||||
| `usage` | string |
|
||||
| `completion_tokens` | string |
|
||||
| `total_tokens` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `content` | string | Generated response |
|
||||
| `model` | string | Model used |
|
||||
| `usage` | json | Token usage stats |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `systemPrompt` | string | No | System Prompt - Enter system prompt to guide the model behavior... |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `content` | string | content output from the block |
|
||||
| `model` | string | model output from the block |
|
||||
| `usage` | json | usage output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
218
apps/docs/content/docs/tools/hunter.mdx
Normal file
218
apps/docs/content/docs/tools/hunter.mdx
Normal file
@@ -0,0 +1,218 @@
|
||||
---
|
||||
title: Hunter io
|
||||
description: Find and verify professional email addresses
|
||||
---
|
||||
|
||||
import { BlockInfoCard } from "@/components/ui/block-info-card"
|
||||
|
||||
<BlockInfoCard
|
||||
type="hunter"
|
||||
color="#E0E0E0"
|
||||
icon={true}
|
||||
iconSvg={`<svg className="block-icon"
|
||||
|
||||
|
||||
|
||||
viewBox='0 0 20 19'
|
||||
fill='none'
|
||||
xmlns='http://www.w3.org/2000/svg'
|
||||
>
|
||||
<path
|
||||
d='M12.0671 8.43455C11.6625 8.55094 11.2164 8.55288 10.7992 8.53525C10.3141 8.51472 9.80024 8.45339 9.35223 8.25426C8.98359 8.09047 8.68787 7.79493 8.84262 7.36805C8.95175 7.06699 9.19361 6.79803 9.47319 6.64644C9.78751 6.4759 10.1329 6.50361 10.4474 6.65774C10.8005 6.83082 11.0942 7.11235 11.3604 7.3964C11.5 7.54536 11.6332 7.70002 11.7646 7.85617C11.8252 7.92801 12.2364 8.33865 12.0671 8.43455ZM18.7923 8.58131C18.17 8.43655 17.4348 8.4884 16.811 8.38867C15.8284 8.23146 14.3648 7.08576 13.5714 5.92122C13.0201 5.11202 12.757 4.28785 12.3356 3.28356C12.0415 2.58257 11.4001 0.365389 10.5032 1.40318C10.1339 1.83057 9.7204 3.23752 9.41837 3.2177C9.19467 3.26971 9.15818 2.83371 9.08739 2.64738C8.95886 2.30903 8.89071 1.9176 8.7185 1.59854C8.58086 1.34353 8.40014 1.03806 8.12337 0.91412C7.63027 0.660572 7.03575 1.42476 6.74072 2.33095C6.61457 2.81687 5.76653 3.75879 5.39721 3.9866C3.71684 5.02352 0.344233 6.11595 0.000262184 9.75358C-0.00114142 9.76867 0.000262182 9.81455 0.0573714 9.77323C0.459591 9.48197 5.02183 6.19605 2.09392 12.5476C0.300195 16.439 8.96062 18.917 9.40582 18.9271C9.46582 18.9284 9.46144 18.9011 9.46347 18.8832C10.1546 12.6724 16.9819 13.3262 18.5718 11.8387C20.1474 10.3649 20.1796 8.93816 18.7923 8.58131Z'
|
||||
fill='#FA5320'
|
||||
/>
|
||||
</svg>`}
|
||||
/>
|
||||
|
||||
{/* MANUAL-CONTENT-START:intro */}
|
||||
[Hunter.io](https://hunter.io/) is a leading platform for finding and verifying professional email addresses, discovering companies, and enriching contact data. Hunter.io provides robust APIs for domain search, email finding, verification, and company discovery, making it an essential tool for sales, recruiting, and business development.
|
||||
|
||||
With Hunter.io, you can:
|
||||
|
||||
- **Find email addresses by domain:** Search for all publicly available email addresses associated with a specific company domain.
|
||||
- **Discover companies:** Use advanced filters and AI-powered search to find companies matching your criteria.
|
||||
- **Find a specific email address:** Locate the most likely email address for a person at a company using their name and domain.
|
||||
- **Verify email addresses:** Check the deliverability and validity of any email address.
|
||||
- **Enrich company data:** Retrieve detailed information about companies, including size, technologies used, and more.
|
||||
|
||||
In Sim, the Hunter.io integration enables your agents to programmatically search for and verify email addresses, discover companies, and enrich contact data using Hunter.io’s API. This allows you to automate lead generation, contact enrichment, and email verification directly within your workflows. Your agents can leverage Hunter.io’s tools to streamline outreach, keep your CRM up-to-date, and power intelligent automation scenarios for sales, recruiting, and more.
|
||||
{/* MANUAL-CONTENT-END */}
|
||||
|
||||
## Usage Instructions
|
||||
|
||||
Search for email addresses, verify their deliverability, discover companies, and enrich contact data using Hunter.io's powerful email finding capabilities.
|
||||
|
||||
|
||||
|
||||
## Tools
|
||||
|
||||
### `hunter_discover`
|
||||
|
||||
Returns companies matching a set of criteria using Hunter.io AI-powered search.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `query` | string | No | Natural language search query for companies |
|
||||
| `domain` | string | No | Company domain names to filter by |
|
||||
| `headcount` | string | No | Company size filter \(e.g., |
|
||||
| `company_type` | string | No | Type of organization |
|
||||
| `technology` | string | No | Technology used by companies |
|
||||
| `apiKey` | string | Yes | Hunter.io API Key |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `results` | json | Search results |
|
||||
| `emails` | json | Email addresses found |
|
||||
| `email` | string | Found email address |
|
||||
| `score` | number | Confidence score |
|
||||
| `result` | string | Verification result |
|
||||
| `status` | string | Status message |
|
||||
| `total` | number | Total results count |
|
||||
| `personal_emails` | number | Personal emails count |
|
||||
| `generic_emails` | number | Generic emails count |
|
||||
|
||||
### `hunter_domain_search`
|
||||
|
||||
Returns all the email addresses found using one given domain name, with sources.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `domain` | string | Yes | Domain name to search for email addresses |
|
||||
| `limit` | number | No | Maximum email addresses to return \(default: 10\) |
|
||||
| `offset` | number | No | Number of email addresses to skip |
|
||||
| `type` | string | No | Filter for personal or generic emails |
|
||||
| `seniority` | string | No | Filter by seniority level: junior, senior, or executive |
|
||||
| `department` | string | No | Filter by specific departments \(e.g., sales, marketing\) |
|
||||
| `apiKey` | string | Yes | Hunter.io API Key |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `results` | json | Search results |
|
||||
| `emails` | json | Email addresses found |
|
||||
| `email` | string | Found email address |
|
||||
| `score` | number | Confidence score |
|
||||
| `result` | string | Verification result |
|
||||
| `status` | string | Status message |
|
||||
| `total` | number | Total results count |
|
||||
| `personal_emails` | number | Personal emails count |
|
||||
| `generic_emails` | number | Generic emails count |
|
||||
|
||||
### `hunter_email_finder`
|
||||
|
||||
Finds the most likely email address for a person given their name and company domain.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `domain` | string | Yes | Company domain name |
|
||||
| `first_name` | string | Yes | Person |
|
||||
| `last_name` | string | Yes | Person |
|
||||
| `company` | string | No | Company name |
|
||||
| `apiKey` | string | Yes | Hunter.io API Key |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `results` | json | Search results |
|
||||
| `emails` | json | Email addresses found |
|
||||
| `email` | string | Found email address |
|
||||
| `score` | number | Confidence score |
|
||||
| `result` | string | Verification result |
|
||||
| `status` | string | Status message |
|
||||
| `total` | number | Total results count |
|
||||
| `personal_emails` | number | Personal emails count |
|
||||
| `generic_emails` | number | Generic emails count |
|
||||
|
||||
### `hunter_email_verifier`
|
||||
|
||||
Verifies the deliverability of an email address and provides detailed verification status.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `email` | string | Yes | The email address to verify |
|
||||
| `apiKey` | string | Yes | Hunter.io API Key |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `results` | json | Search results |
|
||||
| `emails` | json | Email addresses found |
|
||||
| `email` | string | Found email address |
|
||||
| `score` | number | Confidence score |
|
||||
| `result` | string | Verification result |
|
||||
| `status` | string | Status message |
|
||||
| `total` | number | Total results count |
|
||||
| `personal_emails` | number | Personal emails count |
|
||||
| `generic_emails` | number | Generic emails count |
|
||||
|
||||
### `hunter_companies_find`
|
||||
|
||||
Enriches company data using domain name.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `domain` | string | Yes | Domain to find company data for |
|
||||
| `apiKey` | string | Yes | Hunter.io API Key |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `results` | json | Search results |
|
||||
| `emails` | json | Email addresses found |
|
||||
| `email` | string | Found email address |
|
||||
| `score` | number | Confidence score |
|
||||
| `result` | string | Verification result |
|
||||
| `status` | string | Status message |
|
||||
| `total` | number | Total results count |
|
||||
| `personal_emails` | number | Personal emails count |
|
||||
| `generic_emails` | number | Generic emails count |
|
||||
|
||||
### `hunter_email_count`
|
||||
|
||||
Returns the total number of email addresses found for a domain or company.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `domain` | string | No | Domain to count emails for \(required if company not provided\) |
|
||||
| `company` | string | No | Company name to count emails for \(required if domain not provided\) |
|
||||
| `type` | string | No | Filter for personal or generic emails only |
|
||||
| `apiKey` | string | Yes | Hunter.io API Key |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `results` | json | Search results |
|
||||
| `emails` | json | Email addresses found |
|
||||
| `email` | string | Found email address |
|
||||
| `score` | number | Confidence score |
|
||||
| `result` | string | Verification result |
|
||||
| `status` | string | Status message |
|
||||
| `total` | number | Total results count |
|
||||
| `personal_emails` | number | Personal emails count |
|
||||
| `generic_emails` | number | Generic emails count |
|
||||
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
- Type: `hunter`
|
||||
@@ -71,33 +71,14 @@ Generate images using OpenAI
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `content` | string |
|
||||
| `image` | string |
|
||||
| `metadata` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `content` | string | Generation response |
|
||||
| `image` | string | Generated image URL |
|
||||
| `metadata` | json | Generation metadata |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `prompt` | string | Yes | |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `content` | string | content output from the block |
|
||||
| `image` | string | image output from the block |
|
||||
| `metadata` | json | metadata output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -85,29 +85,12 @@ Extract and process web content into clean, LLM-friendly text using Jina AI Read
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `content` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `content` | string | Extracted content |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `url` | string | Yes | URL - Enter URL to extract content from |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `content` | string | content output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -65,14 +65,16 @@ Retrieve detailed information about a specific Jira issue
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `ts` | string |
|
||||
| `issueKey` | string |
|
||||
| `summary` | string |
|
||||
| `description` | string |
|
||||
| `created` | string |
|
||||
| `updated` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `ts` | string | Timestamp |
|
||||
| `issueKey` | string | Issue key |
|
||||
| `summary` | string | Issue summary |
|
||||
| `description` | string | Issue description |
|
||||
| `created` | string | Creation date |
|
||||
| `updated` | string | Update date |
|
||||
| `success` | boolean | Operation success |
|
||||
| `url` | string | Issue URL |
|
||||
|
||||
### `jira_update`
|
||||
|
||||
@@ -95,12 +97,16 @@ Update a Jira issue
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `ts` | string |
|
||||
| `issueKey` | string |
|
||||
| `summary` | string |
|
||||
| `success` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `ts` | string | Timestamp |
|
||||
| `issueKey` | string | Issue key |
|
||||
| `summary` | string | Issue summary |
|
||||
| `description` | string | Issue description |
|
||||
| `created` | string | Creation date |
|
||||
| `updated` | string | Update date |
|
||||
| `success` | boolean | Operation success |
|
||||
| `url` | string | Issue URL |
|
||||
|
||||
### `jira_write`
|
||||
|
||||
@@ -118,17 +124,20 @@ Write a Jira issue
|
||||
| `priority` | string | No | Priority for the issue |
|
||||
| `assignee` | string | No | Assignee for the issue |
|
||||
| `cloudId` | string | No | Jira Cloud ID for the instance. If not provided, it will be fetched using the domain. |
|
||||
| `issueType` | string | Yes | Type of issue to create \(e.g., Task, Story, Bug, Sub-task\) |
|
||||
| `issueType` | string | Yes | Type of issue to create \(e.g., Task, Story\) |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `ts` | string |
|
||||
| `issueKey` | string |
|
||||
| `summary` | string |
|
||||
| `success` | string |
|
||||
| `url` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `ts` | string | Timestamp |
|
||||
| `issueKey` | string | Issue key |
|
||||
| `summary` | string | Issue summary |
|
||||
| `description` | string | Issue description |
|
||||
| `created` | string | Creation date |
|
||||
| `updated` | string | Update date |
|
||||
| `success` | boolean | Operation success |
|
||||
| `url` | string | Issue URL |
|
||||
|
||||
### `jira_bulk_read`
|
||||
|
||||
@@ -145,36 +154,19 @@ Retrieve multiple Jira issues in bulk
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `issues` | array |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `ts` | string | Timestamp |
|
||||
| `issueKey` | string | Issue key |
|
||||
| `summary` | string | Issue summary |
|
||||
| `description` | string | Issue description |
|
||||
| `created` | string | Creation date |
|
||||
| `updated` | string | Update date |
|
||||
| `success` | boolean | Operation success |
|
||||
| `url` | string | Issue URL |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `operation` | string | Yes | Operation |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `ts` | string | ts output from the block |
|
||||
| `issueKey` | string | issueKey output from the block |
|
||||
| `summary` | string | summary output from the block |
|
||||
| `description` | string | description output from the block |
|
||||
| `created` | string | created output from the block |
|
||||
| `updated` | string | updated output from the block |
|
||||
| `success` | boolean | success output from the block |
|
||||
| `url` | string | url output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -57,31 +57,24 @@ Perform semantic vector search across knowledge bases, upload individual chunks
|
||||
|
||||
### `knowledge_search`
|
||||
|
||||
Search for similar content in one or more knowledge bases using vector similarity
|
||||
Search for similar content in a knowledge base using vector similarity
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `knowledgeBaseIds` | string | Yes | ID of the knowledge base to search in, or comma-separated IDs for multiple knowledge bases |
|
||||
| `knowledgeBaseId` | string | Yes | ID of the knowledge base to search in |
|
||||
| `query` | string | Yes | Search query text |
|
||||
| `topK` | number | No | Number of most similar results to return \(1-100\) |
|
||||
| `tag1` | string | No | Filter by tag 1 value |
|
||||
| `tag2` | string | No | Filter by tag 2 value |
|
||||
| `tag3` | string | No | Filter by tag 3 value |
|
||||
| `tag4` | string | No | Filter by tag 4 value |
|
||||
| `tag5` | string | No | Filter by tag 5 value |
|
||||
| `tag6` | string | No | Filter by tag 6 value |
|
||||
| `tag7` | string | No | Filter by tag 7 value |
|
||||
| `tagFilters` | any | No | Array of tag filters with tagName and tagValue properties |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `results` | string |
|
||||
| `query` | string |
|
||||
| `totalResults` | string |
|
||||
| `cost` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `results` | json | Search results |
|
||||
| `query` | string | Query used |
|
||||
| `totalResults` | number | Total results count |
|
||||
|
||||
### `knowledge_upload_chunk`
|
||||
|
||||
@@ -97,16 +90,11 @@ Upload a new chunk to a document in a knowledge base
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `data` | string |
|
||||
| `chunkIndex` | string |
|
||||
| `content` | string |
|
||||
| `contentLength` | string |
|
||||
| `tokenCount` | string |
|
||||
| `enabled` | string |
|
||||
| `createdAt` | string |
|
||||
| `updatedAt` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `results` | json | Search results |
|
||||
| `query` | string | Query used |
|
||||
| `totalResults` | number | Total results count |
|
||||
|
||||
### `knowledge_create_document`
|
||||
|
||||
@@ -126,35 +114,18 @@ Create a new document in a knowledge base
|
||||
| `tag5` | string | No | Tag 5 value for the document |
|
||||
| `tag6` | string | No | Tag 6 value for the document |
|
||||
| `tag7` | string | No | Tag 7 value for the document |
|
||||
| `documentTagsData` | array | No | Structured tag data with names, types, and values |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `data` | string |
|
||||
| `name` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `results` | json | Search results |
|
||||
| `query` | string | Query used |
|
||||
| `totalResults` | number | Total results count |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `operation` | string | Yes | Operation |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `results` | json | results output from the block |
|
||||
| `query` | string | query output from the block |
|
||||
| `totalResults` | number | totalResults output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `blocks`
|
||||
|
||||
@@ -61,9 +61,10 @@ Fetch and filter issues from Linear
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `issues` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `issues` | json | Issues list |
|
||||
| `issue` | json | Single issue data |
|
||||
|
||||
### `linear_create_issue`
|
||||
|
||||
@@ -80,35 +81,13 @@ Create a new issue in Linear
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `issue` | string |
|
||||
| `title` | string |
|
||||
| `description` | string |
|
||||
| `state` | string |
|
||||
| `teamId` | string |
|
||||
| `projectId` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `issues` | json | Issues list |
|
||||
| `issue` | json | Single issue data |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `operation` | string | Yes | Operation |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `issues` | json | issues output from the block |
|
||||
| `issue` | json | issue output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -64,31 +64,13 @@ Search the web for information using Linkup
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `answer` | string |
|
||||
| `sources` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `answer` | string | Generated answer |
|
||||
| `sources` | json | Source references |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `q` | string | Yes | Search Query - Enter your search query |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `answer` | string | answer output from the block |
|
||||
| `sources` | json | sources output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -64,9 +64,11 @@ Add memories to Mem0 for persistent storage and retrieval
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `memories` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `ids` | any | Memory identifiers |
|
||||
| `memories` | any | Memory data |
|
||||
| `searchResults` | any | Search results |
|
||||
|
||||
### `mem0_search_memories`
|
||||
|
||||
@@ -83,10 +85,11 @@ Search for memories in Mem0 using semantic search
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `searchResults` | string |
|
||||
| `ids` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `ids` | any | Memory identifiers |
|
||||
| `memories` | any | Memory data |
|
||||
| `searchResults` | any | Search results |
|
||||
|
||||
### `mem0_get_memories`
|
||||
|
||||
@@ -105,32 +108,14 @@ Retrieve memories from Mem0 by ID or filter criteria
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `memories` | string |
|
||||
| `ids` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `ids` | any | Memory identifiers |
|
||||
| `memories` | any | Memory data |
|
||||
| `searchResults` | any | Search results |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `operation` | string | Yes | Operation |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `ids` | any | ids output from the block |
|
||||
| `memories` | any | memories output from the block |
|
||||
| `searchResults` | any | searchResults output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -55,9 +55,10 @@ Add a new memory to the database or append to existing memory with the same ID.
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `memories` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `memories` | any | Memory data |
|
||||
| `id` | string | Memory identifier |
|
||||
|
||||
### `memory_get`
|
||||
|
||||
@@ -71,10 +72,10 @@ Retrieve a specific memory by its ID
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `memories` | string |
|
||||
| `message` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `memories` | any | Memory data |
|
||||
| `id` | string | Memory identifier |
|
||||
|
||||
### `memory_get_all`
|
||||
|
||||
@@ -87,10 +88,10 @@ Retrieve all memories from the database
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `message` | string |
|
||||
| `memories` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `memories` | any | Memory data |
|
||||
| `id` | string | Memory identifier |
|
||||
|
||||
### `memory_delete`
|
||||
|
||||
@@ -104,30 +105,13 @@ Delete a specific memory by its ID
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `message` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `memories` | any | Memory data |
|
||||
| `id` | string | Memory identifier |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `operation` | string | Yes | Operation |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `memories` | any | memories output from the block |
|
||||
| `id` | string | id output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `blocks`
|
||||
|
||||
@@ -19,6 +19,7 @@
|
||||
"google_search",
|
||||
"google_sheets",
|
||||
"huggingface",
|
||||
"hunter",
|
||||
"image_generator",
|
||||
"jina",
|
||||
"jira",
|
||||
|
||||
@@ -114,9 +114,16 @@ Read data from a Microsoft Excel spreadsheet
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `data` | json |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `data` | json | Sheet data |
|
||||
| `metadata` | json | Operation metadata |
|
||||
| `updatedRange` | string | Updated range |
|
||||
| `updatedRows` | number | Updated rows count |
|
||||
| `updatedColumns` | number | Updated columns count |
|
||||
| `updatedCells` | number | Updated cells count |
|
||||
| `index` | number | Row index |
|
||||
| `values` | json | Table values |
|
||||
|
||||
### `microsoft_excel_write`
|
||||
|
||||
@@ -135,15 +142,16 @@ Write data to a Microsoft Excel spreadsheet
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `updatedRange` | string |
|
||||
| `updatedRows` | string |
|
||||
| `updatedColumns` | string |
|
||||
| `updatedCells` | string |
|
||||
| `metadata` | string |
|
||||
| `spreadsheetId` | string |
|
||||
| `spreadsheetUrl` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `data` | json | Sheet data |
|
||||
| `metadata` | json | Operation metadata |
|
||||
| `updatedRange` | string | Updated range |
|
||||
| `updatedRows` | number | Updated rows count |
|
||||
| `updatedColumns` | number | Updated columns count |
|
||||
| `updatedCells` | number | Updated cells count |
|
||||
| `index` | number | Row index |
|
||||
| `values` | json | Table values |
|
||||
|
||||
### `microsoft_excel_table_add`
|
||||
|
||||
@@ -160,36 +168,19 @@ Add new rows to a Microsoft Excel table
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `data` | json |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `data` | json | Sheet data |
|
||||
| `metadata` | json | Operation metadata |
|
||||
| `updatedRange` | string | Updated range |
|
||||
| `updatedRows` | number | Updated rows count |
|
||||
| `updatedColumns` | number | Updated columns count |
|
||||
| `updatedCells` | number | Updated cells count |
|
||||
| `index` | number | Row index |
|
||||
| `values` | json | Table values |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `operation` | string | Yes | Operation |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `data` | json | data output from the block |
|
||||
| `metadata` | json | metadata output from the block |
|
||||
| `updatedRange` | string | updatedRange output from the block |
|
||||
| `updatedRows` | number | updatedRows output from the block |
|
||||
| `updatedColumns` | number | updatedColumns output from the block |
|
||||
| `updatedCells` | number | updatedCells output from the block |
|
||||
| `index` | number | index output from the block |
|
||||
| `values` | json | values output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -117,14 +117,11 @@ Read content from a Microsoft Teams chat
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `content` | string |
|
||||
| `metadata` | string |
|
||||
| `messageCount` | string |
|
||||
| `messages` | string |
|
||||
| `totalAttachments` | string |
|
||||
| `attachmentTypes` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `content` | string | Message content |
|
||||
| `metadata` | json | Message metadata |
|
||||
| `updatedContent` | boolean | Content update status |
|
||||
|
||||
### `microsoft_teams_write_chat`
|
||||
|
||||
@@ -140,10 +137,11 @@ Write or update content in a Microsoft Teams chat
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `updatedContent` | string |
|
||||
| `metadata` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `content` | string | Message content |
|
||||
| `metadata` | json | Message metadata |
|
||||
| `updatedContent` | boolean | Content update status |
|
||||
|
||||
### `microsoft_teams_read_channel`
|
||||
|
||||
@@ -159,15 +157,11 @@ Read content from a Microsoft Teams channel
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `content` | string |
|
||||
| `metadata` | string |
|
||||
| `channelId` | string |
|
||||
| `messageCount` | string |
|
||||
| `messages` | string |
|
||||
| `totalAttachments` | string |
|
||||
| `attachmentTypes` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `content` | string | Message content |
|
||||
| `metadata` | json | Message metadata |
|
||||
| `updatedContent` | boolean | Content update status |
|
||||
|
||||
### `microsoft_teams_write_channel`
|
||||
|
||||
@@ -184,32 +178,14 @@ Write or send a message to a Microsoft Teams channel
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `updatedContent` | string |
|
||||
| `metadata` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `content` | string | Message content |
|
||||
| `metadata` | json | Message metadata |
|
||||
| `updatedContent` | boolean | Content update status |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `operation` | string | Yes | Operation |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `content` | string | content output from the block |
|
||||
| `metadata` | json | metadata output from the block |
|
||||
| `updatedContent` | boolean | updatedContent output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -104,28 +104,13 @@ Parse PDF documents using Mistral OCR API
|
||||
|
||||
#### Output
|
||||
|
||||
This tool does not produce any outputs.
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `content` | string | Extracted content |
|
||||
| `metadata` | json | Processing metadata |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `inputMethod` | string | No | |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `content` | string | content output from the block |
|
||||
| `metadata` | json | metadata output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -64,13 +64,10 @@ Read content from a Notion page
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `content` | string |
|
||||
| `metadata` | string |
|
||||
| `lastEditedTime` | string |
|
||||
| `createdTime` | string |
|
||||
| `url` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `content` | string | Page content |
|
||||
| `metadata` | any | Page metadata |
|
||||
|
||||
### `notion_read_database`
|
||||
|
||||
@@ -85,16 +82,10 @@ Read database information and structure from Notion
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `metadata` | string |
|
||||
| `url` | string |
|
||||
| `id` | string |
|
||||
| `createdTime` | string |
|
||||
| `lastEditedTime` | string |
|
||||
| `properties` | string |
|
||||
| `content` | string |
|
||||
| `title` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `content` | string | Page content |
|
||||
| `metadata` | any | Page metadata |
|
||||
|
||||
### `notion_write`
|
||||
|
||||
@@ -110,9 +101,10 @@ Append content to a Notion page
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `content` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `content` | string | Page content |
|
||||
| `metadata` | any | Page metadata |
|
||||
|
||||
### `notion_create_page`
|
||||
|
||||
@@ -129,9 +121,10 @@ Create a new page in Notion
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `content` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `content` | string | Page content |
|
||||
| `metadata` | any | Page metadata |
|
||||
|
||||
### `notion_query_database`
|
||||
|
||||
@@ -149,13 +142,10 @@ Query and filter Notion database entries with advanced filtering
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `content` | string |
|
||||
| `metadata` | string |
|
||||
| `hasMore` | string |
|
||||
| `nextCursor` | string |
|
||||
| `results` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `content` | string | Page content |
|
||||
| `metadata` | any | Page metadata |
|
||||
|
||||
### `notion_search`
|
||||
|
||||
@@ -172,13 +162,10 @@ Search across all pages and databases in Notion workspace
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `content` | string |
|
||||
| `metadata` | string |
|
||||
| `hasMore` | string |
|
||||
| `nextCursor` | string |
|
||||
| `results` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `content` | string | Page content |
|
||||
| `metadata` | any | Page metadata |
|
||||
|
||||
### `notion_create_database`
|
||||
|
||||
@@ -195,35 +182,13 @@ Create a new database in Notion with custom properties
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `metadata` | string |
|
||||
| `url` | string |
|
||||
| `createdTime` | string |
|
||||
| `properties` | string |
|
||||
| `content` | string |
|
||||
| `title` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `content` | string | Page content |
|
||||
| `metadata` | any | Page metadata |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `operation` | string | Yes | Operation |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `content` | string | content output from the block |
|
||||
| `metadata` | any | metadata output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -64,34 +64,14 @@ Generate embeddings from text using OpenAI
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `embeddings` | string |
|
||||
| `model` | string |
|
||||
| `usage` | string |
|
||||
| `total_tokens` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `embeddings` | json | Generated embeddings |
|
||||
| `model` | string | Model used |
|
||||
| `usage` | json | Token usage |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `input` | string | Yes | Input Text - Enter text to generate embeddings for |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `embeddings` | json | embeddings output from the block |
|
||||
| `model` | string | model output from the block |
|
||||
| `usage` | json | usage output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -161,11 +161,10 @@ Send emails using Outlook
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `message` | string |
|
||||
| `results` | string |
|
||||
| `timestamp` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `message` | string | Response message |
|
||||
| `results` | json | Email results |
|
||||
|
||||
### `outlook_draft`
|
||||
|
||||
@@ -182,13 +181,10 @@ Draft emails using Outlook
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `message` | string |
|
||||
| `results` | string |
|
||||
| `subject` | string |
|
||||
| `status` | string |
|
||||
| `timestamp` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `message` | string | Response message |
|
||||
| `results` | json | Email results |
|
||||
|
||||
### `outlook_read`
|
||||
|
||||
@@ -204,31 +200,13 @@ Read emails from Outlook
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `message` | string |
|
||||
| `results` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `message` | string | Response message |
|
||||
| `results` | json | Email results |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `operation` | string | Yes | Operation |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `message` | string | message output from the block |
|
||||
| `results` | json | results output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -60,35 +60,14 @@ Generate completions using Perplexity AI chat models
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `content` | string |
|
||||
| `model` | string |
|
||||
| `usage` | string |
|
||||
| `completion_tokens` | string |
|
||||
| `total_tokens` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `content` | string | Generated response |
|
||||
| `model` | string | Model used |
|
||||
| `usage` | json | Token usage |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `content` | string | Yes | User Prompt - Enter your prompt here... |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `content` | string | content output from the block |
|
||||
| `model` | string | model output from the block |
|
||||
| `usage` | json | usage output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -65,12 +65,14 @@ Generate embeddings from text using Pinecone
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `data` | string |
|
||||
| `model` | string |
|
||||
| `vector_type` | string |
|
||||
| `usage` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `matches` | any | Search matches |
|
||||
| `upsertedCount` | any | Upserted count |
|
||||
| `data` | any | Response data |
|
||||
| `model` | any | Model information |
|
||||
| `vector_type` | any | Vector type |
|
||||
| `usage` | any | Usage statistics |
|
||||
|
||||
### `pinecone_upsert_text`
|
||||
|
||||
@@ -87,9 +89,14 @@ Insert or update text records in a Pinecone index
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `statusText` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `matches` | any | Search matches |
|
||||
| `upsertedCount` | any | Upserted count |
|
||||
| `data` | any | Response data |
|
||||
| `model` | any | Model information |
|
||||
| `vector_type` | any | Vector type |
|
||||
| `usage` | any | Usage statistics |
|
||||
|
||||
### `pinecone_search_text`
|
||||
|
||||
@@ -110,11 +117,14 @@ Search for similar text in a Pinecone index
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `matches` | string |
|
||||
| `score` | string |
|
||||
| `metadata` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `matches` | any | Search matches |
|
||||
| `upsertedCount` | any | Upserted count |
|
||||
| `data` | any | Response data |
|
||||
| `model` | any | Model information |
|
||||
| `vector_type` | any | Vector type |
|
||||
| `usage` | any | Usage statistics |
|
||||
|
||||
### `pinecone_search_vector`
|
||||
|
||||
@@ -135,12 +145,14 @@ Search for similar vectors in a Pinecone index
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `matches` | string |
|
||||
| `score` | string |
|
||||
| `values` | string |
|
||||
| `metadata` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `matches` | any | Search matches |
|
||||
| `upsertedCount` | any | Upserted count |
|
||||
| `data` | any | Response data |
|
||||
| `model` | any | Model information |
|
||||
| `vector_type` | any | Vector type |
|
||||
| `usage` | any | Usage statistics |
|
||||
|
||||
### `pinecone_fetch`
|
||||
|
||||
@@ -157,38 +169,17 @@ Fetch vectors by ID from a Pinecone index
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `matches` | string |
|
||||
| `values` | string |
|
||||
| `metadata` | string |
|
||||
| `score` | string |
|
||||
| `id` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `matches` | any | Search matches |
|
||||
| `upsertedCount` | any | Upserted count |
|
||||
| `data` | any | Response data |
|
||||
| `model` | any | Model information |
|
||||
| `vector_type` | any | Vector type |
|
||||
| `usage` | any | Usage statistics |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `operation` | string | Yes | Operation |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `matches` | any | matches output from the block |
|
||||
| `upsertedCount` | any | upsertedCount output from the block |
|
||||
| `data` | any | data output from the block |
|
||||
| `model` | any | model output from the block |
|
||||
| `vector_type` | any | vector_type output from the block |
|
||||
| `usage` | any | usage output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -124,10 +124,12 @@ Insert or update points in a Qdrant collection
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `status` | string |
|
||||
| `data` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `matches` | any | Search matches |
|
||||
| `upsertedCount` | any | Upserted count |
|
||||
| `data` | any | Response data |
|
||||
| `status` | any | Operation status |
|
||||
|
||||
### `qdrant_search_vector`
|
||||
|
||||
@@ -148,10 +150,12 @@ Search for similar vectors in a Qdrant collection
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `data` | string |
|
||||
| `status` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `matches` | any | Search matches |
|
||||
| `upsertedCount` | any | Upserted count |
|
||||
| `data` | any | Response data |
|
||||
| `status` | any | Operation status |
|
||||
|
||||
### `qdrant_fetch_points`
|
||||
|
||||
@@ -170,33 +174,15 @@ Fetch points by ID from a Qdrant collection
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `data` | string |
|
||||
| `status` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `matches` | any | Search matches |
|
||||
| `upsertedCount` | any | Upserted count |
|
||||
| `data` | any | Response data |
|
||||
| `status` | any | Operation status |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `operation` | string | Yes | Operation |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `matches` | any | matches output from the block |
|
||||
| `upsertedCount` | any | upsertedCount output from the block |
|
||||
| `data` | any | data output from the block |
|
||||
| `status` | any | status output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -61,10 +61,12 @@ Fetch posts from a subreddit with different sorting options
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `subreddit` | string |
|
||||
| `posts` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `subreddit` | string | Subreddit name |
|
||||
| `posts` | json | Posts data |
|
||||
| `post` | json | Single post data |
|
||||
| `comments` | json | Comments data |
|
||||
|
||||
### `reddit_get_comments`
|
||||
|
||||
@@ -82,38 +84,15 @@ Fetch comments from a specific Reddit post
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `post` | string |
|
||||
| `title` | string |
|
||||
| `author` | string |
|
||||
| `selftext` | string |
|
||||
| `created_utc` | string |
|
||||
| `score` | string |
|
||||
| `permalink` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `subreddit` | string | Subreddit name |
|
||||
| `posts` | json | Posts data |
|
||||
| `post` | json | Single post data |
|
||||
| `comments` | json | Comments data |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `operation` | string | Yes | Operation |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `subreddit` | string | subreddit output from the block |
|
||||
| `posts` | json | posts output from the block |
|
||||
| `post` | json | post output from the block |
|
||||
| `comments` | json | comments output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -78,38 +78,17 @@ Retrieve an object from an AWS S3 bucket
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `accessKeyId` | string | Yes | Your AWS Access Key ID |
|
||||
| `secretAccessKey` | string | Yes | Your AWS Secret Access Key |
|
||||
| `s3Uri` | string | Yes | S3 Object URL \(e.g., https://bucket-name.s3.region.amazonaws.com/path/to/file\) |
|
||||
| `s3Uri` | string | Yes | S3 Object URL |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `metadata` | string |
|
||||
| `size` | string |
|
||||
| `name` | string |
|
||||
| `lastModified` | string |
|
||||
| `url` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `url` | string | Presigned URL |
|
||||
| `metadata` | json | Object metadata |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `accessKeyId` | string | Yes | Access Key ID - Enter your AWS Access Key ID |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `url` | string | url output from the block |
|
||||
| `metadata` | json | metadata output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -36,21 +36,6 @@ Configure automated workflow execution with flexible timing options. Set up recu
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `scheduleConfig` | schedule-config | Yes | Schedule Status |
|
||||
| `scheduleType` | dropdown | Yes | Frequency |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
This block does not produce any outputs.
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `triggers`
|
||||
|
||||
@@ -101,29 +101,12 @@ A powerful web search tool that provides access to Google search results through
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `searchResults` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `searchResults` | json | Search results data |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `query` | string | Yes | Search Query - Enter your search query... |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `searchResults` | json | searchResults output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -86,10 +86,13 @@ Send messages to Slack channels or users through the Slack API. Supports Slack m
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `ts` | string |
|
||||
| `channel` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `ts` | string | Message timestamp |
|
||||
| `channel` | string | Channel identifier |
|
||||
| `canvas_id` | string | Canvas identifier |
|
||||
| `title` | string | Canvas title |
|
||||
| `messages` | json | Message data |
|
||||
|
||||
### `slack_canvas`
|
||||
|
||||
@@ -109,11 +112,13 @@ Create and share Slack canvases in channels. Canvases are collaborative document
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `canvas_id` | string |
|
||||
| `channel` | string |
|
||||
| `title` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `ts` | string | Message timestamp |
|
||||
| `channel` | string | Channel identifier |
|
||||
| `canvas_id` | string | Canvas identifier |
|
||||
| `title` | string | Canvas title |
|
||||
| `messages` | json | Message data |
|
||||
|
||||
### `slack_message_reader`
|
||||
|
||||
@@ -133,33 +138,16 @@ Read the latest messages from Slack channels. Retrieve conversation history with
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `messages` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `ts` | string | Message timestamp |
|
||||
| `channel` | string | Channel identifier |
|
||||
| `canvas_id` | string | Canvas identifier |
|
||||
| `title` | string | Canvas title |
|
||||
| `messages` | json | Message data |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `operation` | string | Yes | Operation |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `ts` | string | ts output from the block |
|
||||
| `channel` | string | channel output from the block |
|
||||
| `canvas_id` | string | canvas_id output from the block |
|
||||
| `title` | string | title output from the block |
|
||||
| `messages` | json | messages output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -212,29 +212,12 @@ Extract structured data from a webpage using Stagehand
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `data` | json |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `data` | json | Extracted data |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `url` | string | Yes | URL - Enter the URL of the website to extract data from |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `data` | json | data output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -217,33 +217,13 @@ Run an autonomous web agent to complete tasks and extract structured data
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `agentResult` | string |
|
||||
| `completed` | string |
|
||||
| `message` | string |
|
||||
| `actions` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `agentResult` | json | Agent execution result |
|
||||
| `structuredOutput` | any | Structured output data |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `startUrl` | string | Yes | Starting URL - Enter the starting URL for the agent |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `agentResult` | json | agentResult output from the block |
|
||||
| `structuredOutput` | any | structuredOutput output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -99,10 +99,10 @@ Query data from a Supabase table
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `message` | string |
|
||||
| `results` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `message` | string | Operation message |
|
||||
| `results` | json | Query results |
|
||||
|
||||
### `supabase_insert`
|
||||
|
||||
@@ -119,10 +119,10 @@ Insert data into a Supabase table
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `message` | string |
|
||||
| `results` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `message` | string | Operation message |
|
||||
| `results` | json | Query results |
|
||||
|
||||
### `supabase_get_row`
|
||||
|
||||
@@ -139,10 +139,10 @@ Get a single row from a Supabase table based on filter criteria
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `message` | string |
|
||||
| `results` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `message` | string | Operation message |
|
||||
| `results` | json | Query results |
|
||||
|
||||
### `supabase_update`
|
||||
|
||||
@@ -160,9 +160,10 @@ Update rows in a Supabase table based on filter criteria
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `message` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `message` | string | Operation message |
|
||||
| `results` | json | Query results |
|
||||
|
||||
### `supabase_delete`
|
||||
|
||||
@@ -179,30 +180,13 @@ Delete rows from a Supabase table based on filter criteria
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `message` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `message` | string | Operation message |
|
||||
| `results` | json | Query results |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `operation` | string | Yes | Operation |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `message` | string | message output from the block |
|
||||
| `results` | json | results output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -78,13 +78,14 @@ Perform AI-powered web searches using Tavily
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `query` | string |
|
||||
| `results` | string |
|
||||
| `url` | string |
|
||||
| `snippet` | string |
|
||||
| `raw_content` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `results` | json | Search results data |
|
||||
| `answer` | any | Search answer |
|
||||
| `query` | string | Query used |
|
||||
| `content` | string | Extracted content |
|
||||
| `title` | string | Page title |
|
||||
| `url` | string | Source URL |
|
||||
|
||||
### `tavily_extract`
|
||||
|
||||
@@ -100,35 +101,17 @@ Extract raw content from multiple web pages simultaneously using Tavily
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `results` | string |
|
||||
| `failed_results` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `results` | json | Search results data |
|
||||
| `answer` | any | Search answer |
|
||||
| `query` | string | Query used |
|
||||
| `content` | string | Extracted content |
|
||||
| `title` | string | Page title |
|
||||
| `url` | string | Source URL |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `operation` | string | Yes | Operation |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `results` | json | results output from the block |
|
||||
| `answer` | any | answer output from the block |
|
||||
| `query` | string | query output from the block |
|
||||
| `content` | string | content output from the block |
|
||||
| `title` | string | title output from the block |
|
||||
| `url` | string | url output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -87,31 +87,13 @@ Send messages to Telegram channels or users through the Telegram Bot API. Enable
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `ok` | string |
|
||||
| `date` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `ok` | boolean | Success status |
|
||||
| `result` | json | Message result |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `botToken` | string | Yes | Bot Token - Enter your Telegram Bot Token |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `ok` | boolean | ok output from the block |
|
||||
| `result` | json | result output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -67,29 +67,12 @@ Processes a provided thought/instruction, making it available for subsequent ste
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `acknowledgedThought` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `acknowledgedThought` | string | Acknowledged thought process |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `thought` | string | Yes | Thought Process / Instruction - Describe the step-by-step thinking process here... |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `acknowledgedThought` | string | acknowledgedThought output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -63,7 +63,11 @@ Convert text between languages while preserving meaning, nuance, and formatting.
|
||||
|
||||
#### Output
|
||||
|
||||
This tool does not produce any outputs.
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `content` | string | Translated text |
|
||||
| `model` | string | Model used |
|
||||
| `tokens` | any | Token usage |
|
||||
|
||||
### `anthropic_chat`
|
||||
|
||||
@@ -77,29 +81,14 @@ This tool does not produce any outputs.
|
||||
|
||||
#### Output
|
||||
|
||||
This tool does not produce any outputs.
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `content` | string | Translated text |
|
||||
| `model` | string | Model used |
|
||||
| `tokens` | any | Token usage |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `context` | string | Yes | Text to Translate - Enter the text you want to translate |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `content` | string | content output from the block |
|
||||
| `model` | string | model output from the block |
|
||||
| `tokens` | any | tokens output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -56,34 +56,15 @@ Send text messages to single or multiple recipients using the Twilio API.
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `success` | string |
|
||||
| `messageId` | string |
|
||||
| `status` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `success` | boolean | Send success status |
|
||||
| `messageId` | any | Message identifier |
|
||||
| `status` | any | Delivery status |
|
||||
| `error` | any | Error information |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `phoneNumbers` | string | Yes | Recipient Phone Numbers - Enter phone numbers with country code \(one per line, e.g., +1234567890\) |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `success` | boolean | success output from the block |
|
||||
| `messageId` | any | messageId output from the block |
|
||||
| `status` | any | status output from the block |
|
||||
| `error` | any | error output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -70,14 +70,11 @@ Retrieve form responses from Typeform
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `total_items` | string |
|
||||
| `answers` | string |
|
||||
| `type` | string |
|
||||
| `hidden` | string |
|
||||
| `calculated` | string |
|
||||
| `variables` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `total_items` | number | Total response count |
|
||||
| `page_count` | number | Total page count |
|
||||
| `items` | json | Response items |
|
||||
|
||||
### `typeform_files`
|
||||
|
||||
@@ -96,11 +93,11 @@ Download files uploaded in Typeform responses
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `fileUrl` | string |
|
||||
| `contentType` | string |
|
||||
| `filename` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `total_items` | number | Total response count |
|
||||
| `page_count` | number | Total page count |
|
||||
| `items` | json | Response items |
|
||||
|
||||
### `typeform_insights`
|
||||
|
||||
@@ -115,29 +112,14 @@ Retrieve insights and analytics for Typeform forms
|
||||
|
||||
#### Output
|
||||
|
||||
This tool does not produce any outputs.
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `total_items` | number | Total response count |
|
||||
| `page_count` | number | Total page count |
|
||||
| `items` | json | Response items |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `operation` | string | Yes | Operation |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `total_items` | number | total_items output from the block |
|
||||
| `page_count` | number | page_count output from the block |
|
||||
| `items` | json | items output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -68,33 +68,14 @@ Process and analyze images using advanced vision models. Capable of understandin
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `content` | string |
|
||||
| `model` | string |
|
||||
| `tokens` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `content` | string | Analysis result |
|
||||
| `model` | any | Model used |
|
||||
| `tokens` | any | Token usage |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `apiKey` | string | Yes | |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `content` | string | content output from the block |
|
||||
| `model` | any | model output from the block |
|
||||
| `tokens` | any | tokens output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -61,12 +61,16 @@ Read content from a Wealthbox note
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `note` | string |
|
||||
| `metadata` | string |
|
||||
| `noteId` | string |
|
||||
| `itemType` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `note` | any | Note data |
|
||||
| `notes` | any | Notes list |
|
||||
| `contact` | any | Contact data |
|
||||
| `contacts` | any | Contacts list |
|
||||
| `task` | any | Task data |
|
||||
| `tasks` | any | Tasks list |
|
||||
| `metadata` | json | Operation metadata |
|
||||
| `success` | any | Success status |
|
||||
|
||||
### `wealthbox_write_note`
|
||||
|
||||
@@ -82,9 +86,16 @@ Create or update a Wealthbox note
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `data` | json |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `note` | any | Note data |
|
||||
| `notes` | any | Notes list |
|
||||
| `contact` | any | Contact data |
|
||||
| `contacts` | any | Contacts list |
|
||||
| `task` | any | Task data |
|
||||
| `tasks` | any | Tasks list |
|
||||
| `metadata` | json | Operation metadata |
|
||||
| `success` | any | Success status |
|
||||
|
||||
### `wealthbox_read_contact`
|
||||
|
||||
@@ -99,12 +110,16 @@ Read content from a Wealthbox contact
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `contact` | string |
|
||||
| `metadata` | string |
|
||||
| `contactId` | string |
|
||||
| `itemType` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `note` | any | Note data |
|
||||
| `notes` | any | Notes list |
|
||||
| `contact` | any | Contact data |
|
||||
| `contacts` | any | Contacts list |
|
||||
| `task` | any | Task data |
|
||||
| `tasks` | any | Tasks list |
|
||||
| `metadata` | json | Operation metadata |
|
||||
| `success` | any | Success status |
|
||||
|
||||
### `wealthbox_write_contact`
|
||||
|
||||
@@ -122,11 +137,16 @@ Create a new Wealthbox contact
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `contact` | string |
|
||||
| `metadata` | string |
|
||||
| `itemType` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `note` | any | Note data |
|
||||
| `notes` | any | Notes list |
|
||||
| `contact` | any | Contact data |
|
||||
| `contacts` | any | Contacts list |
|
||||
| `task` | any | Task data |
|
||||
| `tasks` | any | Tasks list |
|
||||
| `metadata` | json | Operation metadata |
|
||||
| `success` | any | Success status |
|
||||
|
||||
### `wealthbox_read_task`
|
||||
|
||||
@@ -141,12 +161,16 @@ Read content from a Wealthbox task
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `task` | string |
|
||||
| `metadata` | string |
|
||||
| `taskId` | string |
|
||||
| `itemType` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `note` | any | Note data |
|
||||
| `notes` | any | Notes list |
|
||||
| `contact` | any | Contact data |
|
||||
| `contacts` | any | Contacts list |
|
||||
| `task` | any | Task data |
|
||||
| `tasks` | any | Tasks list |
|
||||
| `metadata` | json | Operation metadata |
|
||||
| `success` | any | Success status |
|
||||
|
||||
### `wealthbox_write_task`
|
||||
|
||||
@@ -164,36 +188,19 @@ Create or update a Wealthbox task
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `data` | json |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `note` | any | Note data |
|
||||
| `notes` | any | Notes list |
|
||||
| `contact` | any | Contact data |
|
||||
| `contacts` | any | Contacts list |
|
||||
| `task` | any | Task data |
|
||||
| `tasks` | any | Tasks list |
|
||||
| `metadata` | json | Operation metadata |
|
||||
| `success` | any | Success status |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `operation` | string | Yes | Operation |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `note` | any | note output from the block |
|
||||
| `notes` | any | notes output from the block |
|
||||
| `contact` | any | contact output from the block |
|
||||
| `contacts` | any | contacts output from the block |
|
||||
| `task` | any | task output from the block |
|
||||
| `tasks` | any | tasks output from the block |
|
||||
| `metadata` | json | metadata output from the block |
|
||||
| `success` | any | success output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -26,20 +26,6 @@ import { BlockInfoCard } from "@/components/ui/block-info-card"
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `webhookProvider` | dropdown | Yes | Webhook Provider |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
This block does not produce any outputs.
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `triggers`
|
||||
|
||||
@@ -58,32 +58,14 @@ Send WhatsApp messages
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `success` | string |
|
||||
| `messageId` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `success` | boolean | Send success status |
|
||||
| `messageId` | any | Message identifier |
|
||||
| `error` | any | Error information |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `phoneNumber` | string | Yes | Recipient Phone Number - Enter phone number with country code \(e.g., +1234567890\) |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `success` | boolean | success output from the block |
|
||||
| `messageId` | any | messageId output from the block |
|
||||
| `error` | any | error output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -72,20 +72,13 @@ Get a summary and metadata for a specific Wikipedia page.
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `summary` | string |
|
||||
| `title` | string |
|
||||
| `displaytitle` | string |
|
||||
| `description` | string |
|
||||
| `extract` | string |
|
||||
| `extract_html` | string |
|
||||
| `thumbnail` | string |
|
||||
| `originalimage` | string |
|
||||
| `content_urls` | string |
|
||||
| `revisions` | string |
|
||||
| `edit` | string |
|
||||
| `talk` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `summary` | json | Page summary data |
|
||||
| `searchResults` | json | Search results data |
|
||||
| `totalHits` | number | Total search hits |
|
||||
| `content` | json | Page content data |
|
||||
| `randomPage` | json | Random page data |
|
||||
|
||||
### `wikipedia_search`
|
||||
|
||||
@@ -100,11 +93,13 @@ Search for Wikipedia pages by title or content.
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `totalHits` | string |
|
||||
| `query` | string |
|
||||
| `searchResults` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `summary` | json | Page summary data |
|
||||
| `searchResults` | json | Search results data |
|
||||
| `totalHits` | number | Total search hits |
|
||||
| `content` | json | Page content data |
|
||||
| `randomPage` | json | Random page data |
|
||||
|
||||
### `wikipedia_content`
|
||||
|
||||
@@ -118,16 +113,13 @@ Get the full HTML content of a Wikipedia page.
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `content` | string |
|
||||
| `pageid` | string |
|
||||
| `html` | string |
|
||||
| `revision` | string |
|
||||
| `tid` | string |
|
||||
| `timestamp` | string |
|
||||
| `content_model` | string |
|
||||
| `content_format` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `summary` | json | Page summary data |
|
||||
| `searchResults` | json | Search results data |
|
||||
| `totalHits` | number | Total search hits |
|
||||
| `content` | json | Page content data |
|
||||
| `randomPage` | json | Random page data |
|
||||
|
||||
### `wikipedia_random`
|
||||
|
||||
@@ -140,39 +132,16 @@ Get a random Wikipedia page.
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `randomPage` | string |
|
||||
| `title` | string |
|
||||
| `displaytitle` | string |
|
||||
| `description` | string |
|
||||
| `extract` | string |
|
||||
| `thumbnail` | string |
|
||||
| `content_urls` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `summary` | json | Page summary data |
|
||||
| `searchResults` | json | Search results data |
|
||||
| `totalHits` | number | Total search hits |
|
||||
| `content` | json | Page content data |
|
||||
| `randomPage` | json | Random page data |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `operation` | string | Yes | Operation |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `summary` | json | summary output from the block |
|
||||
| `searchResults` | json | searchResults output from the block |
|
||||
| `totalHits` | number | totalHits output from the block |
|
||||
| `content` | json | content output from the block |
|
||||
| `randomPage` | json | randomPage output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -58,16 +58,16 @@ Post new tweets, reply to tweets, or create polls on X (Twitter)
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `tweet` | string |
|
||||
| `text` | string |
|
||||
| `createdAt` | string |
|
||||
| `authorId` | string |
|
||||
| `conversationId` | string |
|
||||
| `inReplyToUserId` | string |
|
||||
| `attachments` | string |
|
||||
| `pollId` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `tweet` | json | Tweet data |
|
||||
| `replies` | any | Tweet replies |
|
||||
| `context` | any | Tweet context |
|
||||
| `tweets` | json | Tweets data |
|
||||
| `includes` | any | Additional data |
|
||||
| `meta` | json | Response metadata |
|
||||
| `user` | json | User profile data |
|
||||
| `recentTweets` | any | Recent tweets data |
|
||||
|
||||
### `x_read`
|
||||
|
||||
@@ -83,10 +83,16 @@ Read tweet details, including replies and conversation context
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `tweet` | string |
|
||||
| `context` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `tweet` | json | Tweet data |
|
||||
| `replies` | any | Tweet replies |
|
||||
| `context` | any | Tweet context |
|
||||
| `tweets` | json | Tweets data |
|
||||
| `includes` | any | Additional data |
|
||||
| `meta` | json | Response metadata |
|
||||
| `user` | json | User profile data |
|
||||
| `recentTweets` | any | Recent tweets data |
|
||||
|
||||
### `x_search`
|
||||
|
||||
@@ -105,12 +111,16 @@ Search for tweets using keywords, hashtags, or advanced queries
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `tweets` | string |
|
||||
| `includes` | string |
|
||||
| `media` | string |
|
||||
| `polls` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `tweet` | json | Tweet data |
|
||||
| `replies` | any | Tweet replies |
|
||||
| `context` | any | Tweet context |
|
||||
| `tweets` | json | Tweets data |
|
||||
| `includes` | any | Additional data |
|
||||
| `meta` | json | Response metadata |
|
||||
| `user` | json | User profile data |
|
||||
| `recentTweets` | any | Recent tweets data |
|
||||
|
||||
### `x_user`
|
||||
|
||||
@@ -125,36 +135,19 @@ Get user profile information
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `user` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `tweet` | json | Tweet data |
|
||||
| `replies` | any | Tweet replies |
|
||||
| `context` | any | Tweet context |
|
||||
| `tweets` | json | Tweets data |
|
||||
| `includes` | any | Additional data |
|
||||
| `meta` | json | Response metadata |
|
||||
| `user` | json | User profile data |
|
||||
| `recentTweets` | any | Recent tweets data |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `operation` | string | Yes | Operation |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `tweet` | json | tweet output from the block |
|
||||
| `replies` | any | replies output from the block |
|
||||
| `context` | any | context output from the block |
|
||||
| `tweets` | json | tweets output from the block |
|
||||
| `includes` | any | includes output from the block |
|
||||
| `meta` | json | meta output from the block |
|
||||
| `user` | json | user output from the block |
|
||||
| `recentTweets` | any | recentTweets output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -60,32 +60,13 @@ Search for videos on YouTube using the YouTube Data API.
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type |
|
||||
| --------- | ---- |
|
||||
| `totalResults` | string |
|
||||
| `nextPageToken` | string |
|
||||
| `items` | string |
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `items` | json | The items returned by the YouTube search |
|
||||
| `totalResults` | number | The total number of results returned by the YouTube search |
|
||||
|
||||
|
||||
|
||||
## Block Configuration
|
||||
|
||||
### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `apiKey` | string | Yes | YouTube API Key - Enter YouTube API Key |
|
||||
|
||||
|
||||
|
||||
### Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `items` | json | items output from the block |
|
||||
| `totalResults` | number | totalResults output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Category: `tools`
|
||||
|
||||
@@ -33,30 +33,54 @@ properties:
|
||||
enum: [GET, POST, PUT, DELETE, PATCH]
|
||||
description: HTTP method for the request
|
||||
default: GET
|
||||
queryParams:
|
||||
params:
|
||||
type: array
|
||||
description: Query parameters as key-value pairs
|
||||
description: Query parameters as table entries
|
||||
items:
|
||||
type: object
|
||||
required:
|
||||
- id
|
||||
- cells
|
||||
properties:
|
||||
key:
|
||||
id:
|
||||
type: string
|
||||
description: Parameter name
|
||||
value:
|
||||
type: string
|
||||
description: Parameter value
|
||||
description: Unique identifier for the parameter entry
|
||||
cells:
|
||||
type: object
|
||||
required:
|
||||
- Key
|
||||
- Value
|
||||
properties:
|
||||
Key:
|
||||
type: string
|
||||
description: Parameter name
|
||||
Value:
|
||||
type: string
|
||||
description: Parameter value
|
||||
headers:
|
||||
type: array
|
||||
description: HTTP headers as key-value pairs
|
||||
description: HTTP headers as table entries
|
||||
items:
|
||||
type: object
|
||||
required:
|
||||
- id
|
||||
- cells
|
||||
properties:
|
||||
key:
|
||||
id:
|
||||
type: string
|
||||
description: Header name
|
||||
value:
|
||||
type: string
|
||||
description: Header value
|
||||
description: Unique identifier for the header entry
|
||||
cells:
|
||||
type: object
|
||||
required:
|
||||
- Key
|
||||
- Value
|
||||
properties:
|
||||
Key:
|
||||
type: string
|
||||
description: Header name
|
||||
Value:
|
||||
type: string
|
||||
description: Header value
|
||||
body:
|
||||
type: string
|
||||
description: Request body for POST/PUT/PATCH methods
|
||||
@@ -99,15 +123,21 @@ user-api:
|
||||
url: "https://api.example.com/users/123"
|
||||
method: GET
|
||||
headers:
|
||||
- key: "Authorization"
|
||||
value: "Bearer {{API_TOKEN}}"
|
||||
- key: "Content-Type"
|
||||
value: "application/json"
|
||||
- id: header-1-uuid-here
|
||||
cells:
|
||||
Key: "Authorization"
|
||||
Value: "Bearer {{API_TOKEN}}"
|
||||
- id: header-2-uuid-here
|
||||
cells:
|
||||
Key: "Content-Type"
|
||||
Value: "application/json"
|
||||
connections:
|
||||
success: process-user-data
|
||||
error: handle-api-error
|
||||
```
|
||||
|
||||
|
||||
|
||||
### POST Request with Body
|
||||
|
||||
```yaml
|
||||
@@ -118,10 +148,14 @@ create-ticket:
|
||||
url: "https://api.support.com/tickets"
|
||||
method: POST
|
||||
headers:
|
||||
- key: "Authorization"
|
||||
value: "Bearer {{SUPPORT_API_KEY}}"
|
||||
- key: "Content-Type"
|
||||
value: "application/json"
|
||||
- id: auth-header-uuid
|
||||
cells:
|
||||
Key: "Authorization"
|
||||
Value: "Bearer {{SUPPORT_API_KEY}}"
|
||||
- id: content-type-uuid
|
||||
cells:
|
||||
Key: "Content-Type"
|
||||
Value: "application/json"
|
||||
body: |
|
||||
{
|
||||
"title": "<agent.title>",
|
||||
@@ -142,32 +176,249 @@ search-api:
|
||||
inputs:
|
||||
url: "https://api.store.com/products"
|
||||
method: GET
|
||||
queryParams:
|
||||
- key: "q"
|
||||
value: <start.searchTerm>
|
||||
- key: "limit"
|
||||
value: "10"
|
||||
- key: "category"
|
||||
value: <filter.category>
|
||||
params:
|
||||
- id: search-param-uuid
|
||||
cells:
|
||||
Key: "q"
|
||||
Value: <start.searchTerm>
|
||||
- id: limit-param-uuid
|
||||
cells:
|
||||
Key: "limit"
|
||||
Value: "10"
|
||||
- id: category-param-uuid
|
||||
cells:
|
||||
Key: "category"
|
||||
Value: <filter.category>
|
||||
headers:
|
||||
- key: "Authorization"
|
||||
value: "Bearer {{STORE_API_KEY}}"
|
||||
- id: auth-header-uuid
|
||||
cells:
|
||||
Key: "Authorization"
|
||||
Value: "Bearer {{STORE_API_KEY}}"
|
||||
connections:
|
||||
success: display-results
|
||||
```
|
||||
|
||||
## Output References
|
||||
## Parameter Format
|
||||
|
||||
After an API block executes, you can reference its outputs:
|
||||
Headers and params (query parameters) use the table format with the following structure:
|
||||
|
||||
```yaml
|
||||
# In subsequent blocks
|
||||
next-block:
|
||||
headers:
|
||||
- id: unique-identifier-here
|
||||
cells:
|
||||
Key: "Content-Type"
|
||||
Value: "application/json"
|
||||
- id: another-unique-identifier
|
||||
cells:
|
||||
Key: "Authorization"
|
||||
Value: "Bearer {{API_TOKEN}}"
|
||||
|
||||
params:
|
||||
- id: param-identifier-here
|
||||
cells:
|
||||
Key: "limit"
|
||||
Value: "10"
|
||||
```
|
||||
|
||||
**Structure Details:**
|
||||
- `id`: Unique identifier for tracking the table row
|
||||
- `cells.Key`: The parameter/header name
|
||||
- `cells.Value`: The parameter/header value
|
||||
- This format allows for proper table management and UI state preservation
|
||||
|
||||
## Output References
|
||||
|
||||
After an API block executes, you can reference its outputs in subsequent blocks. The API block provides three main outputs:
|
||||
|
||||
### Available Outputs
|
||||
|
||||
| Output | Type | Description |
|
||||
|--------|------|-------------|
|
||||
| `data` | any | The response body/payload from the API |
|
||||
| `status` | number | HTTP status code (200, 404, 500, etc.) |
|
||||
| `headers` | object | Response headers returned by the server |
|
||||
|
||||
### Usage Examples
|
||||
|
||||
```yaml
|
||||
# Reference API response data
|
||||
process-data:
|
||||
type: function
|
||||
name: "Process API Data"
|
||||
inputs:
|
||||
data: <api-block-name.output> # Response data
|
||||
status: <api-block-name.status> # HTTP status code
|
||||
headers: <api-block-name.headers> # Response headers
|
||||
error: <api-block-name.error> # Error details (if any)
|
||||
code: |
|
||||
const responseData = <fetchuserdata.data>;
|
||||
const statusCode = <fetchuserdata.status>;
|
||||
const responseHeaders = <fetchuserdata.headers>;
|
||||
|
||||
if (statusCode === 200) {
|
||||
return {
|
||||
success: true,
|
||||
user: responseData,
|
||||
contentType: responseHeaders['content-type']
|
||||
};
|
||||
} else {
|
||||
return {
|
||||
success: false,
|
||||
error: `API call failed with status ${statusCode}`
|
||||
};
|
||||
}
|
||||
|
||||
# Use API data in an agent block
|
||||
analyze-response:
|
||||
type: agent
|
||||
name: "Analyze Response"
|
||||
inputs:
|
||||
userPrompt: |
|
||||
Analyze this API response:
|
||||
|
||||
Status: <fetchuserdata.status>
|
||||
Data: <fetchuserdata.data>
|
||||
|
||||
Provide insights about the response.
|
||||
|
||||
# Conditional logic based on status
|
||||
check-status:
|
||||
type: condition
|
||||
name: "Check API Status"
|
||||
inputs:
|
||||
condition: <fetchuserdata.status> === 200
|
||||
connections:
|
||||
true: success-handler
|
||||
false: error-handler
|
||||
```
|
||||
|
||||
### Practical Example
|
||||
|
||||
```yaml
|
||||
user-api:
|
||||
type: api
|
||||
name: "Fetch User Data"
|
||||
inputs:
|
||||
url: "https://api.example.com/users/123"
|
||||
method: GET
|
||||
connections:
|
||||
success: process-response
|
||||
|
||||
process-response:
|
||||
type: function
|
||||
name: "Process Response"
|
||||
inputs:
|
||||
code: |
|
||||
const user = <fetchuserdata.data>;
|
||||
const status = <fetchuserdata.status>;
|
||||
|
||||
console.log(`API returned status: ${status}`);
|
||||
console.log(`User data:`, user);
|
||||
|
||||
return {
|
||||
userId: user.id,
|
||||
email: user.email,
|
||||
isActive: status === 200
|
||||
};
|
||||
```
|
||||
|
||||
### Error Handling
|
||||
|
||||
```yaml
|
||||
api-with-error-handling:
|
||||
type: api
|
||||
name: "API Call"
|
||||
inputs:
|
||||
url: "https://api.example.com/data"
|
||||
method: GET
|
||||
connections:
|
||||
success: check-response
|
||||
error: handle-error
|
||||
|
||||
check-response:
|
||||
type: condition
|
||||
name: "Check Response Status"
|
||||
inputs:
|
||||
condition: <apicall.status> >= 200 && <apicall.status> < 300
|
||||
connections:
|
||||
true: process-success
|
||||
false: handle-api-error
|
||||
|
||||
process-success:
|
||||
type: function
|
||||
name: "Process Success"
|
||||
inputs:
|
||||
code: |
|
||||
return {
|
||||
success: true,
|
||||
data: <apicall.data>,
|
||||
message: "API call successful"
|
||||
};
|
||||
|
||||
handle-api-error:
|
||||
type: function
|
||||
name: "Handle API Error"
|
||||
inputs:
|
||||
code: |
|
||||
return {
|
||||
success: false,
|
||||
status: <apicall.status>,
|
||||
error: "API call failed",
|
||||
data: <apicall.data>
|
||||
};
|
||||
```
|
||||
|
||||
## YAML String Escaping
|
||||
|
||||
When writing YAML, certain strings must be quoted to be properly parsed:
|
||||
|
||||
### Strings That Must Be Quoted
|
||||
|
||||
```yaml
|
||||
# URLs with hyphens, colons, special characters
|
||||
url: "https://api.example.com/users/123"
|
||||
url: "https://my-api.example.com/data"
|
||||
|
||||
# Header values with hyphens or special characters
|
||||
headers:
|
||||
- id: header-uuid
|
||||
cells:
|
||||
Key: "User-Agent"
|
||||
Value: "My-Application/1.0"
|
||||
- id: auth-uuid
|
||||
cells:
|
||||
Key: "Authorization"
|
||||
Value: "Bearer my-token-123"
|
||||
|
||||
# Parameter values with hyphens
|
||||
params:
|
||||
- id: param-uuid
|
||||
cells:
|
||||
Key: "sort-by"
|
||||
Value: "created-at"
|
||||
```
|
||||
|
||||
### When to Use Quotes
|
||||
|
||||
- ✅ **Always quote**: URLs, tokens, values with hyphens, colons, or special characters
|
||||
- ✅ **Always quote**: Values that start with numbers but should be strings
|
||||
- ✅ **Always quote**: Boolean-looking strings that should remain as strings
|
||||
- ❌ **Don't quote**: Simple alphanumeric strings without special characters
|
||||
|
||||
### Examples
|
||||
|
||||
```yaml
|
||||
# ✅ Correct
|
||||
url: "https://api.stripe.com/v1/charges"
|
||||
headers:
|
||||
- id: auth-header
|
||||
cells:
|
||||
Key: "Authorization"
|
||||
Value: "Bearer sk-test-123456789"
|
||||
|
||||
# ❌ Incorrect (may cause parsing errors)
|
||||
url: https://api.stripe.com/v1/charges
|
||||
headers:
|
||||
- id: auth-header
|
||||
cells:
|
||||
Key: Authorization
|
||||
Value: Bearer sk-test-123456789
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
@@ -176,4 +427,5 @@ next-block:
|
||||
- Include error handling with error connections
|
||||
- Set appropriate timeouts for your use case
|
||||
- Validate response status codes in subsequent blocks
|
||||
- Use meaningful block names for easier reference
|
||||
- Use meaningful block names for easier reference
|
||||
- **Always quote strings with special characters, URLs, and tokens**
|
||||
@@ -59,9 +59,6 @@ properties:
|
||||
end:
|
||||
type: string
|
||||
description: Target block ID for loop completion (optional)
|
||||
success:
|
||||
type: string
|
||||
description: Target block ID after loop completion (alternative format)
|
||||
error:
|
||||
type: string
|
||||
description: Target block ID for error handling
|
||||
@@ -79,13 +76,6 @@ connections:
|
||||
error: <string> # Target block ID for error handling (optional)
|
||||
```
|
||||
|
||||
Alternative format (legacy):
|
||||
```yaml
|
||||
connections:
|
||||
success: <string> # Target block ID after loop completion
|
||||
error: <string> # Target block ID for error handling (optional)
|
||||
```
|
||||
|
||||
## Child Block Configuration
|
||||
|
||||
Blocks inside a loop must have their `parentId` set to the loop block ID:
|
||||
@@ -166,7 +156,7 @@ process-single-email:
|
||||
apiKey: '{{OPENAI_API_KEY}}'
|
||||
```
|
||||
|
||||
### Complex Loop with Multiple Child Blocks
|
||||
### Loop with Multiple Child Blocks
|
||||
|
||||
```yaml
|
||||
data-analysis-loop:
|
||||
|
||||
@@ -59,9 +59,6 @@ properties:
|
||||
end:
|
||||
type: string
|
||||
description: Target block ID after all parallel instances complete (optional)
|
||||
success:
|
||||
type: string
|
||||
description: Target block ID after all instances complete (alternative format)
|
||||
error:
|
||||
type: string
|
||||
description: Target block ID for error handling
|
||||
@@ -79,13 +76,6 @@ connections:
|
||||
error: <string> # Target block ID for error handling (optional)
|
||||
```
|
||||
|
||||
Alternative format (legacy):
|
||||
```yaml
|
||||
connections:
|
||||
success: <string> # Target block ID after all instances complete
|
||||
error: <string> # Target block ID for error handling (optional)
|
||||
```
|
||||
|
||||
## Child Block Configuration
|
||||
|
||||
Blocks inside a parallel block must have their `parentId` set to the parallel block ID:
|
||||
|
||||
@@ -40,16 +40,29 @@ properties:
|
||||
maximum: 599
|
||||
headers:
|
||||
type: array
|
||||
description: Response headers as key-value pairs
|
||||
description: Response headers as table entries
|
||||
items:
|
||||
type: object
|
||||
properties:
|
||||
id:
|
||||
type: string
|
||||
description: Unique identifier for the header entry
|
||||
key:
|
||||
type: string
|
||||
description: Header name
|
||||
value:
|
||||
type: string
|
||||
description: Header value
|
||||
cells:
|
||||
type: object
|
||||
description: Cell display values for the table interface
|
||||
properties:
|
||||
Key:
|
||||
type: string
|
||||
description: Display value for the key column
|
||||
Value:
|
||||
type: string
|
||||
description: Display value for the value column
|
||||
```
|
||||
|
||||
## Connection Configuration
|
||||
@@ -97,6 +110,40 @@ success-response:
|
||||
value: "workflow-engine"
|
||||
```
|
||||
|
||||
### Response with Complete Table Header Format
|
||||
|
||||
When headers are created through the UI table interface, the YAML includes additional metadata:
|
||||
|
||||
```yaml
|
||||
api-response:
|
||||
type: response
|
||||
name: "API Response"
|
||||
inputs:
|
||||
data:
|
||||
message: "Request processed successfully"
|
||||
id: <agent.request_id>
|
||||
status: 200
|
||||
headers:
|
||||
- id: header-1-uuid-here
|
||||
key: "Content-Type"
|
||||
value: "application/json"
|
||||
cells:
|
||||
Key: "Content-Type"
|
||||
Value: "application/json"
|
||||
- id: header-2-uuid-here
|
||||
key: "Cache-Control"
|
||||
value: "no-cache"
|
||||
cells:
|
||||
Key: "Cache-Control"
|
||||
Value: "no-cache"
|
||||
- id: header-3-uuid-here
|
||||
key: "X-API-Version"
|
||||
value: "2.1"
|
||||
cells:
|
||||
Key: "X-API-Version"
|
||||
Value: "2.1"
|
||||
```
|
||||
|
||||
### Error Response
|
||||
|
||||
```yaml
|
||||
@@ -137,4 +184,55 @@ paginated-response:
|
||||
value: "public, max-age=300"
|
||||
- key: "Content-Type"
|
||||
value: "application/json"
|
||||
```
|
||||
|
||||
## Table Parameter Formats
|
||||
|
||||
The Response block supports two formats for headers:
|
||||
|
||||
### Simplified Format (Manual YAML)
|
||||
|
||||
When writing YAML manually, you can use the simplified format:
|
||||
|
||||
```yaml
|
||||
headers:
|
||||
- key: "Content-Type"
|
||||
value: "application/json"
|
||||
- key: "Cache-Control"
|
||||
value: "no-cache"
|
||||
```
|
||||
|
||||
### Complete Table Format (UI Generated)
|
||||
|
||||
When headers are created through the UI table interface, the YAML includes additional metadata:
|
||||
|
||||
```yaml
|
||||
headers:
|
||||
- id: unique-identifier-here
|
||||
key: "Content-Type"
|
||||
value: "application/json"
|
||||
cells:
|
||||
Key: "Content-Type"
|
||||
Value: "application/json"
|
||||
```
|
||||
|
||||
**Key Differences:**
|
||||
- `id`: Unique identifier for tracking the table row
|
||||
- `cells`: Display values used by the UI table interface
|
||||
- Both formats are functionally equivalent for workflow execution
|
||||
- The complete format preserves UI state when importing/exporting workflows
|
||||
|
||||
**Important:** Always quote header names and values that contain special characters:
|
||||
|
||||
```yaml
|
||||
headers:
|
||||
- id: content-type-uuid
|
||||
cells:
|
||||
Key: "Content-Type"
|
||||
Value: "application/json"
|
||||
- id: cache-control-uuid
|
||||
cells:
|
||||
Key: "Cache-Control"
|
||||
Value: "no-cache"
|
||||
```
|
||||
```
|
||||
@@ -34,16 +34,29 @@ properties:
|
||||
description: Secret key for webhook verification
|
||||
headers:
|
||||
type: array
|
||||
description: Expected headers for validation
|
||||
description: Expected headers for validation as table entries
|
||||
items:
|
||||
type: object
|
||||
properties:
|
||||
id:
|
||||
type: string
|
||||
description: Unique identifier for the header entry
|
||||
key:
|
||||
type: string
|
||||
description: Header name
|
||||
value:
|
||||
type: string
|
||||
description: Expected header value
|
||||
cells:
|
||||
type: object
|
||||
description: Cell display values for the table interface
|
||||
properties:
|
||||
Key:
|
||||
type: string
|
||||
description: Display value for the key column
|
||||
Value:
|
||||
type: string
|
||||
description: Display value for the value column
|
||||
methods:
|
||||
type: array
|
||||
description: Allowed HTTP methods
|
||||
@@ -63,16 +76,29 @@ properties:
|
||||
maximum: 599
|
||||
headers:
|
||||
type: array
|
||||
description: Response headers
|
||||
description: Response headers as table entries
|
||||
items:
|
||||
type: object
|
||||
properties:
|
||||
id:
|
||||
type: string
|
||||
description: Unique identifier for the header entry
|
||||
key:
|
||||
type: string
|
||||
description: Header name
|
||||
value:
|
||||
type: string
|
||||
description: Header value
|
||||
cells:
|
||||
type: object
|
||||
description: Cell display values for the table interface
|
||||
properties:
|
||||
Key:
|
||||
type: string
|
||||
description: Display value for the key column
|
||||
Value:
|
||||
type: string
|
||||
description: Display value for the value column
|
||||
body:
|
||||
type: string
|
||||
description: Response body content
|
||||
@@ -180,6 +206,55 @@ stripe-webhook:
|
||||
error: payment-webhook-error
|
||||
```
|
||||
|
||||
### Webhook with Complete Table Header Format
|
||||
|
||||
When headers are created through the UI table interface, the YAML includes additional metadata:
|
||||
|
||||
```yaml
|
||||
api-webhook-complete:
|
||||
type: webhook
|
||||
name: "API Webhook with Table Headers"
|
||||
inputs:
|
||||
webhookConfig:
|
||||
enabled: true
|
||||
methods: [POST]
|
||||
headers:
|
||||
- id: header-1-uuid-here
|
||||
key: "Authorization"
|
||||
value: "Bearer {{WEBHOOK_API_KEY}}"
|
||||
cells:
|
||||
Key: "Authorization"
|
||||
Value: "Bearer {{WEBHOOK_API_KEY}}"
|
||||
- id: header-2-uuid-here
|
||||
key: "Content-Type"
|
||||
value: "application/json"
|
||||
cells:
|
||||
Key: "Content-Type"
|
||||
Value: "application/json"
|
||||
responseConfig:
|
||||
status: 200
|
||||
headers:
|
||||
- id: response-header-1-uuid
|
||||
key: "Content-Type"
|
||||
value: "application/json"
|
||||
cells:
|
||||
Key: "Content-Type"
|
||||
Value: "application/json"
|
||||
- id: response-header-2-uuid
|
||||
key: "X-Webhook-Response"
|
||||
value: "processed"
|
||||
cells:
|
||||
Key: "X-Webhook-Response"
|
||||
Value: "processed"
|
||||
body: |
|
||||
{
|
||||
"status": "received",
|
||||
"timestamp": "{{new Date().toISOString()}}"
|
||||
}
|
||||
connections:
|
||||
success: process-webhook-complete
|
||||
```
|
||||
|
||||
### Generic API Webhook
|
||||
|
||||
```yaml
|
||||
@@ -240,6 +315,56 @@ crud-webhook:
|
||||
success: route-by-method
|
||||
```
|
||||
|
||||
## Table Parameter Formats
|
||||
|
||||
The Webhook block supports two formats for headers (both validation headers and response headers):
|
||||
|
||||
### Simplified Format (Manual YAML)
|
||||
|
||||
When writing YAML manually, you can use the simplified format:
|
||||
|
||||
```yaml
|
||||
headers:
|
||||
- key: "Authorization"
|
||||
value: "Bearer {{API_TOKEN}}"
|
||||
- key: "Content-Type"
|
||||
value: "application/json"
|
||||
```
|
||||
|
||||
### Complete Table Format (UI Generated)
|
||||
|
||||
When headers are created through the UI table interface, the YAML includes additional metadata:
|
||||
|
||||
```yaml
|
||||
headers:
|
||||
- id: unique-identifier-here
|
||||
key: "Authorization"
|
||||
value: "Bearer {{API_TOKEN}}"
|
||||
cells:
|
||||
Key: "Authorization"
|
||||
Value: "Bearer {{API_TOKEN}}"
|
||||
```
|
||||
|
||||
**Key Differences:**
|
||||
- `id`: Unique identifier for tracking the table row
|
||||
- `cells`: Display values used by the UI table interface
|
||||
- Both formats are functionally equivalent for webhook processing
|
||||
- The complete format preserves UI state when importing/exporting workflows
|
||||
|
||||
**Important:** Always quote header names and values that contain special characters:
|
||||
|
||||
```yaml
|
||||
headers:
|
||||
- id: auth-header-uuid
|
||||
cells:
|
||||
Key: "Authorization"
|
||||
Value: "Bearer {{WEBHOOK_API_KEY}}"
|
||||
- id: content-type-uuid
|
||||
cells:
|
||||
Key: "Content-Type"
|
||||
Value: "application/json"
|
||||
```
|
||||
|
||||
## Webhook Variables
|
||||
|
||||
Inside webhook-triggered workflows, these special variables are available:
|
||||
|
||||
@@ -159,7 +159,7 @@ function Footer() {
|
||||
<DiscordIcon className='h-9 w-9 fill-[#9E91AA] hover:fill-[#bdaecb] md:h-10 md:w-10' />
|
||||
</Link>
|
||||
<Link
|
||||
href={'https://x.com/simstudioai'}
|
||||
href={'https://x.com/simdotai'}
|
||||
target='_blank'
|
||||
rel='noopener noreferrer'
|
||||
className='text-2xl transition-all duration-500'
|
||||
@@ -349,7 +349,7 @@ function Footer() {
|
||||
<DiscordIcon className='h-9 w-9 fill-[#9E91AA] hover:fill-[#bdaecb] md:h-10 md:w-10' />
|
||||
</Link>
|
||||
<Link
|
||||
href={'https://x.com/simstudioai'}
|
||||
href={'https://x.com/simdotai'}
|
||||
target='_blank'
|
||||
rel='noopener noreferrer'
|
||||
className='text-2xl transition-all duration-500'
|
||||
|
||||
@@ -98,6 +98,7 @@ export const sampleWorkflowState = {
|
||||
enabled: true,
|
||||
horizontalHandles: true,
|
||||
isWide: false,
|
||||
advancedMode: false,
|
||||
height: 95,
|
||||
},
|
||||
'agent-id': {
|
||||
@@ -125,6 +126,7 @@ export const sampleWorkflowState = {
|
||||
enabled: true,
|
||||
horizontalHandles: true,
|
||||
isWide: false,
|
||||
advancedMode: false,
|
||||
height: 680,
|
||||
},
|
||||
},
|
||||
@@ -784,6 +786,10 @@ export function createStorageProviderMocks(options: StorageProviderMockOptions =
|
||||
bucket: 'test-s3-kb-bucket',
|
||||
region: 'us-east-1',
|
||||
},
|
||||
S3_CHAT_CONFIG: {
|
||||
bucket: 'test-s3-chat-bucket',
|
||||
region: 'us-east-1',
|
||||
},
|
||||
BLOB_CONFIG: {
|
||||
accountName: 'testaccount',
|
||||
accountKey: 'testkey',
|
||||
@@ -794,6 +800,11 @@ export function createStorageProviderMocks(options: StorageProviderMockOptions =
|
||||
accountKey: 'testkey',
|
||||
containerName: 'test-kb-container',
|
||||
},
|
||||
BLOB_CHAT_CONFIG: {
|
||||
accountName: 'testaccount',
|
||||
accountKey: 'testkey',
|
||||
containerName: 'test-chat-container',
|
||||
},
|
||||
}))
|
||||
|
||||
vi.doMock('@aws-sdk/client-s3', () => ({
|
||||
@@ -809,7 +820,7 @@ export function createStorageProviderMocks(options: StorageProviderMockOptions =
|
||||
}),
|
||||
}))
|
||||
} else if (provider === 'blob') {
|
||||
const baseUrl = presignedUrl.replace('?sas-token-string', '')
|
||||
const baseUrl = 'https://testaccount.blob.core.windows.net/test-container'
|
||||
const mockBlockBlobClient = {
|
||||
url: baseUrl,
|
||||
}
|
||||
@@ -841,6 +852,11 @@ export function createStorageProviderMocks(options: StorageProviderMockOptions =
|
||||
accountKey: 'testkey',
|
||||
containerName: 'test-kb-container',
|
||||
},
|
||||
BLOB_CHAT_CONFIG: {
|
||||
accountName: 'testaccount',
|
||||
accountKey: 'testkey',
|
||||
containerName: 'test-chat-container',
|
||||
},
|
||||
}))
|
||||
|
||||
vi.doMock('@azure/storage-blob', () => ({
|
||||
|
||||
214
apps/sim/app/api/billing/update-cost/route.ts
Normal file
214
apps/sim/app/api/billing/update-cost/route.ts
Normal file
@@ -0,0 +1,214 @@
|
||||
import crypto from 'crypto'
|
||||
import { eq, sql } from 'drizzle-orm'
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { z } from 'zod'
|
||||
import { env } from '@/lib/env'
|
||||
import { isProd } from '@/lib/environment'
|
||||
import { createLogger } from '@/lib/logs/console/logger'
|
||||
import { db } from '@/db'
|
||||
import { userStats } from '@/db/schema'
|
||||
import { calculateCost } from '@/providers/utils'
|
||||
|
||||
const logger = createLogger('billing-update-cost')
|
||||
|
||||
// Schema for the request body
|
||||
const UpdateCostSchema = z.object({
|
||||
userId: z.string().min(1, 'User ID is required'),
|
||||
input: z.number().min(0, 'Input tokens must be a non-negative number'),
|
||||
output: z.number().min(0, 'Output tokens must be a non-negative number'),
|
||||
model: z.string().min(1, 'Model is required'),
|
||||
})
|
||||
|
||||
// Authentication function (reused from copilot/methods route)
|
||||
function checkInternalApiKey(req: NextRequest) {
|
||||
const apiKey = req.headers.get('x-api-key')
|
||||
const expectedApiKey = env.INTERNAL_API_SECRET
|
||||
|
||||
if (!expectedApiKey) {
|
||||
return { success: false, error: 'Internal API key not configured' }
|
||||
}
|
||||
|
||||
if (!apiKey) {
|
||||
return { success: false, error: 'API key required' }
|
||||
}
|
||||
|
||||
if (apiKey !== expectedApiKey) {
|
||||
return { success: false, error: 'Invalid API key' }
|
||||
}
|
||||
|
||||
return { success: true }
|
||||
}
|
||||
|
||||
/**
|
||||
* POST /api/billing/update-cost
|
||||
* Update user cost based on token usage with internal API key auth
|
||||
*/
|
||||
export async function POST(req: NextRequest) {
|
||||
const requestId = crypto.randomUUID().slice(0, 8)
|
||||
const startTime = Date.now()
|
||||
|
||||
try {
|
||||
logger.info(`[${requestId}] Update cost request started`)
|
||||
|
||||
// Check authentication (internal API key)
|
||||
const authResult = checkInternalApiKey(req)
|
||||
if (!authResult.success) {
|
||||
logger.warn(`[${requestId}] Authentication failed: ${authResult.error}`)
|
||||
return NextResponse.json(
|
||||
{
|
||||
success: false,
|
||||
error: authResult.error || 'Authentication failed',
|
||||
},
|
||||
{ status: 401 }
|
||||
)
|
||||
}
|
||||
|
||||
// Parse and validate request body
|
||||
const body = await req.json()
|
||||
const validation = UpdateCostSchema.safeParse(body)
|
||||
|
||||
if (!validation.success) {
|
||||
logger.warn(`[${requestId}] Invalid request body`, {
|
||||
errors: validation.error.issues,
|
||||
body,
|
||||
})
|
||||
return NextResponse.json(
|
||||
{
|
||||
success: false,
|
||||
error: 'Invalid request body',
|
||||
details: validation.error.issues,
|
||||
},
|
||||
{ status: 400 }
|
||||
)
|
||||
}
|
||||
|
||||
const { userId, input, output, model } = validation.data
|
||||
|
||||
logger.info(`[${requestId}] Processing cost update`, {
|
||||
userId,
|
||||
input,
|
||||
output,
|
||||
model,
|
||||
})
|
||||
|
||||
const finalPromptTokens = input
|
||||
const finalCompletionTokens = output
|
||||
const totalTokens = input + output
|
||||
|
||||
// Calculate cost using COPILOT_COST_MULTIPLIER (only in production, like normal executions)
|
||||
const copilotMultiplier = isProd ? env.COPILOT_COST_MULTIPLIER || 1 : 1
|
||||
const costResult = calculateCost(
|
||||
model,
|
||||
finalPromptTokens,
|
||||
finalCompletionTokens,
|
||||
false,
|
||||
copilotMultiplier
|
||||
)
|
||||
|
||||
logger.info(`[${requestId}] Cost calculation result`, {
|
||||
userId,
|
||||
model,
|
||||
promptTokens: finalPromptTokens,
|
||||
completionTokens: finalCompletionTokens,
|
||||
totalTokens: totalTokens,
|
||||
copilotMultiplier,
|
||||
costResult,
|
||||
})
|
||||
|
||||
// Follow the exact same logic as ExecutionLogger.updateUserStats but with direct userId
|
||||
const costToStore = costResult.total // No additional multiplier needed since calculateCost already applied it
|
||||
|
||||
// Check if user stats record exists (same as ExecutionLogger)
|
||||
const userStatsRecords = await db.select().from(userStats).where(eq(userStats.userId, userId))
|
||||
|
||||
if (userStatsRecords.length === 0) {
|
||||
// Create new user stats record (same logic as ExecutionLogger)
|
||||
await db.insert(userStats).values({
|
||||
id: crypto.randomUUID(),
|
||||
userId: userId,
|
||||
totalManualExecutions: 0,
|
||||
totalApiCalls: 0,
|
||||
totalWebhookTriggers: 0,
|
||||
totalScheduledExecutions: 0,
|
||||
totalChatExecutions: 0,
|
||||
totalTokensUsed: totalTokens,
|
||||
totalCost: costToStore.toString(),
|
||||
currentPeriodCost: costToStore.toString(),
|
||||
lastActive: new Date(),
|
||||
})
|
||||
|
||||
logger.info(`[${requestId}] Created new user stats record`, {
|
||||
userId,
|
||||
totalCost: costToStore,
|
||||
totalTokens,
|
||||
})
|
||||
} else {
|
||||
// Update existing user stats record (same logic as ExecutionLogger)
|
||||
const updateFields = {
|
||||
totalTokensUsed: sql`total_tokens_used + ${totalTokens}`,
|
||||
totalCost: sql`total_cost + ${costToStore}`,
|
||||
currentPeriodCost: sql`current_period_cost + ${costToStore}`,
|
||||
totalApiCalls: sql`total_api_calls`,
|
||||
lastActive: new Date(),
|
||||
}
|
||||
|
||||
await db.update(userStats).set(updateFields).where(eq(userStats.userId, userId))
|
||||
|
||||
logger.info(`[${requestId}] Updated user stats record`, {
|
||||
userId,
|
||||
addedCost: costToStore,
|
||||
addedTokens: totalTokens,
|
||||
})
|
||||
}
|
||||
|
||||
const duration = Date.now() - startTime
|
||||
|
||||
logger.info(`[${requestId}] Cost update completed successfully`, {
|
||||
userId,
|
||||
duration,
|
||||
cost: costResult.total,
|
||||
totalTokens,
|
||||
})
|
||||
|
||||
return NextResponse.json({
|
||||
success: true,
|
||||
data: {
|
||||
userId,
|
||||
input,
|
||||
output,
|
||||
totalTokens,
|
||||
model,
|
||||
cost: {
|
||||
input: costResult.input,
|
||||
output: costResult.output,
|
||||
total: costResult.total,
|
||||
},
|
||||
tokenBreakdown: {
|
||||
prompt: finalPromptTokens,
|
||||
completion: finalCompletionTokens,
|
||||
total: totalTokens,
|
||||
},
|
||||
pricing: costResult.pricing,
|
||||
processedAt: new Date().toISOString(),
|
||||
requestId,
|
||||
},
|
||||
})
|
||||
} catch (error) {
|
||||
const duration = Date.now() - startTime
|
||||
|
||||
logger.error(`[${requestId}] Cost update failed`, {
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
stack: error instanceof Error ? error.stack : undefined,
|
||||
duration,
|
||||
})
|
||||
|
||||
return NextResponse.json(
|
||||
{
|
||||
success: false,
|
||||
error: 'Internal server error',
|
||||
requestId,
|
||||
},
|
||||
{ status: 500 }
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -29,6 +29,7 @@ const chatUpdateSchema = z.object({
|
||||
.object({
|
||||
primaryColor: z.string(),
|
||||
welcomeMessage: z.string(),
|
||||
imageUrl: z.string().optional(),
|
||||
})
|
||||
.optional(),
|
||||
authType: z.enum(['public', 'password', 'email']).optional(),
|
||||
|
||||
@@ -27,6 +27,7 @@ const chatSchema = z.object({
|
||||
customizations: z.object({
|
||||
primaryColor: z.string(),
|
||||
welcomeMessage: z.string(),
|
||||
imageUrl: z.string().optional(),
|
||||
}),
|
||||
authType: z.enum(['public', 'password', 'email']).default('public'),
|
||||
password: z.string().optional(),
|
||||
|
||||
@@ -350,4 +350,77 @@ describe('Chat API Utils', () => {
|
||||
expect(result3.error).toBe('Email not authorized')
|
||||
})
|
||||
})
|
||||
|
||||
describe('Execution Result Processing', () => {
|
||||
it('should process logs regardless of overall success status', () => {
|
||||
// Test that logs are processed even when overall execution fails
|
||||
// This is key for partial success scenarios
|
||||
const executionResult = {
|
||||
success: false, // Overall execution failed
|
||||
output: {},
|
||||
logs: [
|
||||
{
|
||||
blockId: 'agent1',
|
||||
startedAt: '2023-01-01T00:00:00Z',
|
||||
endedAt: '2023-01-01T00:00:01Z',
|
||||
durationMs: 1000,
|
||||
success: true,
|
||||
output: { content: 'Agent 1 succeeded' },
|
||||
error: undefined,
|
||||
},
|
||||
{
|
||||
blockId: 'agent2',
|
||||
startedAt: '2023-01-01T00:00:00Z',
|
||||
endedAt: '2023-01-01T00:00:01Z',
|
||||
durationMs: 500,
|
||||
success: false,
|
||||
output: null,
|
||||
error: 'Agent 2 failed',
|
||||
},
|
||||
],
|
||||
metadata: { duration: 1000 },
|
||||
}
|
||||
|
||||
// Test the key logic: logs should be processed regardless of overall success
|
||||
expect(executionResult.success).toBe(false)
|
||||
expect(executionResult.logs).toBeDefined()
|
||||
expect(executionResult.logs).toHaveLength(2)
|
||||
|
||||
// First log should be successful
|
||||
expect(executionResult.logs[0].success).toBe(true)
|
||||
expect(executionResult.logs[0].output?.content).toBe('Agent 1 succeeded')
|
||||
|
||||
// Second log should be failed
|
||||
expect(executionResult.logs[1].success).toBe(false)
|
||||
expect(executionResult.logs[1].error).toBe('Agent 2 failed')
|
||||
})
|
||||
|
||||
it('should handle ExecutionResult vs StreamingExecution types correctly', () => {
|
||||
const executionResult = {
|
||||
success: true,
|
||||
output: { content: 'test' },
|
||||
logs: [],
|
||||
metadata: { duration: 100 },
|
||||
}
|
||||
|
||||
// Test direct ExecutionResult
|
||||
const directResult = executionResult
|
||||
const extractedDirect = directResult
|
||||
expect(extractedDirect).toBe(executionResult)
|
||||
|
||||
// Test StreamingExecution with embedded ExecutionResult
|
||||
const streamingResult = {
|
||||
stream: new ReadableStream(),
|
||||
execution: executionResult,
|
||||
}
|
||||
|
||||
// Simulate the type extraction logic from executeWorkflowForChat
|
||||
const extractedFromStreaming =
|
||||
streamingResult && typeof streamingResult === 'object' && 'execution' in streamingResult
|
||||
? streamingResult.execution
|
||||
: streamingResult
|
||||
|
||||
expect(extractedFromStreaming).toBe(executionResult)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
@@ -10,10 +10,11 @@ import { hasAdminPermission } from '@/lib/permissions/utils'
|
||||
import { processStreamingBlockLogs } from '@/lib/tokenization'
|
||||
import { getEmailDomain } from '@/lib/urls/utils'
|
||||
import { decryptSecret } from '@/lib/utils'
|
||||
import { getBlock } from '@/blocks'
|
||||
import { db } from '@/db'
|
||||
import { chat, environment as envTable, userStats, workflow } from '@/db/schema'
|
||||
import { Executor } from '@/executor'
|
||||
import type { BlockLog } from '@/executor/types'
|
||||
import type { BlockLog, ExecutionResult } from '@/executor/types'
|
||||
import { Serializer } from '@/serializer'
|
||||
import { mergeSubblockState } from '@/stores/workflows/server-utils'
|
||||
import type { WorkflowState } from '@/stores/workflows/workflow/types'
|
||||
@@ -423,7 +424,22 @@ export async function executeWorkflowForChat(
|
||||
|
||||
// Prepare for execution, similar to use-workflow-execution.ts
|
||||
const mergedStates = mergeSubblockState(blocks)
|
||||
const currentBlockStates = Object.entries(mergedStates).reduce(
|
||||
|
||||
const filteredStates = Object.entries(mergedStates).reduce(
|
||||
(acc, [id, block]) => {
|
||||
const blockConfig = getBlock(block.type)
|
||||
const isTriggerBlock = blockConfig?.category === 'triggers'
|
||||
|
||||
// Skip trigger blocks during chat execution
|
||||
if (!isTriggerBlock) {
|
||||
acc[id] = block
|
||||
}
|
||||
return acc
|
||||
},
|
||||
{} as typeof mergedStates
|
||||
)
|
||||
|
||||
const currentBlockStates = Object.entries(filteredStates).reduce(
|
||||
(acc, [id, block]) => {
|
||||
acc[id] = Object.entries(block.subBlocks).reduce(
|
||||
(subAcc, [key, subBlock]) => {
|
||||
@@ -465,12 +481,23 @@ export async function executeWorkflowForChat(
|
||||
logger.warn(`[${requestId}] Could not parse workflow variables:`, error)
|
||||
}
|
||||
|
||||
// Create serialized workflow
|
||||
// Filter edges to exclude connections to/from trigger blocks (same as manual execution)
|
||||
const triggerBlockIds = Object.keys(mergedStates).filter((id) => {
|
||||
const blockConfig = getBlock(mergedStates[id].type)
|
||||
return blockConfig?.category === 'triggers'
|
||||
})
|
||||
|
||||
const filteredEdges = edges.filter(
|
||||
(edge) => !triggerBlockIds.includes(edge.source) && !triggerBlockIds.includes(edge.target)
|
||||
)
|
||||
|
||||
// Create serialized workflow with filtered blocks and edges
|
||||
const serializedWorkflow = new Serializer().serializeWorkflow(
|
||||
mergedStates,
|
||||
edges,
|
||||
filteredStates,
|
||||
filteredEdges,
|
||||
loops,
|
||||
parallels
|
||||
parallels,
|
||||
true // Enable validation during execution
|
||||
)
|
||||
|
||||
// Decrypt environment variables
|
||||
@@ -522,6 +549,7 @@ export async function executeWorkflowForChat(
|
||||
async start(controller) {
|
||||
const encoder = new TextEncoder()
|
||||
const streamedContent = new Map<string, string>()
|
||||
const streamedBlocks = new Set<string>() // Track which blocks have started streaming
|
||||
|
||||
const onStream = async (streamingExecution: any): Promise<void> => {
|
||||
if (!streamingExecution.stream) return
|
||||
@@ -530,6 +558,15 @@ export async function executeWorkflowForChat(
|
||||
const reader = streamingExecution.stream.getReader()
|
||||
if (blockId) {
|
||||
streamedContent.set(blockId, '')
|
||||
|
||||
// Add separator if this is not the first block to stream
|
||||
if (streamedBlocks.size > 0) {
|
||||
// Send separator before the new block starts
|
||||
controller.enqueue(
|
||||
encoder.encode(`data: ${JSON.stringify({ blockId, chunk: '\n\n' })}\n\n`)
|
||||
)
|
||||
}
|
||||
streamedBlocks.add(blockId)
|
||||
}
|
||||
try {
|
||||
while (true) {
|
||||
@@ -561,7 +598,7 @@ export async function executeWorkflowForChat(
|
||||
contextExtensions: {
|
||||
stream: true,
|
||||
selectedOutputIds: selectedOutputIds.length > 0 ? selectedOutputIds : outputBlockIds,
|
||||
edges: edges.map((e: any) => ({
|
||||
edges: filteredEdges.map((e: any) => ({
|
||||
source: e.source,
|
||||
target: e.target,
|
||||
})),
|
||||
@@ -588,25 +625,117 @@ export async function executeWorkflowForChat(
|
||||
throw error
|
||||
}
|
||||
|
||||
if (result && 'success' in result) {
|
||||
// Update streamed content and apply tokenization
|
||||
if (result.logs) {
|
||||
result.logs.forEach((log: BlockLog) => {
|
||||
if (streamedContent.has(log.blockId)) {
|
||||
const content = streamedContent.get(log.blockId)
|
||||
if (log.output) {
|
||||
log.output.content = content
|
||||
}
|
||||
}
|
||||
})
|
||||
// Handle both ExecutionResult and StreamingExecution types
|
||||
const executionResult =
|
||||
result && typeof result === 'object' && 'execution' in result
|
||||
? (result.execution as ExecutionResult)
|
||||
: (result as ExecutionResult)
|
||||
|
||||
// Process all logs for streaming tokenization
|
||||
const processedCount = processStreamingBlockLogs(result.logs, streamedContent)
|
||||
logger.info(`[CHAT-API] Processed ${processedCount} blocks for streaming tokenization`)
|
||||
if (executionResult?.logs) {
|
||||
// Update streamed content and apply tokenization - process regardless of overall success
|
||||
// This ensures partial successes (some agents succeed, some fail) still return results
|
||||
|
||||
// Add newlines between different agent outputs for better readability
|
||||
const processedOutputs = new Set<string>()
|
||||
executionResult.logs.forEach((log: BlockLog) => {
|
||||
if (streamedContent.has(log.blockId)) {
|
||||
const content = streamedContent.get(log.blockId)
|
||||
if (log.output && content) {
|
||||
// Add newline separation between different outputs (but not before the first one)
|
||||
const separator = processedOutputs.size > 0 ? '\n\n' : ''
|
||||
log.output.content = separator + content
|
||||
processedOutputs.add(log.blockId)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
// Also process non-streamed outputs from selected blocks (like function blocks)
|
||||
// This uses the same logic as the chat panel to ensure identical behavior
|
||||
const nonStreamingLogs = executionResult.logs.filter(
|
||||
(log: BlockLog) => !streamedContent.has(log.blockId)
|
||||
)
|
||||
|
||||
// Extract the exact same functions used by the chat panel
|
||||
const extractBlockIdFromOutputId = (outputId: string): string => {
|
||||
return outputId.includes('_') ? outputId.split('_')[0] : outputId.split('.')[0]
|
||||
}
|
||||
|
||||
const { traceSpans, totalDuration } = buildTraceSpans(result)
|
||||
const enrichedResult = { ...result, traceSpans, totalDuration }
|
||||
const extractPathFromOutputId = (outputId: string, blockId: string): string => {
|
||||
return outputId.substring(blockId.length + 1)
|
||||
}
|
||||
|
||||
const parseOutputContentSafely = (output: any): any => {
|
||||
if (!output?.content) {
|
||||
return output
|
||||
}
|
||||
|
||||
if (typeof output.content === 'string') {
|
||||
try {
|
||||
return JSON.parse(output.content)
|
||||
} catch (e) {
|
||||
// Fallback to original structure if parsing fails
|
||||
return output
|
||||
}
|
||||
}
|
||||
|
||||
return output
|
||||
}
|
||||
|
||||
// Filter outputs that have matching logs (exactly like chat panel)
|
||||
const outputsToRender = selectedOutputIds.filter((outputId) => {
|
||||
const blockIdForOutput = extractBlockIdFromOutputId(outputId)
|
||||
return nonStreamingLogs.some((log) => log.blockId === blockIdForOutput)
|
||||
})
|
||||
|
||||
// Process each selected output (exactly like chat panel)
|
||||
for (const outputId of outputsToRender) {
|
||||
const blockIdForOutput = extractBlockIdFromOutputId(outputId)
|
||||
const path = extractPathFromOutputId(outputId, blockIdForOutput)
|
||||
const log = nonStreamingLogs.find((l) => l.blockId === blockIdForOutput)
|
||||
|
||||
if (log) {
|
||||
let outputValue: any = log.output
|
||||
|
||||
if (path) {
|
||||
// Parse JSON content safely (exactly like chat panel)
|
||||
outputValue = parseOutputContentSafely(outputValue)
|
||||
|
||||
const pathParts = path.split('.')
|
||||
for (const part of pathParts) {
|
||||
if (outputValue && typeof outputValue === 'object' && part in outputValue) {
|
||||
outputValue = outputValue[part]
|
||||
} else {
|
||||
outputValue = undefined
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (outputValue !== undefined) {
|
||||
// Add newline separation between different outputs
|
||||
const separator = processedOutputs.size > 0 ? '\n\n' : ''
|
||||
|
||||
// Format the output exactly like the chat panel
|
||||
const formattedOutput =
|
||||
typeof outputValue === 'string' ? outputValue : JSON.stringify(outputValue, null, 2)
|
||||
|
||||
// Update the log content
|
||||
if (!log.output.content) {
|
||||
log.output.content = separator + formattedOutput
|
||||
} else {
|
||||
log.output.content = separator + formattedOutput
|
||||
}
|
||||
processedOutputs.add(log.blockId)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Process all logs for streaming tokenization
|
||||
const processedCount = processStreamingBlockLogs(executionResult.logs, streamedContent)
|
||||
logger.info(`Processed ${processedCount} blocks for streaming tokenization`)
|
||||
|
||||
const { traceSpans, totalDuration } = buildTraceSpans(executionResult)
|
||||
const enrichedResult = { ...executionResult, traceSpans, totalDuration }
|
||||
if (conversationId) {
|
||||
if (!enrichedResult.metadata) {
|
||||
enrichedResult.metadata = {
|
||||
@@ -619,7 +748,7 @@ export async function executeWorkflowForChat(
|
||||
const executionId = uuidv4()
|
||||
logger.debug(`Generated execution ID for deployed chat: ${executionId}`)
|
||||
|
||||
if (result.success) {
|
||||
if (executionResult.success) {
|
||||
try {
|
||||
await db
|
||||
.update(userStats)
|
||||
@@ -642,12 +771,12 @@ export async function executeWorkflowForChat(
|
||||
}
|
||||
|
||||
// Complete logging session (for both success and failure)
|
||||
if (result && 'success' in result) {
|
||||
const { traceSpans } = buildTraceSpans(result)
|
||||
if (executionResult?.logs) {
|
||||
const { traceSpans } = buildTraceSpans(executionResult)
|
||||
await loggingSession.safeComplete({
|
||||
endedAt: new Date().toISOString(),
|
||||
totalDurationMs: result.metadata?.duration || 0,
|
||||
finalOutput: result.output,
|
||||
totalDurationMs: executionResult.metadata?.duration || 0,
|
||||
finalOutput: executionResult.output,
|
||||
traceSpans,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -1,390 +0,0 @@
|
||||
/**
|
||||
* Tests for codegen API route
|
||||
*
|
||||
* @vitest-environment node
|
||||
*/
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
|
||||
import { createMockRequest } from '@/app/api/__test-utils__/utils'
|
||||
|
||||
describe('Codegen API Route', () => {
|
||||
const mockOpenAI = {
|
||||
chat: {
|
||||
completions: {
|
||||
create: vi.fn(),
|
||||
},
|
||||
},
|
||||
}
|
||||
const mockLogger = {
|
||||
info: vi.fn(),
|
||||
warn: vi.fn(),
|
||||
error: vi.fn(),
|
||||
debug: vi.fn(),
|
||||
}
|
||||
const mockEnv = {
|
||||
OPENAI_API_KEY: 'test-api-key',
|
||||
}
|
||||
|
||||
const mockUUID = 'mock-uuid-12345678-90ab-cdef-1234-567890abcdef'
|
||||
|
||||
beforeEach(() => {
|
||||
vi.resetModules()
|
||||
mockEnv.OPENAI_API_KEY = 'test-api-key'
|
||||
|
||||
vi.stubGlobal('crypto', {
|
||||
randomUUID: vi.fn().mockReturnValue(mockUUID),
|
||||
})
|
||||
|
||||
const MockAPIError = class extends Error {
|
||||
status: number
|
||||
constructor(message: string, status?: number) {
|
||||
super(message)
|
||||
this.status = status || 500
|
||||
}
|
||||
}
|
||||
|
||||
vi.doMock('openai', () => ({
|
||||
default: vi.fn().mockImplementation(() => mockOpenAI),
|
||||
APIError: MockAPIError,
|
||||
}))
|
||||
|
||||
vi.doMock('@/lib/env', () => ({
|
||||
env: mockEnv,
|
||||
}))
|
||||
|
||||
vi.doMock('@/lib/logs/console/logger', () => ({
|
||||
createLogger: vi.fn().mockReturnValue(mockLogger),
|
||||
}))
|
||||
|
||||
vi.doMock('next/cache', () => ({
|
||||
unstable_noStore: vi.fn(),
|
||||
}))
|
||||
})
|
||||
|
||||
afterEach(() => {
|
||||
vi.clearAllMocks()
|
||||
})
|
||||
|
||||
it('should generate JSON schema successfully', async () => {
|
||||
const mockResponse = {
|
||||
choices: [
|
||||
{
|
||||
message: {
|
||||
content: JSON.stringify({
|
||||
name: 'test_function',
|
||||
description: 'A test function',
|
||||
strict: true,
|
||||
schema: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
input: { type: 'string', description: 'Test input' },
|
||||
},
|
||||
additionalProperties: false,
|
||||
required: ['input'],
|
||||
},
|
||||
}),
|
||||
},
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
mockOpenAI.chat.completions.create.mockResolvedValueOnce(mockResponse)
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
prompt: 'Create a function that takes a string input',
|
||||
generationType: 'json-schema',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/codegen/route')
|
||||
|
||||
const response = await POST(req)
|
||||
const data = await response.json()
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
expect(data.success).toBe(true)
|
||||
expect(data.generatedContent).toBeDefined()
|
||||
expect(() => JSON.parse(data.generatedContent)).not.toThrow()
|
||||
expect(mockOpenAI.chat.completions.create).toHaveBeenCalledWith({
|
||||
model: 'gpt-4o',
|
||||
messages: expect.arrayContaining([
|
||||
expect.objectContaining({ role: 'system' }),
|
||||
expect.objectContaining({
|
||||
role: 'user',
|
||||
content: 'Create a function that takes a string input',
|
||||
}),
|
||||
]),
|
||||
temperature: 0.2,
|
||||
max_tokens: 1500,
|
||||
response_format: { type: 'json_object' },
|
||||
})
|
||||
})
|
||||
|
||||
it('should generate JavaScript function body successfully', async () => {
|
||||
const mockResponse = {
|
||||
choices: [
|
||||
{
|
||||
message: {
|
||||
content: 'const input = <input>;\nreturn input.toUpperCase();',
|
||||
},
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
mockOpenAI.chat.completions.create.mockResolvedValueOnce(mockResponse)
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
prompt: 'Convert input to uppercase',
|
||||
generationType: 'javascript-function-body',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/codegen/route')
|
||||
|
||||
const response = await POST(req)
|
||||
const data = await response.json()
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
expect(data.success).toBe(true)
|
||||
expect(data.generatedContent).toBe('const input = <input>;\nreturn input.toUpperCase();')
|
||||
expect(mockOpenAI.chat.completions.create).toHaveBeenCalledWith({
|
||||
model: 'gpt-4o',
|
||||
messages: expect.arrayContaining([
|
||||
expect.objectContaining({ role: 'system' }),
|
||||
expect.objectContaining({ role: 'user' }),
|
||||
]),
|
||||
temperature: 0.2,
|
||||
max_tokens: 1500,
|
||||
response_format: undefined,
|
||||
})
|
||||
})
|
||||
|
||||
it('should generate custom tool schema successfully', async () => {
|
||||
const mockResponse = {
|
||||
choices: [
|
||||
{
|
||||
message: {
|
||||
content: JSON.stringify({
|
||||
type: 'function',
|
||||
function: {
|
||||
name: 'testFunction',
|
||||
description: 'A test function',
|
||||
parameters: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
input: { type: 'string', description: 'Test input' },
|
||||
},
|
||||
required: ['input'],
|
||||
additionalProperties: false,
|
||||
},
|
||||
},
|
||||
}),
|
||||
},
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
mockOpenAI.chat.completions.create.mockResolvedValueOnce(mockResponse)
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
prompt: 'Create a custom tool for testing',
|
||||
generationType: 'custom-tool-schema',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/codegen/route')
|
||||
|
||||
const response = await POST(req)
|
||||
const data = await response.json()
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
expect(data.success).toBe(true)
|
||||
expect(data.generatedContent).toBeDefined()
|
||||
})
|
||||
|
||||
it('should include context in the prompt', async () => {
|
||||
const mockResponse = {
|
||||
choices: [
|
||||
{
|
||||
message: {
|
||||
content: 'const result = <input>;\nreturn result;',
|
||||
},
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
mockOpenAI.chat.completions.create.mockResolvedValueOnce(mockResponse)
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
prompt: 'Modify this function',
|
||||
generationType: 'javascript-function-body',
|
||||
context: 'existing function code here',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/codegen/route')
|
||||
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
expect(mockOpenAI.chat.completions.create).toHaveBeenCalledWith({
|
||||
model: 'gpt-4o',
|
||||
messages: expect.arrayContaining([
|
||||
expect.objectContaining({ role: 'system' }),
|
||||
expect.objectContaining({
|
||||
role: 'user',
|
||||
content:
|
||||
'Prompt: Modify this function\\n\\nExisting Content/Context:\\nexisting function code here',
|
||||
}),
|
||||
]),
|
||||
temperature: 0.2,
|
||||
max_tokens: 1500,
|
||||
response_format: undefined,
|
||||
})
|
||||
})
|
||||
|
||||
it('should include conversation history', async () => {
|
||||
const mockResponse = {
|
||||
choices: [
|
||||
{
|
||||
message: {
|
||||
content: 'Updated function code',
|
||||
},
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
mockOpenAI.chat.completions.create.mockResolvedValueOnce(mockResponse)
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
prompt: 'Update the function',
|
||||
generationType: 'javascript-function-body',
|
||||
history: [
|
||||
{ role: 'user', content: 'Create a function' },
|
||||
{ role: 'assistant', content: 'function created' },
|
||||
],
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/codegen/route')
|
||||
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
expect(mockOpenAI.chat.completions.create).toHaveBeenCalledWith({
|
||||
model: 'gpt-4o',
|
||||
messages: expect.arrayContaining([
|
||||
expect.objectContaining({ role: 'system' }),
|
||||
expect.objectContaining({ role: 'user', content: 'Create a function' }),
|
||||
expect.objectContaining({ role: 'assistant', content: 'function created' }),
|
||||
expect.objectContaining({ role: 'user', content: 'Update the function' }),
|
||||
]),
|
||||
temperature: 0.2,
|
||||
max_tokens: 1500,
|
||||
response_format: undefined,
|
||||
})
|
||||
})
|
||||
|
||||
it('should handle missing OpenAI API key', async () => {
|
||||
mockEnv.OPENAI_API_KEY = ''
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
prompt: 'Test prompt',
|
||||
generationType: 'json-schema',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/codegen/route')
|
||||
|
||||
const response = await POST(req)
|
||||
const data = await response.json()
|
||||
|
||||
expect(response.status).toBe(503)
|
||||
expect(data.success).toBe(false)
|
||||
expect(data.error).toBe('Code generation service is not configured.')
|
||||
})
|
||||
|
||||
it('should handle missing required fields', async () => {
|
||||
const req = createMockRequest('POST', {
|
||||
prompt: '',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/codegen/route')
|
||||
|
||||
const response = await POST(req)
|
||||
const data = await response.json()
|
||||
|
||||
expect(response.status).toBe(400)
|
||||
expect(data.success).toBe(false)
|
||||
expect(data.error).toBe('Missing required fields: prompt and generationType.')
|
||||
expect(mockLogger.warn).toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it('should handle invalid generation type', async () => {
|
||||
const req = createMockRequest('POST', {
|
||||
prompt: 'Test prompt',
|
||||
generationType: 'invalid-type',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/codegen/route')
|
||||
|
||||
const response = await POST(req)
|
||||
const data = await response.json()
|
||||
|
||||
expect(response.status).toBe(400)
|
||||
expect(data.success).toBe(false)
|
||||
expect(data.error).toBe('Invalid generationType: invalid-type')
|
||||
expect(mockLogger.warn).toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it('should handle empty OpenAI response', async () => {
|
||||
const mockResponse = {
|
||||
choices: [
|
||||
{
|
||||
message: {
|
||||
content: null,
|
||||
},
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
mockOpenAI.chat.completions.create.mockResolvedValueOnce(mockResponse)
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
prompt: 'Test prompt',
|
||||
generationType: 'javascript-function-body',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/codegen/route')
|
||||
|
||||
const response = await POST(req)
|
||||
const data = await response.json()
|
||||
|
||||
expect(response.status).toBe(500)
|
||||
expect(data.success).toBe(false)
|
||||
expect(data.error).toBe('Failed to generate content. OpenAI response was empty.')
|
||||
expect(mockLogger.error).toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it('should handle invalid JSON schema generation', async () => {
|
||||
const mockResponse = {
|
||||
choices: [
|
||||
{
|
||||
message: {
|
||||
content: 'invalid json content',
|
||||
},
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
mockOpenAI.chat.completions.create.mockResolvedValueOnce(mockResponse)
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
prompt: 'Create a schema',
|
||||
generationType: 'json-schema',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/codegen/route')
|
||||
|
||||
const response = await POST(req)
|
||||
const data = await response.json()
|
||||
|
||||
expect(response.status).toBe(500)
|
||||
expect(data.success).toBe(false)
|
||||
expect(data.error).toBe('Generated JSON schema was invalid.')
|
||||
expect(mockLogger.error).toHaveBeenCalled()
|
||||
})
|
||||
})
|
||||
@@ -1,535 +0,0 @@
|
||||
import { unstable_noStore as noStore } from 'next/cache'
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import OpenAI from 'openai'
|
||||
import { env } from '@/lib/env'
|
||||
import { createLogger } from '@/lib/logs/console/logger'
|
||||
|
||||
export const dynamic = 'force-dynamic'
|
||||
export const runtime = 'edge'
|
||||
export const maxDuration = 60
|
||||
|
||||
const logger = createLogger('GenerateCodeAPI')
|
||||
|
||||
const openai = env.OPENAI_API_KEY
|
||||
? new OpenAI({
|
||||
apiKey: env.OPENAI_API_KEY,
|
||||
})
|
||||
: null
|
||||
|
||||
if (!env.OPENAI_API_KEY) {
|
||||
logger.warn('OPENAI_API_KEY not found. Code generation API will not function.')
|
||||
}
|
||||
|
||||
type GenerationType =
|
||||
| 'json-schema'
|
||||
| 'javascript-function-body'
|
||||
| 'typescript-function-body'
|
||||
| 'custom-tool-schema'
|
||||
| 'json-object'
|
||||
|
||||
// Define the structure for a single message in the history
|
||||
interface ChatMessage {
|
||||
role: 'user' | 'assistant' | 'system' // System role might be needed if we include the initial system prompt in history
|
||||
content: string
|
||||
}
|
||||
|
||||
interface RequestBody {
|
||||
prompt: string
|
||||
generationType: GenerationType
|
||||
context?: string
|
||||
stream?: boolean
|
||||
history?: ChatMessage[] // Optional conversation history
|
||||
}
|
||||
|
||||
const systemPrompts: Record<GenerationType, string> = {
|
||||
'json-schema': `You are an expert programmer specializing in creating JSON schemas according to a specific format.
|
||||
Generate ONLY the JSON schema based on the user's request.
|
||||
The output MUST be a single, valid JSON object, starting with { and ending with }.
|
||||
The JSON object MUST have the following top-level properties: 'name' (string), 'description' (string), 'strict' (boolean, usually true), and 'schema' (object).
|
||||
The 'schema' object must define the structure and MUST contain 'type': 'object', 'properties': {...}, 'additionalProperties': false, and 'required': [...].
|
||||
Inside 'properties', use standard JSON Schema properties (type, description, enum, items for arrays, etc.).
|
||||
Do not include any explanations, markdown formatting, or other text outside the JSON object.
|
||||
|
||||
Valid Schema Examples:
|
||||
|
||||
Example 1:
|
||||
{
|
||||
"name": "reddit_post",
|
||||
"description": "Fetches the reddit posts in the given subreddit",
|
||||
"strict": true,
|
||||
"schema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"title": {
|
||||
"type": "string",
|
||||
"description": "The title of the post"
|
||||
},
|
||||
"content": {
|
||||
"type": "string",
|
||||
"description": "The content of the post"
|
||||
}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
"required": [ "title", "content" ]
|
||||
}
|
||||
}
|
||||
|
||||
Example 2:
|
||||
{
|
||||
"name": "get_weather",
|
||||
"description": "Fetches the current weather for a specific location.",
|
||||
"strict": true,
|
||||
"schema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"location": {
|
||||
"type": "string",
|
||||
"description": "The city and state, e.g., San Francisco, CA"
|
||||
},
|
||||
"unit": {
|
||||
"type": "string",
|
||||
"description": "Temperature unit",
|
||||
"enum": ["celsius", "fahrenheit"]
|
||||
}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
"required": ["location", "unit"]
|
||||
}
|
||||
}
|
||||
|
||||
Example 3 (Array Input):
|
||||
{
|
||||
"name": "process_items",
|
||||
"description": "Processes a list of items with specific IDs.",
|
||||
"strict": true,
|
||||
"schema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"item_ids": {
|
||||
"type": "array",
|
||||
"description": "A list of unique item identifiers to process.",
|
||||
"items": {
|
||||
"type": "string",
|
||||
"description": "An item ID"
|
||||
}
|
||||
},
|
||||
"processing_mode": {
|
||||
"type": "string",
|
||||
"description": "The mode for processing",
|
||||
"enum": ["fast", "thorough"]
|
||||
}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
"required": ["item_ids", "processing_mode"]
|
||||
}
|
||||
}
|
||||
`,
|
||||
'custom-tool-schema': `You are an expert programmer specializing in creating OpenAI function calling format JSON schemas for custom tools.
|
||||
Generate ONLY the JSON schema based on the user's request.
|
||||
The output MUST be a single, valid JSON object, starting with { and ending with }.
|
||||
The JSON schema MUST follow this specific format:
|
||||
1. Top-level property "type" must be set to "function"
|
||||
2. A "function" object containing:
|
||||
- "name": A concise, camelCase name for the function
|
||||
- "description": A clear description of what the function does
|
||||
- "parameters": A JSON Schema object describing the function's parameters with:
|
||||
- "type": "object"
|
||||
- "properties": An object containing parameter definitions
|
||||
- "required": An array of required parameter names
|
||||
|
||||
Do not include any explanations, markdown formatting, or other text outside the JSON object.
|
||||
|
||||
Valid Schema Examples:
|
||||
|
||||
Example 1:
|
||||
{
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": "getWeather",
|
||||
"description": "Fetches the current weather for a specific location.",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"location": {
|
||||
"type": "string",
|
||||
"description": "The city and state, e.g., San Francisco, CA"
|
||||
},
|
||||
"unit": {
|
||||
"type": "string",
|
||||
"description": "Temperature unit",
|
||||
"enum": ["celsius", "fahrenheit"]
|
||||
}
|
||||
},
|
||||
"required": ["location"],
|
||||
"additionalProperties": false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Example 2:
|
||||
{
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": "addItemToOrder",
|
||||
"description": "Add one quantity of a food item to the order.",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"itemName": {
|
||||
"type": "string",
|
||||
"description": "The name of the food item to add to order"
|
||||
},
|
||||
"quantity": {
|
||||
"type": "integer",
|
||||
"description": "The quantity of the item to add",
|
||||
"default": 1
|
||||
}
|
||||
},
|
||||
"required": ["itemName"],
|
||||
"additionalProperties": false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Example 3 (Array Input):
|
||||
{
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": "processItems",
|
||||
"description": "Processes a list of items with specific IDs.",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"itemIds": {
|
||||
"type": "array",
|
||||
"description": "A list of unique item identifiers to process.",
|
||||
"items": {
|
||||
"type": "string",
|
||||
"description": "An item ID"
|
||||
}
|
||||
},
|
||||
"processingMode": {
|
||||
"type": "string",
|
||||
"description": "The mode for processing",
|
||||
"enum": ["fast", "thorough"]
|
||||
}
|
||||
},
|
||||
"required": ["itemIds"],
|
||||
"additionalProperties": false
|
||||
}
|
||||
}
|
||||
}
|
||||
`,
|
||||
'javascript-function-body': `You are an expert JavaScript programmer.
|
||||
Generate ONLY the raw body of a JavaScript function based on the user's request.
|
||||
The code should be executable within an 'async function(params, environmentVariables) {...}' context.
|
||||
- 'params' (object): Contains input parameters derived from the JSON schema. Access these directly using the parameter name wrapped in angle brackets, e.g., '<paramName>'. Do NOT use 'params.paramName'.
|
||||
- 'environmentVariables' (object): Contains environment variables. Reference these using the double curly brace syntax: '{{ENV_VAR_NAME}}'. Do NOT use 'environmentVariables.VAR_NAME' or env.
|
||||
|
||||
IMPORTANT FORMATTING RULES:
|
||||
1. Reference Environment Variables: Use the exact syntax {{VARIABLE_NAME}}. Do NOT wrap it in quotes (e.g., use 'apiKey = {{SERVICE_API_KEY}}' not 'apiKey = "{{SERVICE_API_KEY}}"'). Our system replaces these placeholders before execution.
|
||||
2. Reference Input Parameters/Workflow Variables: Use the exact syntax <variable_name>. Do NOT wrap it in quotes (e.g., use 'userId = <userId>;' not 'userId = "<userId>";'). This includes parameters defined in the block's schema and outputs from previous blocks.
|
||||
3. Function Body ONLY: Do NOT include the function signature (e.g., 'async function myFunction() {' or the surrounding '}').
|
||||
4. Imports: Do NOT include import/require statements unless they are standard Node.js built-in modules (e.g., 'crypto', 'fs'). External libraries are not supported in this context.
|
||||
5. Output: Ensure the code returns a value if the function is expected to produce output. Use 'return'.
|
||||
6. Clarity: Write clean, readable code.
|
||||
7. No Explanations: Do NOT include markdown formatting, comments explaining the rules, or any text other than the raw JavaScript code for the function body.
|
||||
|
||||
Example Scenario:
|
||||
User Prompt: "Fetch user data from an API. Use the User ID passed in as 'userId' and an API Key stored as the 'SERVICE_API_KEY' environment variable."
|
||||
|
||||
Generated Code:
|
||||
const userId = <block.content>; // Correct: Accessing input parameter without quotes
|
||||
const apiKey = {{SERVICE_API_KEY}}; // Correct: Accessing environment variable without quotes
|
||||
const url = \`https://api.example.com/users/\${userId}\`;
|
||||
|
||||
try {
|
||||
const response = await fetch(url, {
|
||||
method: 'GET',
|
||||
headers: {
|
||||
'Authorization': \`Bearer \${apiKey}\`,
|
||||
'Content-Type': 'application/json'
|
||||
}
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
// Throwing an error will mark the block execution as failed
|
||||
throw new Error(\`API request failed with status \${response.status}: \${await response.text()}\`);
|
||||
}
|
||||
|
||||
const data = await response.json();
|
||||
console.log('User data fetched successfully.'); // Optional: logging for debugging
|
||||
return data; // Return the fetched data which becomes the block's output
|
||||
} catch (error) {
|
||||
console.error(\`Error fetching user data: \${error.message}\`);
|
||||
// Re-throwing the error ensures the workflow knows this step failed.
|
||||
throw error;
|
||||
}`,
|
||||
'typescript-function-body': `You are an expert TypeScript programmer.
|
||||
Generate ONLY the body of a TypeScript function based on the user's request.
|
||||
The code should be executable within an async context. You have access to a 'params' object (typed as Record<string, any>) containing input parameters and an 'environmentVariables' object (typed as Record<string, string>) for env vars.
|
||||
Do not include the function signature (e.g., 'async function myFunction(): Promise<any> {').
|
||||
Do not include import/require statements unless absolutely necessary and they are standard Node.js modules.
|
||||
Do not include markdown formatting or explanations.
|
||||
Output only the raw TypeScript code. Use modern TypeScript features where appropriate. Do not use semicolons.
|
||||
Example:
|
||||
const userId = <block.content> as string
|
||||
const apiKey = {{SERVICE_API_KEY}}
|
||||
const response = await fetch(\`https://api.example.com/users/\${userId}\`, { headers: { Authorization: \`Bearer \${apiKey}\` } })
|
||||
if (!response.ok) {
|
||||
throw new Error(\`Failed to fetch user data: \${response.statusText}\`)
|
||||
}
|
||||
const data: unknown = await response.json()
|
||||
// Add type checking/assertion if necessary
|
||||
return data // Ensure you return a value if expected`,
|
||||
|
||||
'json-object': `You are an expert JSON programmer.
|
||||
Generate ONLY the raw JSON object based on the user's request.
|
||||
The output MUST be a single, valid JSON object, starting with { and ending with }.
|
||||
|
||||
Do not include any explanations, markdown formatting, or other text outside the JSON object.
|
||||
|
||||
You have access to the following variables you can use to generate the JSON body:
|
||||
- 'params' (object): Contains input parameters derived from the JSON schema. Access these directly using the parameter name wrapped in angle brackets, e.g., '<paramName>'. Do NOT use 'params.paramName'.
|
||||
- 'environmentVariables' (object): Contains environment variables. Reference these using the double curly brace syntax: '{{ENV_VAR_NAME}}'. Do NOT use 'environmentVariables.VAR_NAME' or env.
|
||||
|
||||
Example:
|
||||
{
|
||||
"name": "<block.agent.response.content>",
|
||||
"age": <block.function.output.age>,
|
||||
"success": true
|
||||
}
|
||||
`,
|
||||
}
|
||||
|
||||
export async function POST(req: NextRequest) {
|
||||
const requestId = crypto.randomUUID().slice(0, 8)
|
||||
logger.info(`[${requestId}] Received code generation request`)
|
||||
|
||||
if (!openai) {
|
||||
logger.error(`[${requestId}] OpenAI client not initialized. Missing API key.`)
|
||||
return NextResponse.json(
|
||||
{ success: false, error: 'Code generation service is not configured.' },
|
||||
{ status: 503 }
|
||||
)
|
||||
}
|
||||
|
||||
try {
|
||||
const body = (await req.json()) as RequestBody
|
||||
noStore()
|
||||
|
||||
// Destructure history along with other fields
|
||||
const { prompt, generationType, context, stream = false, history = [] } = body
|
||||
|
||||
if (!prompt || !generationType) {
|
||||
logger.warn(`[${requestId}] Invalid request: Missing prompt or generationType.`)
|
||||
return NextResponse.json(
|
||||
{ success: false, error: 'Missing required fields: prompt and generationType.' },
|
||||
{ status: 400 }
|
||||
)
|
||||
}
|
||||
|
||||
if (!systemPrompts[generationType]) {
|
||||
logger.warn(`[${requestId}] Invalid generationType: ${generationType}`)
|
||||
return NextResponse.json(
|
||||
{ success: false, error: `Invalid generationType: ${generationType}` },
|
||||
{ status: 400 }
|
||||
)
|
||||
}
|
||||
|
||||
const systemPrompt = systemPrompts[generationType]
|
||||
|
||||
// Construct the user message, potentially including context
|
||||
const currentUserMessageContent = context
|
||||
? `Prompt: ${prompt}\\n\\nExisting Content/Context:\\n${context}`
|
||||
: `${prompt}` // Keep it simple for follow-ups, context is in history
|
||||
|
||||
// Prepare messages for OpenAI API
|
||||
// Start with the system prompt
|
||||
const messages: ChatMessage[] = [{ role: 'system', content: systemPrompt }]
|
||||
|
||||
// Add previous messages from history
|
||||
// Filter out any potential system messages from history if we always prepend a fresh one
|
||||
messages.push(...history.filter((msg) => msg.role !== 'system'))
|
||||
|
||||
// Add the current user prompt
|
||||
messages.push({ role: 'user', content: currentUserMessageContent })
|
||||
|
||||
logger.debug(`[${requestId}] Calling OpenAI API`, {
|
||||
generationType,
|
||||
stream,
|
||||
historyLength: history.length,
|
||||
})
|
||||
|
||||
// For streaming responses
|
||||
if (stream) {
|
||||
try {
|
||||
const streamCompletion = await openai?.chat.completions.create({
|
||||
model: 'gpt-4o',
|
||||
messages: messages,
|
||||
temperature: 0.2,
|
||||
max_tokens: 1500,
|
||||
stream: true,
|
||||
})
|
||||
|
||||
// Use ReadableStream for Edge runtime
|
||||
return new Response(
|
||||
new ReadableStream({
|
||||
async start(controller) {
|
||||
const encoder = new TextEncoder()
|
||||
let fullContent = generationType === 'json-schema' ? '' : undefined
|
||||
|
||||
// Process each chunk
|
||||
for await (const chunk of streamCompletion) {
|
||||
const content = chunk.choices[0]?.delta?.content || ''
|
||||
if (content) {
|
||||
// Only append if fullContent is defined (i.e., for json-schema)
|
||||
if (fullContent !== undefined) {
|
||||
fullContent += content
|
||||
}
|
||||
|
||||
// Send the chunk to the client
|
||||
controller.enqueue(
|
||||
encoder.encode(
|
||||
`${JSON.stringify({
|
||||
chunk: content,
|
||||
done: false,
|
||||
})}\n`
|
||||
)
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// Check JSON validity for json-schema type when streaming is complete
|
||||
if (generationType === 'json-schema' && fullContent) {
|
||||
try {
|
||||
JSON.parse(fullContent)
|
||||
} catch (parseError: any) {
|
||||
logger.error(`[${requestId}] Generated JSON schema is invalid`, {
|
||||
error: parseError.message,
|
||||
content: fullContent,
|
||||
})
|
||||
|
||||
// Send error to client
|
||||
controller.enqueue(
|
||||
encoder.encode(
|
||||
`${JSON.stringify({
|
||||
error: 'Generated JSON schema was invalid.',
|
||||
done: true,
|
||||
})}\n`
|
||||
)
|
||||
)
|
||||
controller.close()
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Send the final done message
|
||||
controller.enqueue(
|
||||
encoder.encode(
|
||||
`${JSON.stringify({
|
||||
done: true,
|
||||
...(fullContent !== undefined && { fullContent: fullContent }),
|
||||
})}\n`
|
||||
)
|
||||
)
|
||||
controller.close()
|
||||
logger.info(`[${requestId}] Code generation streaming completed`, { generationType })
|
||||
},
|
||||
}),
|
||||
{
|
||||
headers: {
|
||||
'Content-Type': 'text/event-stream',
|
||||
'Cache-Control': 'no-cache, no-transform',
|
||||
Connection: 'keep-alive',
|
||||
},
|
||||
}
|
||||
)
|
||||
} catch (error: any) {
|
||||
logger.error(`[${requestId}] Streaming error`, {
|
||||
error: error.message || 'Unknown error',
|
||||
stack: error.stack,
|
||||
})
|
||||
|
||||
return NextResponse.json(
|
||||
{ success: false, error: 'An error occurred during code generation streaming.' },
|
||||
{ status: 500 }
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// For non-streaming responses (original implementation)
|
||||
const completion = await openai?.chat.completions.create({
|
||||
// Use non-null assertion
|
||||
model: 'gpt-4o',
|
||||
// Pass the constructed messages array
|
||||
messages: messages,
|
||||
temperature: 0.2,
|
||||
max_tokens: 1500,
|
||||
response_format: generationType === 'json-schema' ? { type: 'json_object' } : undefined,
|
||||
})
|
||||
|
||||
const generatedContent = completion.choices[0]?.message?.content?.trim()
|
||||
|
||||
if (!generatedContent) {
|
||||
logger.error(`[${requestId}] OpenAI response was empty or invalid.`)
|
||||
return NextResponse.json(
|
||||
{ success: false, error: 'Failed to generate content. OpenAI response was empty.' },
|
||||
{ status: 500 }
|
||||
)
|
||||
}
|
||||
|
||||
logger.info(`[${requestId}] Code generation successful`, { generationType })
|
||||
|
||||
if (generationType === 'json-schema') {
|
||||
try {
|
||||
JSON.parse(generatedContent)
|
||||
return NextResponse.json({ success: true, generatedContent })
|
||||
} catch (parseError: any) {
|
||||
logger.error(`[${requestId}] Generated JSON schema is invalid`, {
|
||||
error: parseError.message,
|
||||
content: generatedContent,
|
||||
})
|
||||
return NextResponse.json(
|
||||
{ success: false, error: 'Generated JSON schema was invalid.' },
|
||||
{ status: 500 }
|
||||
)
|
||||
}
|
||||
} else {
|
||||
return NextResponse.json({ success: true, generatedContent })
|
||||
}
|
||||
} catch (error: any) {
|
||||
logger.error(`[${requestId}] Code generation failed`, {
|
||||
error: error.message || 'Unknown error',
|
||||
stack: error.stack,
|
||||
})
|
||||
|
||||
let clientErrorMessage = 'Code generation failed. Please try again later.'
|
||||
// Keep original message for server logging
|
||||
let serverErrorMessage = error.message || 'Unknown error'
|
||||
|
||||
let status = 500
|
||||
if (error instanceof OpenAI.APIError) {
|
||||
status = error.status || 500
|
||||
serverErrorMessage = error.message // Use specific API error for server logs
|
||||
logger.error(`[${requestId}] OpenAI API Error: ${status} - ${serverErrorMessage}`)
|
||||
// Optionally, customize client message based on status, but keep it generic
|
||||
if (status === 401) {
|
||||
clientErrorMessage = 'Authentication failed. Please check your API key configuration.'
|
||||
} else if (status === 429) {
|
||||
clientErrorMessage = 'Rate limit exceeded. Please try again later.'
|
||||
} else if (status >= 500) {
|
||||
clientErrorMessage =
|
||||
'The code generation service is currently unavailable. Please try again later.'
|
||||
}
|
||||
}
|
||||
|
||||
return NextResponse.json(
|
||||
{
|
||||
success: false,
|
||||
error: clientErrorMessage,
|
||||
},
|
||||
{ status }
|
||||
)
|
||||
}
|
||||
}
|
||||
617
apps/sim/app/api/copilot/chat/route.test.ts
Normal file
617
apps/sim/app/api/copilot/chat/route.test.ts
Normal file
@@ -0,0 +1,617 @@
|
||||
/**
|
||||
* Tests for copilot chat API route
|
||||
*
|
||||
* @vitest-environment node
|
||||
*/
|
||||
import { NextRequest } from 'next/server'
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
|
||||
import {
|
||||
createMockRequest,
|
||||
mockAuth,
|
||||
mockCryptoUuid,
|
||||
setupCommonApiMocks,
|
||||
} from '@/app/api/__test-utils__/utils'
|
||||
|
||||
describe('Copilot Chat API Route', () => {
|
||||
const mockSelect = vi.fn()
|
||||
const mockFrom = vi.fn()
|
||||
const mockWhere = vi.fn()
|
||||
const mockLimit = vi.fn()
|
||||
const mockOrderBy = vi.fn()
|
||||
const mockInsert = vi.fn()
|
||||
const mockValues = vi.fn()
|
||||
const mockReturning = vi.fn()
|
||||
const mockUpdate = vi.fn()
|
||||
const mockSet = vi.fn()
|
||||
|
||||
const mockExecuteProviderRequest = vi.fn()
|
||||
const mockGetCopilotModel = vi.fn()
|
||||
const mockGetRotatingApiKey = vi.fn()
|
||||
|
||||
beforeEach(() => {
|
||||
vi.resetModules()
|
||||
setupCommonApiMocks()
|
||||
mockCryptoUuid()
|
||||
|
||||
mockSelect.mockReturnValue({ from: mockFrom })
|
||||
mockFrom.mockReturnValue({ where: mockWhere })
|
||||
mockWhere.mockReturnValue({
|
||||
orderBy: mockOrderBy,
|
||||
limit: mockLimit,
|
||||
})
|
||||
mockOrderBy.mockResolvedValue([])
|
||||
mockLimit.mockResolvedValue([])
|
||||
mockInsert.mockReturnValue({ values: mockValues })
|
||||
mockValues.mockReturnValue({ returning: mockReturning })
|
||||
mockUpdate.mockReturnValue({ set: mockSet })
|
||||
mockSet.mockReturnValue({ where: mockWhere })
|
||||
|
||||
vi.doMock('@/db', () => ({
|
||||
db: {
|
||||
select: mockSelect,
|
||||
insert: mockInsert,
|
||||
update: mockUpdate,
|
||||
},
|
||||
}))
|
||||
|
||||
vi.doMock('@/db/schema', () => ({
|
||||
copilotChats: {
|
||||
id: 'id',
|
||||
userId: 'userId',
|
||||
messages: 'messages',
|
||||
title: 'title',
|
||||
model: 'model',
|
||||
workflowId: 'workflowId',
|
||||
createdAt: 'createdAt',
|
||||
updatedAt: 'updatedAt',
|
||||
},
|
||||
}))
|
||||
|
||||
vi.doMock('drizzle-orm', () => ({
|
||||
and: vi.fn((...conditions) => ({ conditions, type: 'and' })),
|
||||
eq: vi.fn((field, value) => ({ field, value, type: 'eq' })),
|
||||
desc: vi.fn((field) => ({ field, type: 'desc' })),
|
||||
}))
|
||||
|
||||
mockGetCopilotModel.mockReturnValue({
|
||||
provider: 'anthropic',
|
||||
model: 'claude-3-haiku-20240307',
|
||||
})
|
||||
|
||||
vi.doMock('@/lib/copilot/config', () => ({
|
||||
getCopilotModel: mockGetCopilotModel,
|
||||
}))
|
||||
|
||||
vi.doMock('@/lib/copilot/prompts', () => ({
|
||||
TITLE_GENERATION_SYSTEM_PROMPT: 'Generate a title',
|
||||
TITLE_GENERATION_USER_PROMPT: vi.fn((msg) => `Generate title for: ${msg}`),
|
||||
}))
|
||||
|
||||
mockExecuteProviderRequest.mockResolvedValue({
|
||||
content: 'Generated Title',
|
||||
})
|
||||
|
||||
vi.doMock('@/providers', () => ({
|
||||
executeProviderRequest: mockExecuteProviderRequest,
|
||||
}))
|
||||
|
||||
mockGetRotatingApiKey.mockReturnValue('test-api-key')
|
||||
|
||||
vi.doMock('@/lib/utils', () => ({
|
||||
getRotatingApiKey: mockGetRotatingApiKey,
|
||||
}))
|
||||
|
||||
vi.doMock('@/lib/env', () => ({
|
||||
env: {
|
||||
SIM_AGENT_API_URL: 'http://localhost:8000',
|
||||
SIM_AGENT_API_KEY: 'test-sim-agent-key',
|
||||
},
|
||||
}))
|
||||
|
||||
global.fetch = vi.fn()
|
||||
})
|
||||
|
||||
afterEach(() => {
|
||||
vi.clearAllMocks()
|
||||
vi.restoreAllMocks()
|
||||
})
|
||||
|
||||
describe('POST', () => {
|
||||
it('should return 401 when user is not authenticated', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setUnauthenticated()
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
message: 'Hello',
|
||||
workflowId: 'workflow-123',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/chat/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(401)
|
||||
const responseData = await response.json()
|
||||
expect(responseData).toEqual({ error: 'Unauthorized' })
|
||||
})
|
||||
|
||||
it('should return 400 for invalid request body', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
// Missing required fields
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/chat/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(400)
|
||||
const responseData = await response.json()
|
||||
expect(responseData.error).toBe('Invalid request data')
|
||||
expect(responseData.details).toBeDefined()
|
||||
})
|
||||
|
||||
it('should handle new chat creation and forward to sim agent', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
// Mock successful chat creation
|
||||
const newChat = {
|
||||
id: 'chat-123',
|
||||
userId: 'user-123',
|
||||
workflowId: 'workflow-123',
|
||||
title: null,
|
||||
model: 'claude-3-haiku-20240307',
|
||||
messages: [],
|
||||
}
|
||||
mockReturning.mockResolvedValue([newChat])
|
||||
|
||||
// Mock successful sim agent response
|
||||
const mockReadableStream = new ReadableStream({
|
||||
start(controller) {
|
||||
const encoder = new TextEncoder()
|
||||
controller.enqueue(
|
||||
encoder.encode('data: {"type": "assistant_message", "content": "Hello response"}\\n\\n')
|
||||
)
|
||||
controller.close()
|
||||
},
|
||||
})
|
||||
|
||||
;(global.fetch as any).mockResolvedValue({
|
||||
ok: true,
|
||||
body: mockReadableStream,
|
||||
})
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
message: 'Hello',
|
||||
workflowId: 'workflow-123',
|
||||
createNewChat: true,
|
||||
stream: true,
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/chat/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
expect(mockInsert).toHaveBeenCalled()
|
||||
expect(mockValues).toHaveBeenCalledWith({
|
||||
userId: 'user-123',
|
||||
workflowId: 'workflow-123',
|
||||
title: null,
|
||||
model: 'claude-3-haiku-20240307',
|
||||
messages: [],
|
||||
})
|
||||
|
||||
// Verify sim agent was called
|
||||
expect(global.fetch).toHaveBeenCalledWith(
|
||||
'http://localhost:8000/api/chat-completion-streaming',
|
||||
expect.objectContaining({
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'x-api-key': 'test-sim-agent-key',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
messages: [
|
||||
{
|
||||
role: 'user',
|
||||
content: 'Hello',
|
||||
},
|
||||
],
|
||||
workflowId: 'workflow-123',
|
||||
userId: 'user-123',
|
||||
stream: true,
|
||||
streamToolCalls: true,
|
||||
mode: 'agent',
|
||||
}),
|
||||
})
|
||||
)
|
||||
})
|
||||
|
||||
it('should load existing chat and include conversation history', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
// Mock existing chat with history
|
||||
const existingChat = {
|
||||
id: 'chat-123',
|
||||
userId: 'user-123',
|
||||
workflowId: 'workflow-123',
|
||||
title: 'Existing Chat',
|
||||
messages: [
|
||||
{ role: 'user', content: 'Previous message' },
|
||||
{ role: 'assistant', content: 'Previous response' },
|
||||
],
|
||||
}
|
||||
// For POST route, the select query uses limit not orderBy
|
||||
mockLimit.mockResolvedValue([existingChat])
|
||||
|
||||
// Mock sim agent response
|
||||
const mockReadableStream = new ReadableStream({
|
||||
start(controller) {
|
||||
controller.close()
|
||||
},
|
||||
})
|
||||
|
||||
;(global.fetch as any).mockResolvedValue({
|
||||
ok: true,
|
||||
body: mockReadableStream,
|
||||
})
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
message: 'New message',
|
||||
workflowId: 'workflow-123',
|
||||
chatId: 'chat-123',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/chat/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
|
||||
// Verify conversation history was included
|
||||
expect(global.fetch).toHaveBeenCalledWith(
|
||||
'http://localhost:8000/api/chat-completion-streaming',
|
||||
expect.objectContaining({
|
||||
body: JSON.stringify({
|
||||
messages: [
|
||||
{ role: 'user', content: 'Previous message' },
|
||||
{ role: 'assistant', content: 'Previous response' },
|
||||
{ role: 'user', content: 'New message' },
|
||||
],
|
||||
workflowId: 'workflow-123',
|
||||
userId: 'user-123',
|
||||
stream: true,
|
||||
streamToolCalls: true,
|
||||
mode: 'agent',
|
||||
}),
|
||||
})
|
||||
)
|
||||
})
|
||||
|
||||
    // Implicit feedback must be injected as a system message *before* the
    // current user message in the payload forwarded to the sim agent.
    it('should include implicit feedback in messages', async () => {
      const authMocks = mockAuth()
      authMocks.setAuthenticated()

      // Mock new chat creation
      const newChat = {
        id: 'chat-123',
        userId: 'user-123',
        workflowId: 'workflow-123',
        messages: [],
      }
      mockReturning.mockResolvedValue([newChat])

      // Mock sim agent response (empty SSE stream is enough for this test)

      ;(global.fetch as any).mockResolvedValue({
        ok: true,
        body: new ReadableStream({
          start(controller) {
            controller.close()
          },
        }),
      })

      const req = createMockRequest('POST', {
        message: 'Hello',
        workflowId: 'workflow-123',
        createNewChat: true,
        implicitFeedback: 'User seems confused about the workflow',
      })

      const { POST } = await import('@/app/api/copilot/chat/route')
      await POST(req)

      // Verify implicit feedback was included as system message
      expect(global.fetch).toHaveBeenCalledWith(
        'http://localhost:8000/api/chat-completion-streaming',
        expect.objectContaining({
          body: JSON.stringify({
            messages: [
              { role: 'system', content: 'User seems confused about the workflow' },
              { role: 'user', content: 'Hello' },
            ],
            workflowId: 'workflow-123',
            userId: 'user-123',
            stream: true,
            streamToolCalls: true,
            mode: 'agent',
          }),
        })
      )
    })
|
||||
|
||||
    // A non-OK response from the sim agent should surface as a 500 whose
    // error message mentions the sim agent (not a generic failure).
    it('should handle sim agent API errors', async () => {
      const authMocks = mockAuth()
      authMocks.setAuthenticated()

      // Mock new chat creation
      mockReturning.mockResolvedValue([{ id: 'chat-123', messages: [] }])

      // Mock sim agent error

      ;(global.fetch as any).mockResolvedValue({
        ok: false,
        status: 500,
        text: () => Promise.resolve('Internal server error'),
      })

      const req = createMockRequest('POST', {
        message: 'Hello',
        workflowId: 'workflow-123',
        createNewChat: true,
      })

      const { POST } = await import('@/app/api/copilot/chat/route')
      const response = await POST(req)

      expect(response.status).toBe(500)
      const responseData = await response.json()
      expect(responseData.error).toContain('Sim agent API error')
    })
|
||||
|
||||
    // A DB failure while inserting the new chat row propagates out of the
    // route's try block and is returned as a 500 with the raw error message.
    it('should handle database errors during chat creation', async () => {
      const authMocks = mockAuth()
      authMocks.setAuthenticated()

      // Mock database error
      mockReturning.mockRejectedValue(new Error('Database connection failed'))

      const req = createMockRequest('POST', {
        message: 'Hello',
        workflowId: 'workflow-123',
        createNewChat: true,
      })

      const { POST } = await import('@/app/api/copilot/chat/route')
      const response = await POST(req)

      expect(response.status).toBe(500)
      const responseData = await response.json()
      expect(responseData.error).toBe('Database connection failed')
    })
|
||||
|
||||
    // When the client passes mode: 'ask', that mode must be forwarded
    // verbatim to the sim agent instead of the 'agent' default.
    it('should use ask mode when specified', async () => {
      const authMocks = mockAuth()
      authMocks.setAuthenticated()

      // Mock new chat creation
      mockReturning.mockResolvedValue([{ id: 'chat-123', messages: [] }])

      // Mock sim agent response

      ;(global.fetch as any).mockResolvedValue({
        ok: true,
        body: new ReadableStream({
          start(controller) {
            controller.close()
          },
        }),
      })

      const req = createMockRequest('POST', {
        message: 'What is this workflow?',
        workflowId: 'workflow-123',
        createNewChat: true,
        mode: 'ask',
      })

      const { POST } = await import('@/app/api/copilot/chat/route')
      await POST(req)

      expect(global.fetch).toHaveBeenCalledWith(
        'http://localhost:8000/api/chat-completion-streaming',
        expect.objectContaining({
          body: JSON.stringify({
            messages: [{ role: 'user', content: 'What is this workflow?' }],
            workflowId: 'workflow-123',
            userId: 'user-123',
            stream: true,
            streamToolCalls: true,
            mode: 'ask',
          }),
        })
      )
    })
|
||||
})
|
||||
|
||||
describe('GET', () => {
|
||||
it('should return 401 when user is not authenticated', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setUnauthenticated()
|
||||
|
||||
const req = new NextRequest('http://localhost:3000/api/copilot/chat?workflowId=workflow-123')
|
||||
|
||||
const { GET } = await import('@/app/api/copilot/chat/route')
|
||||
const response = await GET(req)
|
||||
|
||||
expect(response.status).toBe(401)
|
||||
const responseData = await response.json()
|
||||
expect(responseData).toEqual({ error: 'Unauthorized' })
|
||||
})
|
||||
|
||||
it('should return 400 when workflowId is missing', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
const req = new NextRequest('http://localhost:3000/api/copilot/chat')
|
||||
|
||||
const { GET } = await import('@/app/api/copilot/chat/route')
|
||||
const response = await GET(req)
|
||||
|
||||
expect(response.status).toBe(400)
|
||||
const responseData = await response.json()
|
||||
expect(responseData.error).toBe('workflowId is required')
|
||||
})
|
||||
|
||||
it('should return chats for authenticated user and workflow', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
// Mock database response (what comes from DB)
|
||||
const mockDbChats = [
|
||||
{
|
||||
id: 'chat-1',
|
||||
title: 'First Chat',
|
||||
model: 'claude-3-haiku-20240307',
|
||||
messages: [
|
||||
{ role: 'user', content: 'Message 1' },
|
||||
{ role: 'assistant', content: 'Response 1' },
|
||||
{ role: 'user', content: 'Message 2' },
|
||||
{ role: 'assistant', content: 'Response 2' },
|
||||
],
|
||||
createdAt: new Date('2024-01-01'),
|
||||
updatedAt: new Date('2024-01-02'),
|
||||
},
|
||||
{
|
||||
id: 'chat-2',
|
||||
title: 'Second Chat',
|
||||
model: 'claude-3-haiku-20240307',
|
||||
messages: [
|
||||
{ role: 'user', content: 'Message 1' },
|
||||
{ role: 'assistant', content: 'Response 1' },
|
||||
],
|
||||
createdAt: new Date('2024-01-03'),
|
||||
updatedAt: new Date('2024-01-04'),
|
||||
},
|
||||
]
|
||||
|
||||
// Expected transformed response (what the route returns)
|
||||
const expectedChats = [
|
||||
{
|
||||
id: 'chat-1',
|
||||
title: 'First Chat',
|
||||
model: 'claude-3-haiku-20240307',
|
||||
messages: [
|
||||
{ role: 'user', content: 'Message 1' },
|
||||
{ role: 'assistant', content: 'Response 1' },
|
||||
{ role: 'user', content: 'Message 2' },
|
||||
{ role: 'assistant', content: 'Response 2' },
|
||||
],
|
||||
messageCount: 4,
|
||||
previewYaml: null,
|
||||
createdAt: new Date('2024-01-01'),
|
||||
updatedAt: new Date('2024-01-02'),
|
||||
},
|
||||
{
|
||||
id: 'chat-2',
|
||||
title: 'Second Chat',
|
||||
model: 'claude-3-haiku-20240307',
|
||||
messages: [
|
||||
{ role: 'user', content: 'Message 1' },
|
||||
{ role: 'assistant', content: 'Response 1' },
|
||||
],
|
||||
messageCount: 2,
|
||||
previewYaml: null,
|
||||
createdAt: new Date('2024-01-03'),
|
||||
updatedAt: new Date('2024-01-04'),
|
||||
},
|
||||
]
|
||||
|
||||
mockOrderBy.mockResolvedValue(mockDbChats)
|
||||
|
||||
const req = new NextRequest('http://localhost:3000/api/copilot/chat?workflowId=workflow-123')
|
||||
|
||||
const { GET } = await import('@/app/api/copilot/chat/route')
|
||||
const response = await GET(req)
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
const responseData = await response.json()
|
||||
expect(responseData).toEqual({
|
||||
success: true,
|
||||
chats: [
|
||||
{
|
||||
id: 'chat-1',
|
||||
title: 'First Chat',
|
||||
model: 'claude-3-haiku-20240307',
|
||||
messages: [
|
||||
{ role: 'user', content: 'Message 1' },
|
||||
{ role: 'assistant', content: 'Response 1' },
|
||||
{ role: 'user', content: 'Message 2' },
|
||||
{ role: 'assistant', content: 'Response 2' },
|
||||
],
|
||||
messageCount: 4,
|
||||
previewYaml: null,
|
||||
createdAt: '2024-01-01T00:00:00.000Z',
|
||||
updatedAt: '2024-01-02T00:00:00.000Z',
|
||||
},
|
||||
{
|
||||
id: 'chat-2',
|
||||
title: 'Second Chat',
|
||||
model: 'claude-3-haiku-20240307',
|
||||
messages: [
|
||||
{ role: 'user', content: 'Message 1' },
|
||||
{ role: 'assistant', content: 'Response 1' },
|
||||
],
|
||||
messageCount: 2,
|
||||
previewYaml: null,
|
||||
createdAt: '2024-01-03T00:00:00.000Z',
|
||||
updatedAt: '2024-01-04T00:00:00.000Z',
|
||||
},
|
||||
],
|
||||
})
|
||||
|
||||
// Verify database query was made correctly
|
||||
expect(mockSelect).toHaveBeenCalled()
|
||||
expect(mockWhere).toHaveBeenCalled()
|
||||
expect(mockOrderBy).toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it('should handle database errors when fetching chats', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
// Mock database error
|
||||
mockOrderBy.mockRejectedValue(new Error('Database query failed'))
|
||||
|
||||
const req = new NextRequest('http://localhost:3000/api/copilot/chat?workflowId=workflow-123')
|
||||
|
||||
const { GET } = await import('@/app/api/copilot/chat/route')
|
||||
const response = await GET(req)
|
||||
|
||||
expect(response.status).toBe(500)
|
||||
const responseData = await response.json()
|
||||
expect(responseData.error).toBe('Failed to fetch chats')
|
||||
})
|
||||
|
||||
it('should return empty array when no chats found', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
mockOrderBy.mockResolvedValue([])
|
||||
|
||||
const req = new NextRequest('http://localhost:3000/api/copilot/chat?workflowId=workflow-123')
|
||||
|
||||
const { GET } = await import('@/app/api/copilot/chat/route')
|
||||
const response = await GET(req)
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
const responseData = await response.json()
|
||||
expect(responseData).toEqual({
|
||||
success: true,
|
||||
chats: [],
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
719
apps/sim/app/api/copilot/chat/route.ts
Normal file
719
apps/sim/app/api/copilot/chat/route.ts
Normal file
@@ -0,0 +1,719 @@
|
||||
import { and, desc, eq } from 'drizzle-orm'
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { z } from 'zod'
|
||||
import { getSession } from '@/lib/auth'
|
||||
import {
|
||||
authenticateCopilotRequestSessionOnly,
|
||||
createBadRequestResponse,
|
||||
createInternalServerErrorResponse,
|
||||
createRequestTracker,
|
||||
createUnauthorizedResponse,
|
||||
} from '@/lib/copilot/auth'
|
||||
import { getCopilotModel } from '@/lib/copilot/config'
|
||||
import { TITLE_GENERATION_SYSTEM_PROMPT, TITLE_GENERATION_USER_PROMPT } from '@/lib/copilot/prompts'
|
||||
import { env } from '@/lib/env'
|
||||
import { createLogger } from '@/lib/logs/console/logger'
|
||||
import { db } from '@/db'
|
||||
import { copilotChats } from '@/db/schema'
|
||||
import { executeProviderRequest } from '@/providers'
|
||||
|
||||
const logger = createLogger('CopilotChatAPI')

// Request schema for POST /api/copilot/chat
const ChatMessageSchema = z.object({
  message: z.string().min(1, 'Message is required'),
  userMessageId: z.string().optional(), // ID from frontend for the user message
  chatId: z.string().optional(), // existing chat to continue; omit + createNewChat to start one
  workflowId: z.string().min(1, 'Workflow ID is required'),
  mode: z.enum(['ask', 'agent']).optional().default('agent'),
  createNewChat: z.boolean().optional().default(false),
  stream: z.boolean().optional().default(true), // SSE streaming vs. single JSON response
  implicitFeedback: z.string().optional(), // injected as a system message when present
})

// Sim Agent API configuration
const SIM_AGENT_API_URL = env.SIM_AGENT_API_URL || 'http://localhost:8000'
const SIM_AGENT_API_KEY = env.SIM_AGENT_API_KEY
|
||||
|
||||
/**
|
||||
* Generate a chat title using LLM
|
||||
*/
|
||||
async function generateChatTitle(userMessage: string): Promise<string> {
|
||||
try {
|
||||
const { provider, model } = getCopilotModel('title')
|
||||
|
||||
// Get the appropriate API key for the provider
|
||||
let apiKey: string | undefined
|
||||
if (provider === 'anthropic') {
|
||||
// Use rotating API key for Anthropic
|
||||
const { getRotatingApiKey } = require('@/lib/utils')
|
||||
try {
|
||||
apiKey = getRotatingApiKey('anthropic')
|
||||
logger.debug(`Using rotating API key for Anthropic title generation`)
|
||||
} catch (e) {
|
||||
// If rotation fails, let the provider handle it
|
||||
logger.warn(`Failed to get rotating API key for Anthropic:`, e)
|
||||
}
|
||||
}
|
||||
|
||||
const response = await executeProviderRequest(provider, {
|
||||
model,
|
||||
systemPrompt: TITLE_GENERATION_SYSTEM_PROMPT,
|
||||
context: TITLE_GENERATION_USER_PROMPT(userMessage),
|
||||
temperature: 0.3,
|
||||
maxTokens: 50,
|
||||
apiKey: apiKey || '',
|
||||
stream: false,
|
||||
})
|
||||
|
||||
if (typeof response === 'object' && 'content' in response) {
|
||||
return response.content?.trim() || 'New Chat'
|
||||
}
|
||||
|
||||
return 'New Chat'
|
||||
} catch (error) {
|
||||
logger.error('Failed to generate chat title:', error)
|
||||
return 'New Chat'
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Generate chat title asynchronously and update the database
|
||||
*/
|
||||
async function generateChatTitleAsync(
|
||||
chatId: string,
|
||||
userMessage: string,
|
||||
requestId: string,
|
||||
streamController?: ReadableStreamDefaultController<Uint8Array>
|
||||
): Promise<void> {
|
||||
try {
|
||||
logger.info(`[${requestId}] Starting async title generation for chat ${chatId}`)
|
||||
|
||||
const title = await generateChatTitle(userMessage)
|
||||
|
||||
// Update the chat with the generated title
|
||||
await db
|
||||
.update(copilotChats)
|
||||
.set({
|
||||
title,
|
||||
updatedAt: new Date(),
|
||||
})
|
||||
.where(eq(copilotChats.id, chatId))
|
||||
|
||||
// Send title_updated event to client if streaming
|
||||
if (streamController) {
|
||||
const encoder = new TextEncoder()
|
||||
const titleEvent = `data: ${JSON.stringify({
|
||||
type: 'title_updated',
|
||||
title: title,
|
||||
})}\n\n`
|
||||
streamController.enqueue(encoder.encode(titleEvent))
|
||||
logger.debug(`[${requestId}] Sent title_updated event to client: "${title}"`)
|
||||
}
|
||||
|
||||
logger.info(`[${requestId}] Generated title for chat ${chatId}: "${title}"`)
|
||||
} catch (error) {
|
||||
logger.error(`[${requestId}] Failed to generate title for chat ${chatId}:`, error)
|
||||
// Don't throw - this is a background operation
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* POST /api/copilot/chat
|
||||
* Send messages to sim agent and handle chat persistence
|
||||
*/
|
||||
export async function POST(req: NextRequest) {
|
||||
const tracker = createRequestTracker()
|
||||
|
||||
try {
|
||||
// Get session to access user information including name
|
||||
const session = await getSession()
|
||||
|
||||
if (!session?.user?.id) {
|
||||
return createUnauthorizedResponse()
|
||||
}
|
||||
|
||||
const authenticatedUserId = session.user.id
|
||||
|
||||
const body = await req.json()
|
||||
const {
|
||||
message,
|
||||
userMessageId,
|
||||
chatId,
|
||||
workflowId,
|
||||
mode,
|
||||
createNewChat,
|
||||
stream,
|
||||
implicitFeedback,
|
||||
} = ChatMessageSchema.parse(body)
|
||||
|
||||
logger.info(`[${tracker.requestId}] Processing copilot chat request`, {
|
||||
userId: authenticatedUserId,
|
||||
workflowId,
|
||||
chatId,
|
||||
mode,
|
||||
stream,
|
||||
createNewChat,
|
||||
messageLength: message.length,
|
||||
hasImplicitFeedback: !!implicitFeedback,
|
||||
})
|
||||
|
||||
// Handle chat context
|
||||
let currentChat: any = null
|
||||
let conversationHistory: any[] = []
|
||||
let actualChatId = chatId
|
||||
|
||||
if (chatId) {
|
||||
// Load existing chat
|
||||
const [chat] = await db
|
||||
.select()
|
||||
.from(copilotChats)
|
||||
.where(and(eq(copilotChats.id, chatId), eq(copilotChats.userId, authenticatedUserId)))
|
||||
.limit(1)
|
||||
|
||||
if (chat) {
|
||||
currentChat = chat
|
||||
conversationHistory = Array.isArray(chat.messages) ? chat.messages : []
|
||||
}
|
||||
} else if (createNewChat && workflowId) {
|
||||
// Create new chat
|
||||
const { provider, model } = getCopilotModel('chat')
|
||||
const [newChat] = await db
|
||||
.insert(copilotChats)
|
||||
.values({
|
||||
userId: authenticatedUserId,
|
||||
workflowId,
|
||||
title: null,
|
||||
model,
|
||||
messages: [],
|
||||
})
|
||||
.returning()
|
||||
|
||||
if (newChat) {
|
||||
currentChat = newChat
|
||||
actualChatId = newChat.id
|
||||
}
|
||||
}
|
||||
|
||||
// Build messages array for sim agent with conversation history
|
||||
const messages = []
|
||||
|
||||
// Add conversation history
|
||||
for (const msg of conversationHistory) {
|
||||
messages.push({
|
||||
role: msg.role,
|
||||
content: msg.content,
|
||||
})
|
||||
}
|
||||
|
||||
// Add implicit feedback if provided
|
||||
if (implicitFeedback) {
|
||||
messages.push({
|
||||
role: 'system',
|
||||
content: implicitFeedback,
|
||||
})
|
||||
}
|
||||
|
||||
// Add current user message
|
||||
messages.push({
|
||||
role: 'user',
|
||||
content: message,
|
||||
})
|
||||
|
||||
// Start title generation in parallel if this is a new chat with first message
|
||||
if (actualChatId && !currentChat?.title && conversationHistory.length === 0) {
|
||||
logger.info(`[${tracker.requestId}] Will start parallel title generation inside stream`)
|
||||
}
|
||||
|
||||
// Forward to sim agent API
|
||||
logger.info(`[${tracker.requestId}] Sending request to sim agent API`, {
|
||||
messageCount: messages.length,
|
||||
endpoint: `${SIM_AGENT_API_URL}/api/chat-completion-streaming`,
|
||||
})
|
||||
|
||||
const simAgentResponse = await fetch(`${SIM_AGENT_API_URL}/api/chat-completion-streaming`, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
...(SIM_AGENT_API_KEY && { 'x-api-key': SIM_AGENT_API_KEY }),
|
||||
},
|
||||
body: JSON.stringify({
|
||||
messages,
|
||||
workflowId,
|
||||
userId: authenticatedUserId,
|
||||
stream: stream,
|
||||
streamToolCalls: true,
|
||||
mode: mode,
|
||||
...(session?.user?.name && { userName: session.user.name }),
|
||||
}),
|
||||
})
|
||||
|
||||
if (!simAgentResponse.ok) {
|
||||
const errorText = await simAgentResponse.text()
|
||||
logger.error(`[${tracker.requestId}] Sim agent API error:`, {
|
||||
status: simAgentResponse.status,
|
||||
error: errorText,
|
||||
})
|
||||
return NextResponse.json(
|
||||
{ error: `Sim agent API error: ${simAgentResponse.statusText}` },
|
||||
{ status: simAgentResponse.status }
|
||||
)
|
||||
}
|
||||
|
||||
// If streaming is requested, forward the stream and update chat later
|
||||
if (stream && simAgentResponse.body) {
|
||||
logger.info(`[${tracker.requestId}] Streaming response from sim agent`)
|
||||
|
||||
// Create user message to save
|
||||
const userMessage = {
|
||||
id: userMessageId || crypto.randomUUID(), // Use frontend ID if provided
|
||||
role: 'user',
|
||||
content: message,
|
||||
timestamp: new Date().toISOString(),
|
||||
}
|
||||
|
||||
// Create a pass-through stream that captures the response
|
||||
const transformedStream = new ReadableStream({
|
||||
async start(controller) {
|
||||
const encoder = new TextEncoder()
|
||||
let assistantContent = ''
|
||||
const toolCalls: any[] = []
|
||||
let buffer = ''
|
||||
let isFirstDone = true
|
||||
|
||||
// Send chatId as first event
|
||||
if (actualChatId) {
|
||||
const chatIdEvent = `data: ${JSON.stringify({
|
||||
type: 'chat_id',
|
||||
chatId: actualChatId,
|
||||
})}\n\n`
|
||||
controller.enqueue(encoder.encode(chatIdEvent))
|
||||
logger.debug(`[${tracker.requestId}] Sent initial chatId event to client`)
|
||||
}
|
||||
|
||||
// Start title generation in parallel if needed
|
||||
if (actualChatId && !currentChat?.title && conversationHistory.length === 0) {
|
||||
logger.info(`[${tracker.requestId}] Starting title generation with stream updates`, {
|
||||
chatId: actualChatId,
|
||||
hasTitle: !!currentChat?.title,
|
||||
conversationLength: conversationHistory.length,
|
||||
message: message.substring(0, 100) + (message.length > 100 ? '...' : ''),
|
||||
})
|
||||
generateChatTitleAsync(actualChatId, message, tracker.requestId, controller).catch(
|
||||
(error) => {
|
||||
logger.error(`[${tracker.requestId}] Title generation failed:`, error)
|
||||
}
|
||||
)
|
||||
} else {
|
||||
logger.debug(`[${tracker.requestId}] Skipping title generation`, {
|
||||
chatId: actualChatId,
|
||||
hasTitle: !!currentChat?.title,
|
||||
conversationLength: conversationHistory.length,
|
||||
reason: !actualChatId
|
||||
? 'no chatId'
|
||||
: currentChat?.title
|
||||
? 'already has title'
|
||||
: conversationHistory.length > 0
|
||||
? 'not first message'
|
||||
: 'unknown',
|
||||
})
|
||||
}
|
||||
|
||||
// Forward the sim agent stream and capture assistant response
|
||||
const reader = simAgentResponse.body!.getReader()
|
||||
const decoder = new TextDecoder()
|
||||
|
||||
try {
|
||||
while (true) {
|
||||
const { done, value } = await reader.read()
|
||||
if (done) {
|
||||
logger.info(`[${tracker.requestId}] Stream reading completed`)
|
||||
break
|
||||
}
|
||||
|
||||
// Check if client disconnected before processing chunk
|
||||
try {
|
||||
// Forward the chunk to client immediately
|
||||
controller.enqueue(value)
|
||||
} catch (error) {
|
||||
// Client disconnected - stop reading from sim agent
|
||||
logger.info(
|
||||
`[${tracker.requestId}] Client disconnected, stopping stream processing`
|
||||
)
|
||||
reader.cancel() // Stop reading from sim agent
|
||||
break
|
||||
}
|
||||
const chunkSize = value.byteLength
|
||||
|
||||
// Decode and parse SSE events for logging and capturing content
|
||||
const decodedChunk = decoder.decode(value, { stream: true })
|
||||
buffer += decodedChunk
|
||||
|
||||
const lines = buffer.split('\n')
|
||||
buffer = lines.pop() || '' // Keep incomplete line in buffer
|
||||
|
||||
for (const line of lines) {
|
||||
if (line.trim() === '') continue // Skip empty lines
|
||||
|
||||
if (line.startsWith('data: ') && line.length > 6) {
|
||||
try {
|
||||
const jsonStr = line.slice(6)
|
||||
|
||||
// Check if the JSON string is unusually large (potential streaming issue)
|
||||
if (jsonStr.length > 50000) {
|
||||
// 50KB limit
|
||||
logger.warn(`[${tracker.requestId}] Large SSE event detected`, {
|
||||
size: jsonStr.length,
|
||||
preview: `${jsonStr.substring(0, 100)}...`,
|
||||
})
|
||||
}
|
||||
|
||||
const event = JSON.parse(jsonStr)
|
||||
|
||||
// Log different event types comprehensively
|
||||
switch (event.type) {
|
||||
case 'content':
|
||||
if (event.data) {
|
||||
assistantContent += event.data
|
||||
}
|
||||
break
|
||||
|
||||
case 'tool_call':
|
||||
logger.info(
|
||||
`[${tracker.requestId}] Tool call ${event.data?.partial ? '(partial)' : '(complete)'}:`,
|
||||
{
|
||||
id: event.data?.id,
|
||||
name: event.data?.name,
|
||||
arguments: event.data?.arguments,
|
||||
blockIndex: event.data?._blockIndex,
|
||||
}
|
||||
)
|
||||
if (!event.data?.partial) {
|
||||
toolCalls.push(event.data)
|
||||
}
|
||||
break
|
||||
|
||||
case 'tool_execution':
|
||||
logger.info(`[${tracker.requestId}] Tool execution started:`, {
|
||||
toolCallId: event.toolCallId,
|
||||
toolName: event.toolName,
|
||||
status: event.status,
|
||||
})
|
||||
break
|
||||
|
||||
case 'tool_result':
|
||||
logger.info(`[${tracker.requestId}] Tool result received:`, {
|
||||
toolCallId: event.toolCallId,
|
||||
toolName: event.toolName,
|
||||
success: event.success,
|
||||
result: `${JSON.stringify(event.result).substring(0, 200)}...`,
|
||||
resultSize: JSON.stringify(event.result).length,
|
||||
})
|
||||
break
|
||||
|
||||
case 'tool_error':
|
||||
logger.error(`[${tracker.requestId}] Tool error:`, {
|
||||
toolCallId: event.toolCallId,
|
||||
toolName: event.toolName,
|
||||
error: event.error,
|
||||
success: event.success,
|
||||
})
|
||||
break
|
||||
|
||||
case 'done':
|
||||
if (isFirstDone) {
|
||||
logger.info(
|
||||
`[${tracker.requestId}] Initial AI response complete, tool count: ${toolCalls.length}`
|
||||
)
|
||||
isFirstDone = false
|
||||
} else {
|
||||
logger.info(`[${tracker.requestId}] Conversation round complete`)
|
||||
}
|
||||
break
|
||||
|
||||
case 'error':
|
||||
logger.error(`[${tracker.requestId}] Stream error event:`, event.error)
|
||||
break
|
||||
|
||||
default:
|
||||
logger.debug(
|
||||
`[${tracker.requestId}] Unknown event type: ${event.type}`,
|
||||
event
|
||||
)
|
||||
}
|
||||
} catch (e) {
|
||||
// Enhanced error handling for large payloads and parsing issues
|
||||
const lineLength = line.length
|
||||
const isLargePayload = lineLength > 10000
|
||||
|
||||
if (isLargePayload) {
|
||||
logger.error(
|
||||
`[${tracker.requestId}] Failed to parse large SSE event (${lineLength} chars)`,
|
||||
{
|
||||
error: e,
|
||||
preview: `${line.substring(0, 200)}...`,
|
||||
size: lineLength,
|
||||
}
|
||||
)
|
||||
} else {
|
||||
logger.warn(
|
||||
`[${tracker.requestId}] Failed to parse SSE event: "${line.substring(0, 200)}..."`,
|
||||
e
|
||||
)
|
||||
}
|
||||
}
|
||||
} else if (line.trim() && line !== 'data: [DONE]') {
|
||||
logger.debug(`[${tracker.requestId}] Non-SSE line from sim agent: "${line}"`)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Process any remaining buffer
|
||||
if (buffer.trim()) {
|
||||
logger.debug(`[${tracker.requestId}] Processing remaining buffer: "${buffer}"`)
|
||||
if (buffer.startsWith('data: ')) {
|
||||
try {
|
||||
const event = JSON.parse(buffer.slice(6))
|
||||
if (event.type === 'content' && event.data) {
|
||||
assistantContent += event.data
|
||||
}
|
||||
} catch (e) {
|
||||
logger.warn(`[${tracker.requestId}] Failed to parse final buffer: "${buffer}"`)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Log final streaming summary
|
||||
logger.info(`[${tracker.requestId}] Streaming complete summary:`, {
|
||||
totalContentLength: assistantContent.length,
|
||||
toolCallsCount: toolCalls.length,
|
||||
hasContent: assistantContent.length > 0,
|
||||
toolNames: toolCalls.map((tc) => tc?.name).filter(Boolean),
|
||||
})
|
||||
|
||||
// Save messages to database after streaming completes (including aborted messages)
|
||||
if (currentChat) {
|
||||
const updatedMessages = [...conversationHistory, userMessage]
|
||||
|
||||
// Save assistant message if there's any content or tool calls (even partial from abort)
|
||||
if (assistantContent.trim() || toolCalls.length > 0) {
|
||||
const assistantMessage = {
|
||||
id: crypto.randomUUID(),
|
||||
role: 'assistant',
|
||||
content: assistantContent,
|
||||
timestamp: new Date().toISOString(),
|
||||
...(toolCalls.length > 0 && { toolCalls }),
|
||||
}
|
||||
updatedMessages.push(assistantMessage)
|
||||
logger.info(
|
||||
`[${tracker.requestId}] Saving assistant message with content (${assistantContent.length} chars) and ${toolCalls.length} tool calls`
|
||||
)
|
||||
} else {
|
||||
logger.info(
|
||||
`[${tracker.requestId}] No assistant content or tool calls to save (aborted before response)`
|
||||
)
|
||||
}
|
||||
|
||||
// Update chat in database immediately (without title)
|
||||
await db
|
||||
.update(copilotChats)
|
||||
.set({
|
||||
messages: updatedMessages,
|
||||
updatedAt: new Date(),
|
||||
})
|
||||
.where(eq(copilotChats.id, actualChatId!))
|
||||
|
||||
logger.info(`[${tracker.requestId}] Updated chat ${actualChatId} with new messages`, {
|
||||
messageCount: updatedMessages.length,
|
||||
savedUserMessage: true,
|
||||
savedAssistantMessage: assistantContent.trim().length > 0,
|
||||
})
|
||||
}
|
||||
} catch (error) {
|
||||
logger.error(`[${tracker.requestId}] Error processing stream:`, error)
|
||||
controller.error(error)
|
||||
} finally {
|
||||
controller.close()
|
||||
}
|
||||
},
|
||||
})
|
||||
|
||||
const response = new Response(transformedStream, {
|
||||
headers: {
|
||||
'Content-Type': 'text/event-stream',
|
||||
'Cache-Control': 'no-cache',
|
||||
Connection: 'keep-alive',
|
||||
'X-Accel-Buffering': 'no',
|
||||
},
|
||||
})
|
||||
|
||||
logger.info(`[${tracker.requestId}] Returning streaming response to client`, {
|
||||
duration: tracker.getDuration(),
|
||||
chatId: actualChatId,
|
||||
headers: {
|
||||
'Content-Type': 'text/event-stream',
|
||||
'Cache-Control': 'no-cache',
|
||||
Connection: 'keep-alive',
|
||||
},
|
||||
})
|
||||
|
||||
return response
|
||||
}
|
||||
|
||||
// For non-streaming responses
|
||||
const responseData = await simAgentResponse.json()
|
||||
logger.info(`[${tracker.requestId}] Non-streaming response from sim agent:`, {
|
||||
hasContent: !!responseData.content,
|
||||
contentLength: responseData.content?.length || 0,
|
||||
model: responseData.model,
|
||||
provider: responseData.provider,
|
||||
toolCallsCount: responseData.toolCalls?.length || 0,
|
||||
hasTokens: !!responseData.tokens,
|
||||
})
|
||||
|
||||
// Log tool calls if present
|
||||
if (responseData.toolCalls?.length > 0) {
|
||||
responseData.toolCalls.forEach((toolCall: any) => {
|
||||
logger.info(`[${tracker.requestId}] Tool call in response:`, {
|
||||
id: toolCall.id,
|
||||
name: toolCall.name,
|
||||
success: toolCall.success,
|
||||
result: `${JSON.stringify(toolCall.result).substring(0, 200)}...`,
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// Save messages if we have a chat
|
||||
if (currentChat && responseData.content) {
|
||||
const userMessage = {
|
||||
id: userMessageId || crypto.randomUUID(), // Use frontend ID if provided
|
||||
role: 'user',
|
||||
content: message,
|
||||
timestamp: new Date().toISOString(),
|
||||
}
|
||||
|
||||
const assistantMessage = {
|
||||
id: crypto.randomUUID(),
|
||||
role: 'assistant',
|
||||
content: responseData.content,
|
||||
timestamp: new Date().toISOString(),
|
||||
}
|
||||
|
||||
const updatedMessages = [...conversationHistory, userMessage, assistantMessage]
|
||||
|
||||
// Start title generation in parallel if this is first message (non-streaming)
|
||||
if (actualChatId && !currentChat.title && conversationHistory.length === 0) {
|
||||
logger.info(`[${tracker.requestId}] Starting title generation for non-streaming response`)
|
||||
generateChatTitleAsync(actualChatId, message, tracker.requestId).catch((error) => {
|
||||
logger.error(`[${tracker.requestId}] Title generation failed:`, error)
|
||||
})
|
||||
}
|
||||
|
||||
// Update chat in database immediately (without blocking for title)
|
||||
await db
|
||||
.update(copilotChats)
|
||||
.set({
|
||||
messages: updatedMessages,
|
||||
updatedAt: new Date(),
|
||||
})
|
||||
.where(eq(copilotChats.id, actualChatId!))
|
||||
}
|
||||
|
||||
logger.info(`[${tracker.requestId}] Returning non-streaming response`, {
|
||||
duration: tracker.getDuration(),
|
||||
chatId: actualChatId,
|
||||
responseLength: responseData.content?.length || 0,
|
||||
})
|
||||
|
||||
return NextResponse.json({
|
||||
success: true,
|
||||
response: responseData,
|
||||
chatId: actualChatId,
|
||||
metadata: {
|
||||
requestId: tracker.requestId,
|
||||
message,
|
||||
duration: tracker.getDuration(),
|
||||
},
|
||||
})
|
||||
} catch (error) {
|
||||
const duration = tracker.getDuration()
|
||||
|
||||
if (error instanceof z.ZodError) {
|
||||
logger.error(`[${tracker.requestId}] Validation error:`, {
|
||||
duration,
|
||||
errors: error.errors,
|
||||
})
|
||||
return NextResponse.json(
|
||||
{ error: 'Invalid request data', details: error.errors },
|
||||
{ status: 400 }
|
||||
)
|
||||
}
|
||||
|
||||
logger.error(`[${tracker.requestId}] Error handling copilot chat:`, {
|
||||
duration,
|
||||
error: error instanceof Error ? error.message : 'Unknown error',
|
||||
stack: error instanceof Error ? error.stack : undefined,
|
||||
})
|
||||
|
||||
return NextResponse.json(
|
||||
{ error: error instanceof Error ? error.message : 'Internal server error' },
|
||||
{ status: 500 }
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
export async function GET(req: NextRequest) {
|
||||
try {
|
||||
const { searchParams } = new URL(req.url)
|
||||
const workflowId = searchParams.get('workflowId')
|
||||
|
||||
if (!workflowId) {
|
||||
return createBadRequestResponse('workflowId is required')
|
||||
}
|
||||
|
||||
// Get authenticated user using consolidated helper
|
||||
const { userId: authenticatedUserId, isAuthenticated } =
|
||||
await authenticateCopilotRequestSessionOnly()
|
||||
if (!isAuthenticated || !authenticatedUserId) {
|
||||
return createUnauthorizedResponse()
|
||||
}
|
||||
|
||||
// Fetch chats for this user and workflow
|
||||
const chats = await db
|
||||
.select({
|
||||
id: copilotChats.id,
|
||||
title: copilotChats.title,
|
||||
model: copilotChats.model,
|
||||
messages: copilotChats.messages,
|
||||
createdAt: copilotChats.createdAt,
|
||||
updatedAt: copilotChats.updatedAt,
|
||||
})
|
||||
.from(copilotChats)
|
||||
.where(
|
||||
and(eq(copilotChats.userId, authenticatedUserId), eq(copilotChats.workflowId, workflowId))
|
||||
)
|
||||
.orderBy(desc(copilotChats.updatedAt))
|
||||
|
||||
// Transform the data to include message count
|
||||
const transformedChats = chats.map((chat) => ({
|
||||
id: chat.id,
|
||||
title: chat.title,
|
||||
model: chat.model,
|
||||
messages: Array.isArray(chat.messages) ? chat.messages : [],
|
||||
messageCount: Array.isArray(chat.messages) ? chat.messages.length : 0,
|
||||
previewYaml: null, // Not needed for chat list
|
||||
createdAt: chat.createdAt,
|
||||
updatedAt: chat.updatedAt,
|
||||
}))
|
||||
|
||||
logger.info(`Retrieved ${transformedChats.length} chats for workflow ${workflowId}`)
|
||||
|
||||
return NextResponse.json({
|
||||
success: true,
|
||||
chats: transformedChats,
|
||||
})
|
||||
} catch (error) {
|
||||
logger.error('Error fetching copilot chats:', error)
|
||||
return createInternalServerErrorResponse('Failed to fetch chats')
|
||||
}
|
||||
}
|
||||
561
apps/sim/app/api/copilot/chat/update-messages/route.test.ts
Normal file
561
apps/sim/app/api/copilot/chat/update-messages/route.test.ts
Normal file
@@ -0,0 +1,561 @@
|
||||
/**
|
||||
* Tests for copilot chat update-messages API route
|
||||
*
|
||||
* @vitest-environment node
|
||||
*/
|
||||
import { NextRequest } from 'next/server'
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
|
||||
import {
|
||||
createMockRequest,
|
||||
mockAuth,
|
||||
mockCryptoUuid,
|
||||
setupCommonApiMocks,
|
||||
} from '@/app/api/__test-utils__/utils'
|
||||
|
||||
describe('Copilot Chat Update Messages API Route', () => {
|
||||
const mockSelect = vi.fn()
|
||||
const mockFrom = vi.fn()
|
||||
const mockWhere = vi.fn()
|
||||
const mockLimit = vi.fn()
|
||||
const mockUpdate = vi.fn()
|
||||
const mockSet = vi.fn()
|
||||
|
||||
beforeEach(() => {
|
||||
vi.resetModules()
|
||||
setupCommonApiMocks()
|
||||
mockCryptoUuid()
|
||||
|
||||
mockSelect.mockReturnValue({ from: mockFrom })
|
||||
mockFrom.mockReturnValue({ where: mockWhere })
|
||||
mockWhere.mockReturnValue({ limit: mockLimit })
|
||||
mockLimit.mockResolvedValue([]) // Default: no chat found
|
||||
mockUpdate.mockReturnValue({ set: mockSet })
|
||||
mockSet.mockReturnValue({ where: vi.fn().mockResolvedValue(undefined) }) // Different where for update
|
||||
|
||||
vi.doMock('@/db', () => ({
|
||||
db: {
|
||||
select: mockSelect,
|
||||
update: mockUpdate,
|
||||
},
|
||||
}))
|
||||
|
||||
vi.doMock('@/db/schema', () => ({
|
||||
copilotChats: {
|
||||
id: 'id',
|
||||
userId: 'userId',
|
||||
messages: 'messages',
|
||||
updatedAt: 'updatedAt',
|
||||
},
|
||||
}))
|
||||
|
||||
vi.doMock('drizzle-orm', () => ({
|
||||
and: vi.fn((...conditions) => ({ conditions, type: 'and' })),
|
||||
eq: vi.fn((field, value) => ({ field, value, type: 'eq' })),
|
||||
}))
|
||||
})
|
||||
|
||||
afterEach(() => {
|
||||
vi.clearAllMocks()
|
||||
vi.restoreAllMocks()
|
||||
})
|
||||
|
||||
describe('POST', () => {
|
||||
it('should return 401 when user is not authenticated', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setUnauthenticated()
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
chatId: 'chat-123',
|
||||
messages: [
|
||||
{
|
||||
id: 'msg-1',
|
||||
role: 'user',
|
||||
content: 'Hello',
|
||||
timestamp: '2024-01-01T00:00:00.000Z',
|
||||
},
|
||||
],
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/chat/update-messages/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(401)
|
||||
const responseData = await response.json()
|
||||
expect(responseData).toEqual({ error: 'Unauthorized' })
|
||||
})
|
||||
|
||||
it('should return 400 for invalid request body - missing chatId', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
messages: [
|
||||
{
|
||||
id: 'msg-1',
|
||||
role: 'user',
|
||||
content: 'Hello',
|
||||
timestamp: '2024-01-01T00:00:00.000Z',
|
||||
},
|
||||
],
|
||||
// Missing chatId
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/chat/update-messages/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(500)
|
||||
const responseData = await response.json()
|
||||
expect(responseData.error).toBe('Failed to update chat messages')
|
||||
})
|
||||
|
||||
it('should return 400 for invalid request body - missing messages', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
chatId: 'chat-123',
|
||||
// Missing messages
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/chat/update-messages/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(500)
|
||||
const responseData = await response.json()
|
||||
expect(responseData.error).toBe('Failed to update chat messages')
|
||||
})
|
||||
|
||||
it('should return 400 for invalid message structure - missing required fields', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
chatId: 'chat-123',
|
||||
messages: [
|
||||
{
|
||||
id: 'msg-1',
|
||||
// Missing role, content, timestamp
|
||||
},
|
||||
],
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/chat/update-messages/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(500)
|
||||
const responseData = await response.json()
|
||||
expect(responseData.error).toBe('Failed to update chat messages')
|
||||
})
|
||||
|
||||
it('should return 400 for invalid message role', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
chatId: 'chat-123',
|
||||
messages: [
|
||||
{
|
||||
id: 'msg-1',
|
||||
role: 'invalid-role',
|
||||
content: 'Hello',
|
||||
timestamp: '2024-01-01T00:00:00.000Z',
|
||||
},
|
||||
],
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/chat/update-messages/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(500)
|
||||
const responseData = await response.json()
|
||||
expect(responseData.error).toBe('Failed to update chat messages')
|
||||
})
|
||||
|
||||
it('should return 404 when chat is not found', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
// Mock chat not found
|
||||
mockLimit.mockResolvedValueOnce([])
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
chatId: 'non-existent-chat',
|
||||
messages: [
|
||||
{
|
||||
id: 'msg-1',
|
||||
role: 'user',
|
||||
content: 'Hello',
|
||||
timestamp: '2024-01-01T00:00:00.000Z',
|
||||
},
|
||||
],
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/chat/update-messages/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(404)
|
||||
const responseData = await response.json()
|
||||
expect(responseData.error).toBe('Chat not found or unauthorized')
|
||||
})
|
||||
|
||||
it('should return 404 when chat belongs to different user', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
// Mock chat not found (due to user mismatch)
|
||||
mockLimit.mockResolvedValueOnce([])
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
chatId: 'other-user-chat',
|
||||
messages: [
|
||||
{
|
||||
id: 'msg-1',
|
||||
role: 'user',
|
||||
content: 'Hello',
|
||||
timestamp: '2024-01-01T00:00:00.000Z',
|
||||
},
|
||||
],
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/chat/update-messages/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(404)
|
||||
const responseData = await response.json()
|
||||
expect(responseData.error).toBe('Chat not found or unauthorized')
|
||||
})
|
||||
|
||||
it('should successfully update chat messages', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
// Mock chat exists - override the default empty array
|
||||
const existingChat = {
|
||||
id: 'chat-123',
|
||||
userId: 'user-123',
|
||||
messages: [],
|
||||
}
|
||||
mockLimit.mockResolvedValueOnce([existingChat])
|
||||
|
||||
const messages = [
|
||||
{
|
||||
id: 'msg-1',
|
||||
role: 'user',
|
||||
content: 'Hello, how are you?',
|
||||
timestamp: '2024-01-01T10:00:00.000Z',
|
||||
},
|
||||
{
|
||||
id: 'msg-2',
|
||||
role: 'assistant',
|
||||
content: 'I am doing well, thank you!',
|
||||
timestamp: '2024-01-01T10:01:00.000Z',
|
||||
},
|
||||
]
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
chatId: 'chat-123',
|
||||
messages,
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/chat/update-messages/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
const responseData = await response.json()
|
||||
expect(responseData).toEqual({
|
||||
success: true,
|
||||
messageCount: 2,
|
||||
})
|
||||
|
||||
// Verify database operations
|
||||
expect(mockSelect).toHaveBeenCalled()
|
||||
expect(mockUpdate).toHaveBeenCalled()
|
||||
expect(mockSet).toHaveBeenCalledWith({
|
||||
messages,
|
||||
updatedAt: expect.any(Date),
|
||||
})
|
||||
})
|
||||
|
||||
it('should successfully update chat messages with optional fields', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
// Mock chat exists
|
||||
const existingChat = {
|
||||
id: 'chat-456',
|
||||
userId: 'user-123',
|
||||
messages: [],
|
||||
}
|
||||
mockLimit.mockResolvedValueOnce([existingChat])
|
||||
|
||||
const messages = [
|
||||
{
|
||||
id: 'msg-1',
|
||||
role: 'user',
|
||||
content: 'Hello',
|
||||
timestamp: '2024-01-01T10:00:00.000Z',
|
||||
},
|
||||
{
|
||||
id: 'msg-2',
|
||||
role: 'assistant',
|
||||
content: 'Hi there!',
|
||||
timestamp: '2024-01-01T10:01:00.000Z',
|
||||
toolCalls: [
|
||||
{
|
||||
id: 'tool-1',
|
||||
name: 'get_weather',
|
||||
arguments: { location: 'NYC' },
|
||||
},
|
||||
],
|
||||
contentBlocks: [
|
||||
{
|
||||
type: 'text',
|
||||
content: 'Here is the weather information',
|
||||
},
|
||||
],
|
||||
},
|
||||
]
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
chatId: 'chat-456',
|
||||
messages,
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/chat/update-messages/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
const responseData = await response.json()
|
||||
expect(responseData).toEqual({
|
||||
success: true,
|
||||
messageCount: 2,
|
||||
})
|
||||
|
||||
expect(mockSet).toHaveBeenCalledWith({
|
||||
messages,
|
||||
updatedAt: expect.any(Date),
|
||||
})
|
||||
})
|
||||
|
||||
it('should handle empty messages array', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
// Mock chat exists
|
||||
const existingChat = {
|
||||
id: 'chat-789',
|
||||
userId: 'user-123',
|
||||
messages: [],
|
||||
}
|
||||
mockLimit.mockResolvedValueOnce([existingChat])
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
chatId: 'chat-789',
|
||||
messages: [],
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/chat/update-messages/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
const responseData = await response.json()
|
||||
expect(responseData).toEqual({
|
||||
success: true,
|
||||
messageCount: 0,
|
||||
})
|
||||
|
||||
expect(mockSet).toHaveBeenCalledWith({
|
||||
messages: [],
|
||||
updatedAt: expect.any(Date),
|
||||
})
|
||||
})
|
||||
|
||||
it('should handle database errors during chat lookup', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
// Mock database error during chat lookup
|
||||
mockLimit.mockRejectedValueOnce(new Error('Database connection failed'))
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
chatId: 'chat-123',
|
||||
messages: [
|
||||
{
|
||||
id: 'msg-1',
|
||||
role: 'user',
|
||||
content: 'Hello',
|
||||
timestamp: '2024-01-01T00:00:00.000Z',
|
||||
},
|
||||
],
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/chat/update-messages/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(500)
|
||||
const responseData = await response.json()
|
||||
expect(responseData.error).toBe('Failed to update chat messages')
|
||||
})
|
||||
|
||||
it('should handle database errors during update operation', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
// Mock chat exists
|
||||
const existingChat = {
|
||||
id: 'chat-123',
|
||||
userId: 'user-123',
|
||||
messages: [],
|
||||
}
|
||||
mockLimit.mockResolvedValueOnce([existingChat])
|
||||
|
||||
// Mock database error during update
|
||||
mockSet.mockReturnValueOnce({
|
||||
where: vi.fn().mockRejectedValue(new Error('Update operation failed')),
|
||||
})
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
chatId: 'chat-123',
|
||||
messages: [
|
||||
{
|
||||
id: 'msg-1',
|
||||
role: 'user',
|
||||
content: 'Hello',
|
||||
timestamp: '2024-01-01T00:00:00.000Z',
|
||||
},
|
||||
],
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/chat/update-messages/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(500)
|
||||
const responseData = await response.json()
|
||||
expect(responseData.error).toBe('Failed to update chat messages')
|
||||
})
|
||||
|
||||
it('should handle JSON parsing errors in request body', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
// Create a request with invalid JSON
|
||||
const req = new NextRequest('http://localhost:3000/api/copilot/chat/update-messages', {
|
||||
method: 'POST',
|
||||
body: '{invalid-json',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/chat/update-messages/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(500)
|
||||
const responseData = await response.json()
|
||||
expect(responseData.error).toBe('Failed to update chat messages')
|
||||
})
|
||||
|
||||
it('should handle large message arrays', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
// Mock chat exists
|
||||
const existingChat = {
|
||||
id: 'chat-large',
|
||||
userId: 'user-123',
|
||||
messages: [],
|
||||
}
|
||||
mockLimit.mockResolvedValueOnce([existingChat])
|
||||
|
||||
// Create a large array of messages
|
||||
const messages = Array.from({ length: 100 }, (_, i) => ({
|
||||
id: `msg-${i + 1}`,
|
||||
role: i % 2 === 0 ? 'user' : 'assistant',
|
||||
content: `Message ${i + 1}`,
|
||||
timestamp: new Date(2024, 0, 1, 10, i).toISOString(),
|
||||
}))
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
chatId: 'chat-large',
|
||||
messages,
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/chat/update-messages/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
const responseData = await response.json()
|
||||
expect(responseData).toEqual({
|
||||
success: true,
|
||||
messageCount: 100,
|
||||
})
|
||||
|
||||
expect(mockSet).toHaveBeenCalledWith({
|
||||
messages,
|
||||
updatedAt: expect.any(Date),
|
||||
})
|
||||
})
|
||||
|
||||
it('should handle messages with both user and assistant roles', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
// Mock chat exists
|
||||
const existingChat = {
|
||||
id: 'chat-mixed',
|
||||
userId: 'user-123',
|
||||
messages: [],
|
||||
}
|
||||
mockLimit.mockResolvedValueOnce([existingChat])
|
||||
|
||||
const messages = [
|
||||
{
|
||||
id: 'msg-1',
|
||||
role: 'user',
|
||||
content: 'What is the weather like?',
|
||||
timestamp: '2024-01-01T10:00:00.000Z',
|
||||
},
|
||||
{
|
||||
id: 'msg-2',
|
||||
role: 'assistant',
|
||||
content: 'Let me check the weather for you.',
|
||||
timestamp: '2024-01-01T10:01:00.000Z',
|
||||
toolCalls: [
|
||||
{
|
||||
id: 'tool-weather',
|
||||
name: 'get_weather',
|
||||
arguments: { location: 'current' },
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
id: 'msg-3',
|
||||
role: 'assistant',
|
||||
content: 'The weather is sunny and 75°F.',
|
||||
timestamp: '2024-01-01T10:02:00.000Z',
|
||||
},
|
||||
{
|
||||
id: 'msg-4',
|
||||
role: 'user',
|
||||
content: 'Thank you!',
|
||||
timestamp: '2024-01-01T10:03:00.000Z',
|
||||
},
|
||||
]
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
chatId: 'chat-mixed',
|
||||
messages,
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/chat/update-messages/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
const responseData = await response.json()
|
||||
expect(responseData).toEqual({
|
||||
success: true,
|
||||
messageCount: 4,
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
82
apps/sim/app/api/copilot/chat/update-messages/route.ts
Normal file
82
apps/sim/app/api/copilot/chat/update-messages/route.ts
Normal file
@@ -0,0 +1,82 @@
|
||||
import { and, eq } from 'drizzle-orm'
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { z } from 'zod'
|
||||
import {
|
||||
authenticateCopilotRequestSessionOnly,
|
||||
createInternalServerErrorResponse,
|
||||
createNotFoundResponse,
|
||||
createRequestTracker,
|
||||
createUnauthorizedResponse,
|
||||
} from '@/lib/copilot/auth'
|
||||
import { createLogger } from '@/lib/logs/console/logger'
|
||||
import { db } from '@/db'
|
||||
import { copilotChats } from '@/db/schema'
|
||||
|
||||
const logger = createLogger('CopilotChatUpdateAPI')
|
||||
|
||||
const UpdateMessagesSchema = z.object({
|
||||
chatId: z.string(),
|
||||
messages: z.array(
|
||||
z.object({
|
||||
id: z.string(),
|
||||
role: z.enum(['user', 'assistant']),
|
||||
content: z.string(),
|
||||
timestamp: z.string(),
|
||||
toolCalls: z.array(z.any()).optional(),
|
||||
contentBlocks: z.array(z.any()).optional(),
|
||||
})
|
||||
),
|
||||
})
|
||||
|
||||
export async function POST(req: NextRequest) {
|
||||
const tracker = createRequestTracker()
|
||||
|
||||
try {
|
||||
const { userId, isAuthenticated } = await authenticateCopilotRequestSessionOnly()
|
||||
if (!isAuthenticated || !userId) {
|
||||
return createUnauthorizedResponse()
|
||||
}
|
||||
|
||||
const body = await req.json()
|
||||
const { chatId, messages } = UpdateMessagesSchema.parse(body)
|
||||
|
||||
logger.info(`[${tracker.requestId}] Updating chat messages`, {
|
||||
userId,
|
||||
chatId,
|
||||
messageCount: messages.length,
|
||||
})
|
||||
|
||||
// Verify that the chat belongs to the user
|
||||
const [chat] = await db
|
||||
.select()
|
||||
.from(copilotChats)
|
||||
.where(and(eq(copilotChats.id, chatId), eq(copilotChats.userId, userId)))
|
||||
.limit(1)
|
||||
|
||||
if (!chat) {
|
||||
return createNotFoundResponse('Chat not found or unauthorized')
|
||||
}
|
||||
|
||||
// Update chat with new messages
|
||||
await db
|
||||
.update(copilotChats)
|
||||
.set({
|
||||
messages: messages,
|
||||
updatedAt: new Date(),
|
||||
})
|
||||
.where(eq(copilotChats.id, chatId))
|
||||
|
||||
logger.info(`[${tracker.requestId}] Successfully updated chat messages`, {
|
||||
chatId,
|
||||
newMessageCount: messages.length,
|
||||
})
|
||||
|
||||
return NextResponse.json({
|
||||
success: true,
|
||||
messageCount: messages.length,
|
||||
})
|
||||
} catch (error) {
|
||||
logger.error(`[${tracker.requestId}] Error updating chat messages:`, error)
|
||||
return createInternalServerErrorResponse('Failed to update chat messages')
|
||||
}
|
||||
}
|
||||
@@ -1,138 +0,0 @@
|
||||
import { and, eq } from 'drizzle-orm'
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { getSession } from '@/lib/auth'
|
||||
import { createLogger } from '@/lib/logs/console/logger'
|
||||
import { db } from '@/db'
|
||||
import { copilotCheckpoints, workflow as workflowTable } from '@/db/schema'
|
||||
|
||||
const logger = createLogger('RevertCheckpointAPI')
|
||||
|
||||
/**
|
||||
* POST /api/copilot/checkpoints/[id]/revert
|
||||
* Revert workflow to a specific checkpoint
|
||||
*/
|
||||
export async function POST(request: NextRequest, { params }: { params: Promise<{ id: string }> }) {
|
||||
const requestId = crypto.randomUUID().slice(0, 8)
|
||||
const checkpointId = (await params).id
|
||||
|
||||
try {
|
||||
const session = await getSession()
|
||||
if (!session?.user?.id) {
|
||||
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
|
||||
}
|
||||
|
||||
logger.info(`[${requestId}] Reverting to checkpoint: ${checkpointId}`, {
|
||||
userId: session.user.id,
|
||||
})
|
||||
|
||||
// Get the checkpoint
|
||||
const checkpoint = await db
|
||||
.select()
|
||||
.from(copilotCheckpoints)
|
||||
.where(
|
||||
and(eq(copilotCheckpoints.id, checkpointId), eq(copilotCheckpoints.userId, session.user.id))
|
||||
)
|
||||
.limit(1)
|
||||
|
||||
if (!checkpoint.length) {
|
||||
return NextResponse.json({ error: 'Checkpoint not found' }, { status: 404 })
|
||||
}
|
||||
|
||||
const checkpointData = checkpoint[0]
|
||||
const { workflowId, yaml: yamlContent } = checkpointData
|
||||
|
||||
logger.info(`[${requestId}] Processing checkpoint revert`, {
|
||||
workflowId,
|
||||
yamlLength: yamlContent.length,
|
||||
})
|
||||
|
||||
// Use the consolidated YAML endpoint instead of duplicating the processing logic
|
||||
const yamlEndpointUrl = `${process.env.NEXT_PUBLIC_BASE_URL || 'http://localhost:3000'}/api/workflows/${workflowId}/yaml`
|
||||
|
||||
const yamlResponse = await fetch(yamlEndpointUrl, {
|
||||
method: 'PUT',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
// Forward auth cookies from the original request
|
||||
Cookie: request.headers.get('Cookie') || '',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
yamlContent,
|
||||
description: `Reverted to checkpoint from ${new Date(checkpointData.createdAt).toLocaleString()}`,
|
||||
source: 'checkpoint_revert',
|
||||
applyAutoLayout: true,
|
||||
createCheckpoint: false, // Don't create a checkpoint when reverting to one
|
||||
}),
|
||||
})
|
||||
|
||||
if (!yamlResponse.ok) {
|
||||
const errorData = await yamlResponse.json()
|
||||
logger.error(`[${requestId}] Consolidated YAML endpoint failed:`, errorData)
|
||||
return NextResponse.json(
|
||||
{
|
||||
success: false,
|
||||
error: 'Failed to revert checkpoint via YAML endpoint',
|
||||
details: errorData.errors || [errorData.error || 'Unknown error'],
|
||||
},
|
||||
{ status: yamlResponse.status }
|
||||
)
|
||||
}
|
||||
|
||||
const yamlResult = await yamlResponse.json()
|
||||
|
||||
if (!yamlResult.success) {
|
||||
logger.error(`[${requestId}] YAML endpoint returned failure:`, yamlResult)
|
||||
return NextResponse.json(
|
||||
{
|
||||
success: false,
|
||||
error: 'Failed to process checkpoint YAML',
|
||||
details: yamlResult.errors || ['Unknown error'],
|
||||
},
|
||||
{ status: 400 }
|
||||
)
|
||||
}
|
||||
|
||||
// Update workflow's lastSynced timestamp
|
||||
await db
|
||||
.update(workflowTable)
|
||||
.set({
|
||||
lastSynced: new Date(),
|
||||
updatedAt: new Date(),
|
||||
})
|
||||
.where(eq(workflowTable.id, workflowId))
|
||||
|
||||
// Notify the socket server to tell clients to rehydrate stores from database
|
||||
try {
|
||||
const socketUrl = process.env.SOCKET_URL || 'http://localhost:3002'
|
||||
await fetch(`${socketUrl}/api/copilot-workflow-edit`, {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
workflowId,
|
||||
description: `Reverted to checkpoint from ${new Date(checkpointData.createdAt).toLocaleString()}`,
|
||||
}),
|
||||
})
|
||||
logger.info(`[${requestId}] Notified socket server of checkpoint revert`)
|
||||
} catch (socketError) {
|
||||
logger.warn(`[${requestId}] Failed to notify socket server:`, socketError)
|
||||
}
|
||||
|
||||
logger.info(`[${requestId}] Successfully reverted to checkpoint`)
|
||||
|
||||
return NextResponse.json({
|
||||
success: true,
|
||||
message: `Successfully reverted to checkpoint from ${new Date(checkpointData.createdAt).toLocaleString()}`,
|
||||
summary: yamlResult.summary || `Restored workflow from checkpoint.`,
|
||||
warnings: yamlResult.warnings || [],
|
||||
data: yamlResult.data,
|
||||
})
|
||||
} catch (error) {
|
||||
logger.error(`[${requestId}] Error reverting checkpoint:`, error)
|
||||
return NextResponse.json(
|
||||
{
|
||||
error: `Failed to revert checkpoint: ${error instanceof Error ? error.message : 'Unknown error'}`,
|
||||
},
|
||||
{ status: 500 }
|
||||
)
|
||||
}
|
||||
}
|
||||
778
apps/sim/app/api/copilot/checkpoints/revert/route.test.ts
Normal file
778
apps/sim/app/api/copilot/checkpoints/revert/route.test.ts
Normal file
@@ -0,0 +1,778 @@
|
||||
/**
|
||||
* Tests for copilot checkpoints revert API route
|
||||
*
|
||||
* @vitest-environment node
|
||||
*/
|
||||
import { NextRequest } from 'next/server'
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
|
||||
import {
|
||||
createMockRequest,
|
||||
mockAuth,
|
||||
mockCryptoUuid,
|
||||
setupCommonApiMocks,
|
||||
} from '@/app/api/__test-utils__/utils'
|
||||
|
||||
describe('Copilot Checkpoints Revert API Route', () => {
|
||||
const mockSelect = vi.fn()
|
||||
const mockFrom = vi.fn()
|
||||
const mockWhere = vi.fn()
|
||||
const mockThen = vi.fn()
|
||||
|
||||
beforeEach(() => {
|
||||
vi.resetModules()
|
||||
setupCommonApiMocks()
|
||||
mockCryptoUuid()
|
||||
|
||||
mockSelect.mockReturnValue({ from: mockFrom })
|
||||
mockFrom.mockReturnValue({ where: mockWhere })
|
||||
mockWhere.mockReturnValue({ then: mockThen })
|
||||
mockThen.mockResolvedValue(null) // Default: no data found
|
||||
|
||||
vi.doMock('@/db', () => ({
|
||||
db: {
|
||||
select: mockSelect,
|
||||
},
|
||||
}))
|
||||
|
||||
vi.doMock('@/db/schema', () => ({
|
||||
workflowCheckpoints: {
|
||||
id: 'id',
|
||||
userId: 'userId',
|
||||
workflowId: 'workflowId',
|
||||
workflowState: 'workflowState',
|
||||
},
|
||||
workflow: {
|
||||
id: 'id',
|
||||
userId: 'userId',
|
||||
},
|
||||
}))
|
||||
|
||||
vi.doMock('drizzle-orm', () => ({
|
||||
and: vi.fn((...conditions) => ({ conditions, type: 'and' })),
|
||||
eq: vi.fn((field, value) => ({ field, value, type: 'eq' })),
|
||||
}))
|
||||
|
||||
global.fetch = vi.fn()
|
||||
|
||||
vi.spyOn(Date, 'now').mockReturnValue(1640995200000)
|
||||
|
||||
const originalDate = Date
|
||||
vi.spyOn(global, 'Date').mockImplementation(((...args: any[]) => {
|
||||
if (args.length === 0) {
|
||||
const mockDate = new originalDate('2024-01-01T00:00:00.000Z')
|
||||
return mockDate
|
||||
}
|
||||
if (args.length === 1) {
|
||||
return new originalDate(args[0])
|
||||
}
|
||||
return new originalDate(args[0], args[1], args[2], args[3], args[4], args[5], args[6])
|
||||
}) as any)
|
||||
})
|
||||
|
||||
afterEach(() => {
|
||||
vi.clearAllMocks()
|
||||
vi.restoreAllMocks()
|
||||
})
|
||||
|
||||
describe('POST', () => {
|
||||
it('should return 401 when user is not authenticated', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setUnauthenticated()
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
checkpointId: 'checkpoint-123',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/checkpoints/revert/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(401)
|
||||
const responseData = await response.json()
|
||||
expect(responseData).toEqual({ error: 'Unauthorized' })
|
||||
})
|
||||
|
||||
it('should return 500 for invalid request body - missing checkpointId', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
// Missing checkpointId
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/checkpoints/revert/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(500)
|
||||
const responseData = await response.json()
|
||||
expect(responseData.error).toBe('Failed to revert to checkpoint')
|
||||
})
|
||||
|
||||
it('should return 500 for empty checkpointId', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
checkpointId: '',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/checkpoints/revert/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(500)
|
||||
const responseData = await response.json()
|
||||
expect(responseData.error).toBe('Failed to revert to checkpoint')
|
||||
})
|
||||
|
||||
it('should return 404 when checkpoint is not found', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
// Mock checkpoint not found
|
||||
mockThen.mockResolvedValueOnce(undefined)
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
checkpointId: 'non-existent-checkpoint',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/checkpoints/revert/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(404)
|
||||
const responseData = await response.json()
|
||||
expect(responseData.error).toBe('Checkpoint not found or access denied')
|
||||
})
|
||||
|
||||
it('should return 404 when checkpoint belongs to different user', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
// Mock checkpoint not found (due to user mismatch in query)
|
||||
mockThen.mockResolvedValueOnce(undefined)
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
checkpointId: 'other-user-checkpoint',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/checkpoints/revert/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(404)
|
||||
const responseData = await response.json()
|
||||
expect(responseData.error).toBe('Checkpoint not found or access denied')
|
||||
})
|
||||
|
||||
it('should return 404 when workflow is not found', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
// Mock checkpoint found but workflow not found
|
||||
const mockCheckpoint = {
|
||||
id: 'checkpoint-123',
|
||||
workflowId: 'workflow-456',
|
||||
userId: 'user-123',
|
||||
workflowState: { blocks: {}, edges: [] },
|
||||
}
|
||||
|
||||
mockThen
|
||||
.mockResolvedValueOnce(mockCheckpoint) // Checkpoint found
|
||||
.mockResolvedValueOnce(undefined) // Workflow not found
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
checkpointId: 'checkpoint-123',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/checkpoints/revert/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(404)
|
||||
const responseData = await response.json()
|
||||
expect(responseData.error).toBe('Workflow not found')
|
||||
})
|
||||
|
||||
it('should return 401 when workflow belongs to different user', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
// Mock checkpoint found but workflow belongs to different user
|
||||
const mockCheckpoint = {
|
||||
id: 'checkpoint-123',
|
||||
workflowId: 'workflow-456',
|
||||
userId: 'user-123',
|
||||
workflowState: { blocks: {}, edges: [] },
|
||||
}
|
||||
|
||||
const mockWorkflow = {
|
||||
id: 'workflow-456',
|
||||
userId: 'different-user',
|
||||
}
|
||||
|
||||
mockThen
|
||||
.mockResolvedValueOnce(mockCheckpoint) // Checkpoint found
|
||||
.mockResolvedValueOnce(mockWorkflow) // Workflow found but different user
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
checkpointId: 'checkpoint-123',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/checkpoints/revert/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(401)
|
||||
const responseData = await response.json()
|
||||
expect(responseData).toEqual({ error: 'Unauthorized' })
|
||||
})
|
||||
|
||||
it('should successfully revert checkpoint with basic workflow state', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
const mockCheckpoint = {
|
||||
id: 'checkpoint-123',
|
||||
workflowId: 'workflow-456',
|
||||
userId: 'user-123',
|
||||
workflowState: {
|
||||
blocks: { block1: { type: 'start' } },
|
||||
edges: [{ from: 'block1', to: 'block2' }],
|
||||
loops: {},
|
||||
parallels: {},
|
||||
isDeployed: true,
|
||||
deploymentStatuses: { production: 'deployed' },
|
||||
hasActiveWebhook: false,
|
||||
},
|
||||
}
|
||||
|
||||
const mockWorkflow = {
|
||||
id: 'workflow-456',
|
||||
userId: 'user-123',
|
||||
}
|
||||
|
||||
mockThen
|
||||
.mockResolvedValueOnce(mockCheckpoint) // Checkpoint found
|
||||
.mockResolvedValueOnce(mockWorkflow) // Workflow found
|
||||
|
||||
// Mock successful state API call
|
||||
|
||||
;(global.fetch as any).mockResolvedValue({
|
||||
ok: true,
|
||||
json: () => Promise.resolve({ success: true }),
|
||||
})
|
||||
|
||||
const req = new NextRequest('http://localhost:3000/api/copilot/checkpoints/revert', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
Cookie: 'session=test-session',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
checkpointId: 'checkpoint-123',
|
||||
}),
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/checkpoints/revert/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
const responseData = await response.json()
|
||||
expect(responseData).toEqual({
|
||||
success: true,
|
||||
workflowId: 'workflow-456',
|
||||
checkpointId: 'checkpoint-123',
|
||||
revertedAt: '2024-01-01T00:00:00.000Z',
|
||||
checkpoint: {
|
||||
id: 'checkpoint-123',
|
||||
workflowState: {
|
||||
blocks: { block1: { type: 'start' } },
|
||||
edges: [{ from: 'block1', to: 'block2' }],
|
||||
loops: {},
|
||||
parallels: {},
|
||||
isDeployed: true,
|
||||
deploymentStatuses: { production: 'deployed' },
|
||||
hasActiveWebhook: false,
|
||||
lastSaved: 1640995200000,
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
// Verify fetch was called with correct parameters
|
||||
expect(global.fetch).toHaveBeenCalledWith(
|
||||
'http://localhost:3000/api/workflows/workflow-456/state',
|
||||
{
|
||||
method: 'PUT',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
Cookie: 'session=test-session',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
blocks: { block1: { type: 'start' } },
|
||||
edges: [{ from: 'block1', to: 'block2' }],
|
||||
loops: {},
|
||||
parallels: {},
|
||||
isDeployed: true,
|
||||
deploymentStatuses: { production: 'deployed' },
|
||||
hasActiveWebhook: false,
|
||||
lastSaved: 1640995200000,
|
||||
}),
|
||||
}
|
||||
)
|
||||
})
|
||||
|
||||
it('should handle checkpoint state with valid deployedAt date', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
const mockCheckpoint = {
|
||||
id: 'checkpoint-with-date',
|
||||
workflowId: 'workflow-456',
|
||||
userId: 'user-123',
|
||||
workflowState: {
|
||||
blocks: {},
|
||||
edges: [],
|
||||
deployedAt: '2024-01-01T12:00:00.000Z',
|
||||
isDeployed: true,
|
||||
},
|
||||
}
|
||||
|
||||
const mockWorkflow = {
|
||||
id: 'workflow-456',
|
||||
userId: 'user-123',
|
||||
}
|
||||
|
||||
mockThen.mockResolvedValueOnce(mockCheckpoint).mockResolvedValueOnce(mockWorkflow)
|
||||
|
||||
;(global.fetch as any).mockResolvedValue({
|
||||
ok: true,
|
||||
json: () => Promise.resolve({ success: true }),
|
||||
})
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
checkpointId: 'checkpoint-with-date',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/checkpoints/revert/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
const responseData = await response.json()
|
||||
expect(responseData.checkpoint.workflowState.deployedAt).toBeDefined()
|
||||
expect(responseData.checkpoint.workflowState.deployedAt).toEqual('2024-01-01T12:00:00.000Z')
|
||||
})
|
||||
|
||||
it('should handle checkpoint state with invalid deployedAt date', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
const mockCheckpoint = {
|
||||
id: 'checkpoint-invalid-date',
|
||||
workflowId: 'workflow-456',
|
||||
userId: 'user-123',
|
||||
workflowState: {
|
||||
blocks: {},
|
||||
edges: [],
|
||||
deployedAt: 'invalid-date',
|
||||
isDeployed: true,
|
||||
},
|
||||
}
|
||||
|
||||
const mockWorkflow = {
|
||||
id: 'workflow-456',
|
||||
userId: 'user-123',
|
||||
}
|
||||
|
||||
mockThen.mockResolvedValueOnce(mockCheckpoint).mockResolvedValueOnce(mockWorkflow)
|
||||
|
||||
;(global.fetch as any).mockResolvedValue({
|
||||
ok: true,
|
||||
json: () => Promise.resolve({ success: true }),
|
||||
})
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
checkpointId: 'checkpoint-invalid-date',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/checkpoints/revert/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
const responseData = await response.json()
|
||||
// Invalid date should be filtered out
|
||||
expect(responseData.checkpoint.workflowState.deployedAt).toBeUndefined()
|
||||
})
|
||||
|
||||
it('should handle checkpoint state with null/undefined values', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
const mockCheckpoint = {
|
||||
id: 'checkpoint-null-values',
|
||||
workflowId: 'workflow-456',
|
||||
userId: 'user-123',
|
||||
workflowState: {
|
||||
blocks: null,
|
||||
edges: undefined,
|
||||
loops: null,
|
||||
parallels: undefined,
|
||||
deploymentStatuses: null,
|
||||
},
|
||||
}
|
||||
|
||||
const mockWorkflow = {
|
||||
id: 'workflow-456',
|
||||
userId: 'user-123',
|
||||
}
|
||||
|
||||
mockThen.mockResolvedValueOnce(mockCheckpoint).mockResolvedValueOnce(mockWorkflow)
|
||||
|
||||
;(global.fetch as any).mockResolvedValue({
|
||||
ok: true,
|
||||
json: () => Promise.resolve({ success: true }),
|
||||
})
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
checkpointId: 'checkpoint-null-values',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/checkpoints/revert/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
const responseData = await response.json()
|
||||
|
||||
// Null/undefined values should be replaced with defaults
|
||||
expect(responseData.checkpoint.workflowState).toEqual({
|
||||
blocks: {},
|
||||
edges: [],
|
||||
loops: {},
|
||||
parallels: {},
|
||||
isDeployed: false,
|
||||
deploymentStatuses: {},
|
||||
hasActiveWebhook: false,
|
||||
lastSaved: 1640995200000,
|
||||
})
|
||||
})
|
||||
|
||||
it('should return 500 when state API call fails', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
const mockCheckpoint = {
|
||||
id: 'checkpoint-123',
|
||||
workflowId: 'workflow-456',
|
||||
userId: 'user-123',
|
||||
workflowState: { blocks: {}, edges: [] },
|
||||
}
|
||||
|
||||
const mockWorkflow = {
|
||||
id: 'workflow-456',
|
||||
userId: 'user-123',
|
||||
}
|
||||
|
||||
mockThen
|
||||
.mockResolvedValueOnce(mockCheckpoint)
|
||||
.mockResolvedValueOnce(mockWorkflow)
|
||||
|
||||
// Mock failed state API call
|
||||
|
||||
;(global.fetch as any).mockResolvedValue({
|
||||
ok: false,
|
||||
text: () => Promise.resolve('State validation failed'),
|
||||
})
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
checkpointId: 'checkpoint-123',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/checkpoints/revert/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(500)
|
||||
const responseData = await response.json()
|
||||
expect(responseData.error).toBe('Failed to revert workflow to checkpoint')
|
||||
})
|
||||
|
||||
it('should handle database errors during checkpoint lookup', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
// Mock database error
|
||||
mockThen.mockRejectedValueOnce(new Error('Database connection failed'))
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
checkpointId: 'checkpoint-123',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/checkpoints/revert/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(500)
|
||||
const responseData = await response.json()
|
||||
expect(responseData.error).toBe('Failed to revert to checkpoint')
|
||||
})
|
||||
|
||||
it('should handle database errors during workflow lookup', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
const mockCheckpoint = {
|
||||
id: 'checkpoint-123',
|
||||
workflowId: 'workflow-456',
|
||||
userId: 'user-123',
|
||||
workflowState: { blocks: {}, edges: [] },
|
||||
}
|
||||
|
||||
mockThen
|
||||
.mockResolvedValueOnce(mockCheckpoint) // Checkpoint found
|
||||
.mockRejectedValueOnce(new Error('Database error during workflow lookup')) // Workflow lookup fails
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
checkpointId: 'checkpoint-123',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/checkpoints/revert/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(500)
|
||||
const responseData = await response.json()
|
||||
expect(responseData.error).toBe('Failed to revert to checkpoint')
|
||||
})
|
||||
|
||||
it('should handle fetch network errors', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
const mockCheckpoint = {
|
||||
id: 'checkpoint-123',
|
||||
workflowId: 'workflow-456',
|
||||
userId: 'user-123',
|
||||
workflowState: { blocks: {}, edges: [] },
|
||||
}
|
||||
|
||||
const mockWorkflow = {
|
||||
id: 'workflow-456',
|
||||
userId: 'user-123',
|
||||
}
|
||||
|
||||
mockThen
|
||||
.mockResolvedValueOnce(mockCheckpoint)
|
||||
.mockResolvedValueOnce(mockWorkflow)
|
||||
|
||||
// Mock fetch network error
|
||||
|
||||
;(global.fetch as any).mockRejectedValue(new Error('Network error'))
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
checkpointId: 'checkpoint-123',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/checkpoints/revert/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(500)
|
||||
const responseData = await response.json()
|
||||
expect(responseData.error).toBe('Failed to revert to checkpoint')
|
||||
})
|
||||
|
||||
it('should handle JSON parsing errors in request body', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
// Create a request with invalid JSON
|
||||
const req = new NextRequest('http://localhost:3000/api/copilot/checkpoints/revert', {
|
||||
method: 'POST',
|
||||
body: '{invalid-json',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/checkpoints/revert/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(500)
|
||||
const responseData = await response.json()
|
||||
expect(responseData.error).toBe('Failed to revert to checkpoint')
|
||||
})
|
||||
|
||||
it('should forward cookies to state API call', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
const mockCheckpoint = {
|
||||
id: 'checkpoint-123',
|
||||
workflowId: 'workflow-456',
|
||||
userId: 'user-123',
|
||||
workflowState: { blocks: {}, edges: [] },
|
||||
}
|
||||
|
||||
const mockWorkflow = {
|
||||
id: 'workflow-456',
|
||||
userId: 'user-123',
|
||||
}
|
||||
|
||||
mockThen.mockResolvedValueOnce(mockCheckpoint).mockResolvedValueOnce(mockWorkflow)
|
||||
|
||||
;(global.fetch as any).mockResolvedValue({
|
||||
ok: true,
|
||||
json: () => Promise.resolve({ success: true }),
|
||||
})
|
||||
|
||||
const req = new NextRequest('http://localhost:3000/api/copilot/checkpoints/revert', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
Cookie: 'session=test-session; auth=token123',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
checkpointId: 'checkpoint-123',
|
||||
}),
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/checkpoints/revert/route')
|
||||
await POST(req)
|
||||
|
||||
expect(global.fetch).toHaveBeenCalledWith(
|
||||
'http://localhost:3000/api/workflows/workflow-456/state',
|
||||
{
|
||||
method: 'PUT',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
Cookie: 'session=test-session; auth=token123',
|
||||
},
|
||||
body: expect.any(String),
|
||||
}
|
||||
)
|
||||
})
|
||||
|
||||
it('should handle missing cookies gracefully', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
const mockCheckpoint = {
|
||||
id: 'checkpoint-123',
|
||||
workflowId: 'workflow-456',
|
||||
userId: 'user-123',
|
||||
workflowState: { blocks: {}, edges: [] },
|
||||
}
|
||||
|
||||
const mockWorkflow = {
|
||||
id: 'workflow-456',
|
||||
userId: 'user-123',
|
||||
}
|
||||
|
||||
mockThen.mockResolvedValueOnce(mockCheckpoint).mockResolvedValueOnce(mockWorkflow)
|
||||
|
||||
;(global.fetch as any).mockResolvedValue({
|
||||
ok: true,
|
||||
json: () => Promise.resolve({ success: true }),
|
||||
})
|
||||
|
||||
const req = new NextRequest('http://localhost:3000/api/copilot/checkpoints/revert', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
// No Cookie header
|
||||
},
|
||||
body: JSON.stringify({
|
||||
checkpointId: 'checkpoint-123',
|
||||
}),
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/checkpoints/revert/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
expect(global.fetch).toHaveBeenCalledWith(
|
||||
'http://localhost:3000/api/workflows/workflow-456/state',
|
||||
{
|
||||
method: 'PUT',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
Cookie: '', // Empty string when no cookies
|
||||
},
|
||||
body: expect.any(String),
|
||||
}
|
||||
)
|
||||
})
|
||||
|
||||
it('should handle complex checkpoint state with all fields', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
const mockCheckpoint = {
|
||||
id: 'checkpoint-complex',
|
||||
workflowId: 'workflow-456',
|
||||
userId: 'user-123',
|
||||
workflowState: {
|
||||
blocks: {
|
||||
start: { type: 'start', config: {} },
|
||||
http: { type: 'http', config: { url: 'https://api.example.com' } },
|
||||
end: { type: 'end', config: {} },
|
||||
},
|
||||
edges: [
|
||||
{ from: 'start', to: 'http' },
|
||||
{ from: 'http', to: 'end' },
|
||||
],
|
||||
loops: {
|
||||
loop1: { condition: 'true', iterations: 3 },
|
||||
},
|
||||
parallels: {
|
||||
parallel1: { branches: ['branch1', 'branch2'] },
|
||||
},
|
||||
isDeployed: true,
|
||||
deploymentStatuses: {
|
||||
production: 'deployed',
|
||||
staging: 'pending',
|
||||
},
|
||||
hasActiveWebhook: true,
|
||||
deployedAt: '2024-01-01T10:00:00.000Z',
|
||||
},
|
||||
}
|
||||
|
||||
const mockWorkflow = {
|
||||
id: 'workflow-456',
|
||||
userId: 'user-123',
|
||||
}
|
||||
|
||||
mockThen.mockResolvedValueOnce(mockCheckpoint).mockResolvedValueOnce(mockWorkflow)
|
||||
|
||||
;(global.fetch as any).mockResolvedValue({
|
||||
ok: true,
|
||||
json: () => Promise.resolve({ success: true }),
|
||||
})
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
checkpointId: 'checkpoint-complex',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/checkpoints/revert/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
const responseData = await response.json()
|
||||
expect(responseData.checkpoint.workflowState).toEqual({
|
||||
blocks: {
|
||||
start: { type: 'start', config: {} },
|
||||
http: { type: 'http', config: { url: 'https://api.example.com' } },
|
||||
end: { type: 'end', config: {} },
|
||||
},
|
||||
edges: [
|
||||
{ from: 'start', to: 'http' },
|
||||
{ from: 'http', to: 'end' },
|
||||
],
|
||||
loops: {
|
||||
loop1: { condition: 'true', iterations: 3 },
|
||||
},
|
||||
parallels: {
|
||||
parallel1: { branches: ['branch1', 'branch2'] },
|
||||
},
|
||||
isDeployed: true,
|
||||
deploymentStatuses: {
|
||||
production: 'deployed',
|
||||
staging: 'pending',
|
||||
},
|
||||
hasActiveWebhook: true,
|
||||
deployedAt: '2024-01-01T10:00:00.000Z',
|
||||
lastSaved: 1640995200000,
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
134
apps/sim/app/api/copilot/checkpoints/revert/route.ts
Normal file
134
apps/sim/app/api/copilot/checkpoints/revert/route.ts
Normal file
@@ -0,0 +1,134 @@
|
||||
import { and, eq } from 'drizzle-orm'
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { z } from 'zod'
|
||||
import {
|
||||
authenticateCopilotRequestSessionOnly,
|
||||
createInternalServerErrorResponse,
|
||||
createNotFoundResponse,
|
||||
createRequestTracker,
|
||||
createUnauthorizedResponse,
|
||||
} from '@/lib/copilot/auth'
|
||||
import { createLogger } from '@/lib/logs/console/logger'
|
||||
import { db } from '@/db'
|
||||
import { workflowCheckpoints, workflow as workflowTable } from '@/db/schema'
|
||||
|
||||
const logger = createLogger('CheckpointRevertAPI')
|
||||
|
||||
const RevertCheckpointSchema = z.object({
|
||||
checkpointId: z.string().min(1),
|
||||
})
|
||||
|
||||
/**
|
||||
* POST /api/copilot/checkpoints/revert
|
||||
* Revert workflow to a specific checkpoint state
|
||||
*/
|
||||
export async function POST(request: NextRequest) {
|
||||
const tracker = createRequestTracker()
|
||||
|
||||
try {
|
||||
const { userId, isAuthenticated } = await authenticateCopilotRequestSessionOnly()
|
||||
if (!isAuthenticated || !userId) {
|
||||
return createUnauthorizedResponse()
|
||||
}
|
||||
|
||||
const body = await request.json()
|
||||
const { checkpointId } = RevertCheckpointSchema.parse(body)
|
||||
|
||||
logger.info(`[${tracker.requestId}] Reverting to checkpoint ${checkpointId}`)
|
||||
|
||||
// Get the checkpoint and verify ownership
|
||||
const checkpoint = await db
|
||||
.select()
|
||||
.from(workflowCheckpoints)
|
||||
.where(and(eq(workflowCheckpoints.id, checkpointId), eq(workflowCheckpoints.userId, userId)))
|
||||
.then((rows) => rows[0])
|
||||
|
||||
if (!checkpoint) {
|
||||
return createNotFoundResponse('Checkpoint not found or access denied')
|
||||
}
|
||||
|
||||
// Verify user still has access to the workflow
|
||||
const workflowData = await db
|
||||
.select()
|
||||
.from(workflowTable)
|
||||
.where(eq(workflowTable.id, checkpoint.workflowId))
|
||||
.then((rows) => rows[0])
|
||||
|
||||
if (!workflowData) {
|
||||
return createNotFoundResponse('Workflow not found')
|
||||
}
|
||||
|
||||
if (workflowData.userId !== userId) {
|
||||
return createUnauthorizedResponse()
|
||||
}
|
||||
|
||||
// Apply the checkpoint state to the workflow using the existing state endpoint
|
||||
const checkpointState = checkpoint.workflowState as any // Cast to any for property access
|
||||
|
||||
// Clean the checkpoint state to remove any null/undefined values that could cause validation errors
|
||||
const cleanedState = {
|
||||
blocks: checkpointState?.blocks || {},
|
||||
edges: checkpointState?.edges || [],
|
||||
loops: checkpointState?.loops || {},
|
||||
parallels: checkpointState?.parallels || {},
|
||||
isDeployed: checkpointState?.isDeployed || false,
|
||||
deploymentStatuses: checkpointState?.deploymentStatuses || {},
|
||||
hasActiveWebhook: checkpointState?.hasActiveWebhook || false,
|
||||
lastSaved: Date.now(),
|
||||
// Only include deployedAt if it's a valid date string that can be converted
|
||||
...(checkpointState?.deployedAt &&
|
||||
checkpointState.deployedAt !== null &&
|
||||
checkpointState.deployedAt !== undefined &&
|
||||
!Number.isNaN(new Date(checkpointState.deployedAt).getTime())
|
||||
? { deployedAt: new Date(checkpointState.deployedAt) }
|
||||
: {}),
|
||||
}
|
||||
|
||||
logger.info(`[${tracker.requestId}] Applying cleaned checkpoint state`, {
|
||||
blocksCount: Object.keys(cleanedState.blocks).length,
|
||||
edgesCount: cleanedState.edges.length,
|
||||
hasDeployedAt: !!cleanedState.deployedAt,
|
||||
isDeployed: cleanedState.isDeployed,
|
||||
})
|
||||
|
||||
const stateResponse = await fetch(
|
||||
`${request.nextUrl.origin}/api/workflows/${checkpoint.workflowId}/state`,
|
||||
{
|
||||
method: 'PUT',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
Cookie: request.headers.get('Cookie') || '', // Forward auth cookies
|
||||
},
|
||||
body: JSON.stringify(cleanedState),
|
||||
}
|
||||
)
|
||||
|
||||
if (!stateResponse.ok) {
|
||||
const errorData = await stateResponse.text()
|
||||
logger.error(`[${tracker.requestId}] Failed to apply checkpoint state: ${errorData}`)
|
||||
return NextResponse.json(
|
||||
{ error: 'Failed to revert workflow to checkpoint' },
|
||||
{ status: 500 }
|
||||
)
|
||||
}
|
||||
|
||||
const result = await stateResponse.json()
|
||||
logger.info(
|
||||
`[${tracker.requestId}] Successfully reverted workflow ${checkpoint.workflowId} to checkpoint ${checkpointId}`
|
||||
)
|
||||
|
||||
return NextResponse.json({
|
||||
success: true,
|
||||
workflowId: checkpoint.workflowId,
|
||||
checkpointId,
|
||||
revertedAt: new Date().toISOString(),
|
||||
checkpoint: {
|
||||
id: checkpoint.id,
|
||||
workflowState: cleanedState, // Return the reverted state for frontend use
|
||||
},
|
||||
})
|
||||
} catch (error) {
|
||||
logger.error(`[${tracker.requestId}] Error reverting to checkpoint:`, error)
|
||||
return createInternalServerErrorResponse('Failed to revert to checkpoint')
|
||||
}
|
||||
}
|
||||
438
apps/sim/app/api/copilot/checkpoints/route.test.ts
Normal file
438
apps/sim/app/api/copilot/checkpoints/route.test.ts
Normal file
@@ -0,0 +1,438 @@
|
||||
/**
|
||||
* Tests for copilot checkpoints API route
|
||||
*
|
||||
* @vitest-environment node
|
||||
*/
|
||||
import { NextRequest } from 'next/server'
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
|
||||
import {
|
||||
createMockRequest,
|
||||
mockAuth,
|
||||
mockCryptoUuid,
|
||||
setupCommonApiMocks,
|
||||
} from '@/app/api/__test-utils__/utils'
|
||||
|
||||
describe('Copilot Checkpoints API Route', () => {
|
||||
const mockSelect = vi.fn()
|
||||
const mockFrom = vi.fn()
|
||||
const mockWhere = vi.fn()
|
||||
const mockLimit = vi.fn()
|
||||
const mockOrderBy = vi.fn()
|
||||
const mockInsert = vi.fn()
|
||||
const mockValues = vi.fn()
|
||||
const mockReturning = vi.fn()
|
||||
|
||||
const mockCopilotChats = { id: 'id', userId: 'userId' }
|
||||
const mockWorkflowCheckpoints = {
|
||||
id: 'id',
|
||||
userId: 'userId',
|
||||
workflowId: 'workflowId',
|
||||
chatId: 'chatId',
|
||||
messageId: 'messageId',
|
||||
createdAt: 'createdAt',
|
||||
updatedAt: 'updatedAt',
|
||||
}
|
||||
|
||||
beforeEach(() => {
|
||||
vi.resetModules()
|
||||
setupCommonApiMocks()
|
||||
mockCryptoUuid()
|
||||
|
||||
mockSelect.mockReturnValue({ from: mockFrom })
|
||||
mockFrom.mockReturnValue({ where: mockWhere })
|
||||
mockWhere.mockReturnValue({
|
||||
orderBy: mockOrderBy,
|
||||
limit: mockLimit,
|
||||
})
|
||||
mockOrderBy.mockResolvedValue([])
|
||||
mockLimit.mockResolvedValue([])
|
||||
mockInsert.mockReturnValue({ values: mockValues })
|
||||
mockValues.mockReturnValue({ returning: mockReturning })
|
||||
|
||||
vi.doMock('@/db', () => ({
|
||||
db: {
|
||||
select: mockSelect,
|
||||
insert: mockInsert,
|
||||
},
|
||||
}))
|
||||
|
||||
vi.doMock('@/db/schema', () => ({
|
||||
copilotChats: mockCopilotChats,
|
||||
workflowCheckpoints: mockWorkflowCheckpoints,
|
||||
}))
|
||||
|
||||
vi.doMock('drizzle-orm', () => ({
|
||||
and: vi.fn((...conditions) => ({ conditions, type: 'and' })),
|
||||
eq: vi.fn((field, value) => ({ field, value, type: 'eq' })),
|
||||
desc: vi.fn((field) => ({ field, type: 'desc' })),
|
||||
}))
|
||||
})
|
||||
|
||||
afterEach(() => {
|
||||
vi.clearAllMocks()
|
||||
vi.restoreAllMocks()
|
||||
})
|
||||
|
||||
describe('POST', () => {
|
||||
it('should return 401 when user is not authenticated', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setUnauthenticated()
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
workflowId: 'workflow-123',
|
||||
chatId: 'chat-123',
|
||||
workflowState: '{"blocks": []}',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/checkpoints/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(401)
|
||||
const responseData = await response.json()
|
||||
expect(responseData).toEqual({ error: 'Unauthorized' })
|
||||
})
|
||||
|
||||
it('should return 500 for invalid request body', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
// Missing required fields
|
||||
workflowId: 'workflow-123',
|
||||
// Missing chatId and workflowState
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/checkpoints/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(500)
|
||||
const responseData = await response.json()
|
||||
expect(responseData.error).toBe('Failed to create checkpoint')
|
||||
})
|
||||
|
||||
it('should return 400 when chat not found or unauthorized', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
// Mock chat not found
|
||||
mockLimit.mockResolvedValue([])
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
workflowId: 'workflow-123',
|
||||
chatId: 'chat-123',
|
||||
workflowState: '{"blocks": []}',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/checkpoints/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(400)
|
||||
const responseData = await response.json()
|
||||
expect(responseData.error).toBe('Chat not found or unauthorized')
|
||||
})
|
||||
|
||||
it('should return 400 for invalid workflow state JSON', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
// Mock chat exists
|
||||
const chat = {
|
||||
id: 'chat-123',
|
||||
userId: 'user-123',
|
||||
}
|
||||
mockLimit.mockResolvedValue([chat])
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
workflowId: 'workflow-123',
|
||||
chatId: 'chat-123',
|
||||
workflowState: 'invalid-json', // Invalid JSON
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/checkpoints/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(400)
|
||||
const responseData = await response.json()
|
||||
expect(responseData.error).toBe('Invalid workflow state JSON')
|
||||
})
|
||||
|
||||
it('should successfully create a checkpoint', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
// Mock chat exists
|
||||
const chat = {
|
||||
id: 'chat-123',
|
||||
userId: 'user-123',
|
||||
}
|
||||
mockLimit.mockResolvedValue([chat])
|
||||
|
||||
// Mock successful checkpoint creation
|
||||
const checkpoint = {
|
||||
id: 'checkpoint-123',
|
||||
userId: 'user-123',
|
||||
workflowId: 'workflow-123',
|
||||
chatId: 'chat-123',
|
||||
messageId: 'message-123',
|
||||
createdAt: new Date('2024-01-01'),
|
||||
updatedAt: new Date('2024-01-01'),
|
||||
}
|
||||
mockReturning.mockResolvedValue([checkpoint])
|
||||
|
||||
const workflowState = { blocks: [], connections: [] }
|
||||
const req = createMockRequest('POST', {
|
||||
workflowId: 'workflow-123',
|
||||
chatId: 'chat-123',
|
||||
messageId: 'message-123',
|
||||
workflowState: JSON.stringify(workflowState),
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/checkpoints/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
const responseData = await response.json()
|
||||
expect(responseData).toEqual({
|
||||
success: true,
|
||||
checkpoint: {
|
||||
id: 'checkpoint-123',
|
||||
userId: 'user-123',
|
||||
workflowId: 'workflow-123',
|
||||
chatId: 'chat-123',
|
||||
messageId: 'message-123',
|
||||
createdAt: '2024-01-01T00:00:00.000Z',
|
||||
updatedAt: '2024-01-01T00:00:00.000Z',
|
||||
},
|
||||
})
|
||||
|
||||
// Verify database operations
|
||||
expect(mockInsert).toHaveBeenCalled()
|
||||
expect(mockValues).toHaveBeenCalledWith({
|
||||
userId: 'user-123',
|
||||
workflowId: 'workflow-123',
|
||||
chatId: 'chat-123',
|
||||
messageId: 'message-123',
|
||||
workflowState: workflowState, // Should be parsed JSON object
|
||||
})
|
||||
})
|
||||
|
||||
it('should create checkpoint without messageId', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
// Mock chat exists
|
||||
const chat = {
|
||||
id: 'chat-123',
|
||||
userId: 'user-123',
|
||||
}
|
||||
mockLimit.mockResolvedValue([chat])
|
||||
|
||||
// Mock successful checkpoint creation
|
||||
const checkpoint = {
|
||||
id: 'checkpoint-123',
|
||||
userId: 'user-123',
|
||||
workflowId: 'workflow-123',
|
||||
chatId: 'chat-123',
|
||||
messageId: undefined,
|
||||
createdAt: new Date('2024-01-01'),
|
||||
updatedAt: new Date('2024-01-01'),
|
||||
}
|
||||
mockReturning.mockResolvedValue([checkpoint])
|
||||
|
||||
const workflowState = { blocks: [] }
|
||||
const req = createMockRequest('POST', {
|
||||
workflowId: 'workflow-123',
|
||||
chatId: 'chat-123',
|
||||
// No messageId provided
|
||||
workflowState: JSON.stringify(workflowState),
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/checkpoints/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
const responseData = await response.json()
|
||||
expect(responseData.success).toBe(true)
|
||||
expect(responseData.checkpoint.messageId).toBeUndefined()
|
||||
})
|
||||
|
||||
it('should handle database errors during checkpoint creation', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
// Mock chat exists
|
||||
const chat = {
|
||||
id: 'chat-123',
|
||||
userId: 'user-123',
|
||||
}
|
||||
mockLimit.mockResolvedValue([chat])
|
||||
|
||||
// Mock database error
|
||||
mockReturning.mockRejectedValue(new Error('Database insert failed'))
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
workflowId: 'workflow-123',
|
||||
chatId: 'chat-123',
|
||||
workflowState: '{"blocks": []}',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/checkpoints/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(500)
|
||||
const responseData = await response.json()
|
||||
expect(responseData.error).toBe('Failed to create checkpoint')
|
||||
})
|
||||
|
||||
it('should handle database errors during chat lookup', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
// Mock database error during chat lookup
|
||||
mockLimit.mockRejectedValue(new Error('Database query failed'))
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
workflowId: 'workflow-123',
|
||||
chatId: 'chat-123',
|
||||
workflowState: '{"blocks": []}',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/checkpoints/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(500)
|
||||
const responseData = await response.json()
|
||||
expect(responseData.error).toBe('Failed to create checkpoint')
|
||||
})
|
||||
})
|
||||
|
||||
describe('GET', () => {
|
||||
it('should return 401 when user is not authenticated', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setUnauthenticated()
|
||||
|
||||
const req = new NextRequest('http://localhost:3000/api/copilot/checkpoints?chatId=chat-123')
|
||||
|
||||
const { GET } = await import('@/app/api/copilot/checkpoints/route')
|
||||
const response = await GET(req)
|
||||
|
||||
expect(response.status).toBe(401)
|
||||
const responseData = await response.json()
|
||||
expect(responseData).toEqual({ error: 'Unauthorized' })
|
||||
})
|
||||
|
||||
it('should return 400 when chatId is missing', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
const req = new NextRequest('http://localhost:3000/api/copilot/checkpoints')
|
||||
|
||||
const { GET } = await import('@/app/api/copilot/checkpoints/route')
|
||||
const response = await GET(req)
|
||||
|
||||
expect(response.status).toBe(400)
|
||||
const responseData = await response.json()
|
||||
expect(responseData.error).toBe('chatId is required')
|
||||
})
|
||||
|
||||
it('should return checkpoints for authenticated user and chat', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
const mockCheckpoints = [
|
||||
{
|
||||
id: 'checkpoint-1',
|
||||
userId: 'user-123',
|
||||
workflowId: 'workflow-123',
|
||||
chatId: 'chat-123',
|
||||
messageId: 'message-1',
|
||||
createdAt: new Date('2024-01-01'),
|
||||
updatedAt: new Date('2024-01-01'),
|
||||
},
|
||||
{
|
||||
id: 'checkpoint-2',
|
||||
userId: 'user-123',
|
||||
workflowId: 'workflow-123',
|
||||
chatId: 'chat-123',
|
||||
messageId: 'message-2',
|
||||
createdAt: new Date('2024-01-02'),
|
||||
updatedAt: new Date('2024-01-02'),
|
||||
},
|
||||
]
|
||||
|
||||
mockOrderBy.mockResolvedValue(mockCheckpoints)
|
||||
|
||||
const req = new NextRequest('http://localhost:3000/api/copilot/checkpoints?chatId=chat-123')
|
||||
|
||||
const { GET } = await import('@/app/api/copilot/checkpoints/route')
|
||||
const response = await GET(req)
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
const responseData = await response.json()
|
||||
expect(responseData).toEqual({
|
||||
success: true,
|
||||
checkpoints: [
|
||||
{
|
||||
id: 'checkpoint-1',
|
||||
userId: 'user-123',
|
||||
workflowId: 'workflow-123',
|
||||
chatId: 'chat-123',
|
||||
messageId: 'message-1',
|
||||
createdAt: '2024-01-01T00:00:00.000Z',
|
||||
updatedAt: '2024-01-01T00:00:00.000Z',
|
||||
},
|
||||
{
|
||||
id: 'checkpoint-2',
|
||||
userId: 'user-123',
|
||||
workflowId: 'workflow-123',
|
||||
chatId: 'chat-123',
|
||||
messageId: 'message-2',
|
||||
createdAt: '2024-01-02T00:00:00.000Z',
|
||||
updatedAt: '2024-01-02T00:00:00.000Z',
|
||||
},
|
||||
],
|
||||
})
|
||||
|
||||
// Verify database query was made correctly
|
||||
expect(mockSelect).toHaveBeenCalled()
|
||||
expect(mockWhere).toHaveBeenCalled()
|
||||
expect(mockOrderBy).toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it('should handle database errors when fetching checkpoints', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
// Mock database error
|
||||
mockOrderBy.mockRejectedValue(new Error('Database query failed'))
|
||||
|
||||
const req = new NextRequest('http://localhost:3000/api/copilot/checkpoints?chatId=chat-123')
|
||||
|
||||
const { GET } = await import('@/app/api/copilot/checkpoints/route')
|
||||
const response = await GET(req)
|
||||
|
||||
expect(response.status).toBe(500)
|
||||
const responseData = await response.json()
|
||||
expect(responseData.error).toBe('Failed to fetch checkpoints')
|
||||
})
|
||||
|
||||
it('should return empty array when no checkpoints found', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
mockOrderBy.mockResolvedValue([])
|
||||
|
||||
const req = new NextRequest('http://localhost:3000/api/copilot/checkpoints?chatId=chat-123')
|
||||
|
||||
const { GET } = await import('@/app/api/copilot/checkpoints/route')
|
||||
const response = await GET(req)
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
const responseData = await response.json()
|
||||
expect(responseData).toEqual({
|
||||
success: true,
|
||||
checkpoints: [],
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -1,64 +1,162 @@
|
||||
import { and, desc, eq } from 'drizzle-orm'
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { getSession } from '@/lib/auth'
|
||||
import { z } from 'zod'
|
||||
import {
|
||||
authenticateCopilotRequestSessionOnly,
|
||||
createBadRequestResponse,
|
||||
createInternalServerErrorResponse,
|
||||
createRequestTracker,
|
||||
createUnauthorizedResponse,
|
||||
} from '@/lib/copilot/auth'
|
||||
import { createLogger } from '@/lib/logs/console/logger'
|
||||
import { db } from '@/db'
|
||||
import { copilotCheckpoints } from '@/db/schema'
|
||||
import { copilotChats, workflowCheckpoints } from '@/db/schema'
|
||||
|
||||
const logger = createLogger('CopilotCheckpointsAPI')
|
||||
const logger = createLogger('WorkflowCheckpointsAPI')
|
||||
|
||||
const CreateCheckpointSchema = z.object({
|
||||
workflowId: z.string(),
|
||||
chatId: z.string(),
|
||||
messageId: z.string().optional(), // ID of the user message that triggered this checkpoint
|
||||
workflowState: z.string(), // JSON stringified workflow state
|
||||
})
|
||||
|
||||
/**
|
||||
* GET /api/copilot/checkpoints
|
||||
* List checkpoints for a specific chat
|
||||
* POST /api/copilot/checkpoints
|
||||
* Create a new checkpoint with JSON workflow state
|
||||
*/
|
||||
export async function GET(request: NextRequest) {
|
||||
const requestId = crypto.randomUUID()
|
||||
export async function POST(req: NextRequest) {
|
||||
const tracker = createRequestTracker()
|
||||
|
||||
try {
|
||||
const session = await getSession()
|
||||
if (!session?.user?.id) {
|
||||
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
|
||||
const { userId, isAuthenticated } = await authenticateCopilotRequestSessionOnly()
|
||||
if (!isAuthenticated || !userId) {
|
||||
return createUnauthorizedResponse()
|
||||
}
|
||||
|
||||
const { searchParams } = new URL(request.url)
|
||||
const chatId = searchParams.get('chatId')
|
||||
const limit = Number(searchParams.get('limit')) || 10
|
||||
const offset = Number(searchParams.get('offset')) || 0
|
||||
const body = await req.json()
|
||||
const { workflowId, chatId, messageId, workflowState } = CreateCheckpointSchema.parse(body)
|
||||
|
||||
if (!chatId) {
|
||||
return NextResponse.json({ error: 'chatId is required' }, { status: 400 })
|
||||
}
|
||||
|
||||
logger.info(`[${requestId}] Listing checkpoints for chat: ${chatId}`, {
|
||||
userId: session.user.id,
|
||||
limit,
|
||||
offset,
|
||||
logger.info(`[${tracker.requestId}] Creating workflow checkpoint`, {
|
||||
userId,
|
||||
workflowId,
|
||||
chatId,
|
||||
messageId,
|
||||
fullRequestBody: body,
|
||||
parsedData: { workflowId, chatId, messageId },
|
||||
messageIdType: typeof messageId,
|
||||
messageIdExists: !!messageId,
|
||||
})
|
||||
|
||||
const checkpoints = await db
|
||||
// Verify that the chat belongs to the user
|
||||
const [chat] = await db
|
||||
.select()
|
||||
.from(copilotCheckpoints)
|
||||
.where(
|
||||
and(eq(copilotCheckpoints.userId, session.user.id), eq(copilotCheckpoints.chatId, chatId))
|
||||
)
|
||||
.orderBy(desc(copilotCheckpoints.createdAt))
|
||||
.limit(limit)
|
||||
.offset(offset)
|
||||
.from(copilotChats)
|
||||
.where(and(eq(copilotChats.id, chatId), eq(copilotChats.userId, userId)))
|
||||
.limit(1)
|
||||
|
||||
// Format timestamps to ISO strings for consistent timezone handling
|
||||
const formattedCheckpoints = checkpoints.map((checkpoint) => ({
|
||||
id: checkpoint.id,
|
||||
userId: checkpoint.userId,
|
||||
workflowId: checkpoint.workflowId,
|
||||
chatId: checkpoint.chatId,
|
||||
yaml: checkpoint.yaml,
|
||||
createdAt: checkpoint.createdAt.toISOString(),
|
||||
updatedAt: checkpoint.updatedAt.toISOString(),
|
||||
}))
|
||||
if (!chat) {
|
||||
return createBadRequestResponse('Chat not found or unauthorized')
|
||||
}
|
||||
|
||||
return NextResponse.json({ checkpoints: formattedCheckpoints })
|
||||
// Parse the workflow state to validate it's valid JSON
|
||||
let parsedWorkflowState
|
||||
try {
|
||||
parsedWorkflowState = JSON.parse(workflowState)
|
||||
} catch (error) {
|
||||
return createBadRequestResponse('Invalid workflow state JSON')
|
||||
}
|
||||
|
||||
// Create checkpoint with JSON workflow state
|
||||
const [checkpoint] = await db
|
||||
.insert(workflowCheckpoints)
|
||||
.values({
|
||||
userId,
|
||||
workflowId,
|
||||
chatId,
|
||||
messageId,
|
||||
workflowState: parsedWorkflowState, // Store as JSON object
|
||||
})
|
||||
.returning()
|
||||
|
||||
logger.info(`[${tracker.requestId}] Workflow checkpoint created successfully`, {
|
||||
checkpointId: checkpoint.id,
|
||||
savedData: {
|
||||
checkpointId: checkpoint.id,
|
||||
userId: checkpoint.userId,
|
||||
workflowId: checkpoint.workflowId,
|
||||
chatId: checkpoint.chatId,
|
||||
messageId: checkpoint.messageId,
|
||||
createdAt: checkpoint.createdAt,
|
||||
},
|
||||
})
|
||||
|
||||
return NextResponse.json({
|
||||
success: true,
|
||||
checkpoint: {
|
||||
id: checkpoint.id,
|
||||
userId: checkpoint.userId,
|
||||
workflowId: checkpoint.workflowId,
|
||||
chatId: checkpoint.chatId,
|
||||
messageId: checkpoint.messageId,
|
||||
createdAt: checkpoint.createdAt,
|
||||
updatedAt: checkpoint.updatedAt,
|
||||
},
|
||||
})
|
||||
} catch (error) {
|
||||
logger.error(`[${requestId}] Error listing checkpoints:`, error)
|
||||
return NextResponse.json({ error: 'Failed to list checkpoints' }, { status: 500 })
|
||||
logger.error(`[${tracker.requestId}] Failed to create workflow checkpoint:`, error)
|
||||
return createInternalServerErrorResponse('Failed to create checkpoint')
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* GET /api/copilot/checkpoints?chatId=xxx
|
||||
* Retrieve workflow checkpoints for a chat
|
||||
*/
|
||||
export async function GET(req: NextRequest) {
|
||||
const tracker = createRequestTracker()
|
||||
|
||||
try {
|
||||
const { userId, isAuthenticated } = await authenticateCopilotRequestSessionOnly()
|
||||
if (!isAuthenticated || !userId) {
|
||||
return createUnauthorizedResponse()
|
||||
}
|
||||
|
||||
const { searchParams } = new URL(req.url)
|
||||
const chatId = searchParams.get('chatId')
|
||||
|
||||
if (!chatId) {
|
||||
return createBadRequestResponse('chatId is required')
|
||||
}
|
||||
|
||||
logger.info(`[${tracker.requestId}] Fetching workflow checkpoints for chat`, {
|
||||
userId,
|
||||
chatId,
|
||||
})
|
||||
|
||||
// Fetch checkpoints for this user and chat
|
||||
const checkpoints = await db
|
||||
.select({
|
||||
id: workflowCheckpoints.id,
|
||||
userId: workflowCheckpoints.userId,
|
||||
workflowId: workflowCheckpoints.workflowId,
|
||||
chatId: workflowCheckpoints.chatId,
|
||||
messageId: workflowCheckpoints.messageId,
|
||||
createdAt: workflowCheckpoints.createdAt,
|
||||
updatedAt: workflowCheckpoints.updatedAt,
|
||||
})
|
||||
.from(workflowCheckpoints)
|
||||
.where(and(eq(workflowCheckpoints.chatId, chatId), eq(workflowCheckpoints.userId, userId)))
|
||||
.orderBy(desc(workflowCheckpoints.createdAt))
|
||||
|
||||
logger.info(`[${tracker.requestId}] Retrieved ${checkpoints.length} workflow checkpoints`)
|
||||
|
||||
return NextResponse.json({
|
||||
success: true,
|
||||
checkpoints,
|
||||
})
|
||||
} catch (error) {
|
||||
logger.error(`[${tracker.requestId}] Failed to fetch workflow checkpoints:`, error)
|
||||
return createInternalServerErrorResponse('Failed to fetch checkpoints')
|
||||
}
|
||||
}
|
||||
|
||||
393
apps/sim/app/api/copilot/confirm/route.test.ts
Normal file
393
apps/sim/app/api/copilot/confirm/route.test.ts
Normal file
@@ -0,0 +1,393 @@
|
||||
/**
|
||||
* Tests for copilot confirm API route
|
||||
*
|
||||
* @vitest-environment node
|
||||
*/
|
||||
import { NextRequest } from 'next/server'
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
|
||||
import {
|
||||
createMockRequest,
|
||||
mockAuth,
|
||||
mockCryptoUuid,
|
||||
setupCommonApiMocks,
|
||||
} from '@/app/api/__test-utils__/utils'
|
||||
|
||||
describe('Copilot Confirm API Route', () => {
|
||||
const mockRedisExists = vi.fn()
|
||||
const mockRedisSet = vi.fn()
|
||||
const mockGetRedisClient = vi.fn()
|
||||
|
||||
beforeEach(() => {
|
||||
vi.resetModules()
|
||||
setupCommonApiMocks()
|
||||
mockCryptoUuid()
|
||||
|
||||
const mockRedisClient = {
|
||||
exists: mockRedisExists,
|
||||
set: mockRedisSet,
|
||||
}
|
||||
|
||||
mockGetRedisClient.mockReturnValue(mockRedisClient)
|
||||
mockRedisExists.mockResolvedValue(1) // Tool call exists by default
|
||||
mockRedisSet.mockResolvedValue('OK')
|
||||
|
||||
vi.doMock('@/lib/redis', () => ({
|
||||
getRedisClient: mockGetRedisClient,
|
||||
}))
|
||||
|
||||
// Mock setTimeout to control polling behavior
|
||||
vi.spyOn(global, 'setTimeout').mockImplementation((callback, _delay) => {
|
||||
// Immediately call callback to avoid delays
|
||||
if (typeof callback === 'function') {
|
||||
setImmediate(callback)
|
||||
}
|
||||
return setTimeout(() => {}, 0) as any
|
||||
})
|
||||
|
||||
// Mock Date.now to control timeout behavior
|
||||
let mockTime = 1640995200000
|
||||
vi.spyOn(Date, 'now').mockImplementation(() => {
|
||||
// Increment time rapidly to trigger timeout for non-existent keys
|
||||
mockTime += 10000 // Add 10 seconds each call
|
||||
return mockTime
|
||||
})
|
||||
})
|
||||
|
||||
afterEach(() => {
|
||||
vi.clearAllMocks()
|
||||
vi.restoreAllMocks()
|
||||
})
|
||||
|
||||
describe('POST', () => {
|
||||
it('should return 401 when user is not authenticated', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setUnauthenticated()
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
toolCallId: 'tool-call-123',
|
||||
status: 'success',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/confirm/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(401)
|
||||
const responseData = await response.json()
|
||||
expect(responseData).toEqual({ error: 'Unauthorized' })
|
||||
})
|
||||
|
||||
it('should return 400 for invalid request body - missing toolCallId', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
status: 'success',
|
||||
// Missing toolCallId
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/confirm/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(400)
|
||||
const responseData = await response.json()
|
||||
expect(responseData.error).toContain('Required')
|
||||
})
|
||||
|
||||
it('should return 400 for invalid request body - missing status', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
toolCallId: 'tool-call-123',
|
||||
// Missing status
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/confirm/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(400)
|
||||
const responseData = await response.json()
|
||||
expect(responseData.error).toContain('Invalid request data')
|
||||
})
|
||||
|
||||
it('should return 400 for invalid status value', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
toolCallId: 'tool-call-123',
|
||||
status: 'invalid-status',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/confirm/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(400)
|
||||
const responseData = await response.json()
|
||||
expect(responseData.error).toContain('Invalid notification status')
|
||||
})
|
||||
|
||||
it('should successfully confirm tool call with success status', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
toolCallId: 'tool-call-123',
|
||||
status: 'success',
|
||||
message: 'Tool executed successfully',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/confirm/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
const responseData = await response.json()
|
||||
expect(responseData).toEqual({
|
||||
success: true,
|
||||
message: 'Tool executed successfully',
|
||||
toolCallId: 'tool-call-123',
|
||||
status: 'success',
|
||||
})
|
||||
|
||||
// Verify Redis operations were called
|
||||
expect(mockRedisExists).toHaveBeenCalled()
|
||||
expect(mockRedisSet).toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it('should successfully confirm tool call with error status', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
toolCallId: 'tool-call-456',
|
||||
status: 'error',
|
||||
message: 'Tool execution failed',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/confirm/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
const responseData = await response.json()
|
||||
expect(responseData).toEqual({
|
||||
success: true,
|
||||
message: 'Tool execution failed',
|
||||
toolCallId: 'tool-call-456',
|
||||
status: 'error',
|
||||
})
|
||||
|
||||
expect(mockRedisSet).toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it('should successfully confirm tool call with accepted status', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
toolCallId: 'tool-call-789',
|
||||
status: 'accepted',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/confirm/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
const responseData = await response.json()
|
||||
expect(responseData).toEqual({
|
||||
success: true,
|
||||
message: 'Tool call tool-call-789 has been accepted',
|
||||
toolCallId: 'tool-call-789',
|
||||
status: 'accepted',
|
||||
})
|
||||
|
||||
expect(mockRedisSet).toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it('should successfully confirm tool call with rejected status', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
toolCallId: 'tool-call-101',
|
||||
status: 'rejected',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/confirm/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
const responseData = await response.json()
|
||||
expect(responseData).toEqual({
|
||||
success: true,
|
||||
message: 'Tool call tool-call-101 has been rejected',
|
||||
toolCallId: 'tool-call-101',
|
||||
status: 'rejected',
|
||||
})
|
||||
})
|
||||
|
||||
it('should successfully confirm tool call with background status', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
toolCallId: 'tool-call-bg',
|
||||
status: 'background',
|
||||
message: 'Moved to background execution',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/confirm/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
const responseData = await response.json()
|
||||
expect(responseData).toEqual({
|
||||
success: true,
|
||||
message: 'Moved to background execution',
|
||||
toolCallId: 'tool-call-bg',
|
||||
status: 'background',
|
||||
})
|
||||
})
|
||||
|
||||
it('should return 400 when Redis client is not available', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
// Mock Redis client as unavailable
|
||||
mockGetRedisClient.mockReturnValue(null)
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
toolCallId: 'tool-call-123',
|
||||
status: 'success',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/confirm/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(400)
|
||||
const responseData = await response.json()
|
||||
expect(responseData.error).toBe('Failed to update tool call status or tool call not found')
|
||||
})
|
||||
|
||||
it('should return 400 when tool call is not found in Redis', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
// Mock tool call as not existing in Redis
|
||||
mockRedisExists.mockResolvedValue(0)
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
toolCallId: 'non-existent-tool',
|
||||
status: 'success',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/confirm/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(400)
|
||||
const responseData = await response.json()
|
||||
expect(responseData.error).toBe('Failed to update tool call status or tool call not found')
|
||||
}, 10000) // 10 second timeout for this specific test
|
||||
|
||||
it('should handle Redis errors gracefully', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
// Mock Redis operations to throw an error
|
||||
mockRedisExists.mockRejectedValue(new Error('Redis connection failed'))
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
toolCallId: 'tool-call-123',
|
||||
status: 'success',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/confirm/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(400)
|
||||
const responseData = await response.json()
|
||||
expect(responseData.error).toBe('Failed to update tool call status or tool call not found')
|
||||
})
|
||||
|
||||
it('should handle Redis set operation failure', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
// Tool call exists but set operation fails
|
||||
mockRedisExists.mockResolvedValue(1)
|
||||
mockRedisSet.mockRejectedValue(new Error('Redis set failed'))
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
toolCallId: 'tool-call-123',
|
||||
status: 'success',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/confirm/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(400)
|
||||
const responseData = await response.json()
|
||||
expect(responseData.error).toBe('Failed to update tool call status or tool call not found')
|
||||
})
|
||||
|
||||
it('should handle JSON parsing errors in request body', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
// Create a request with invalid JSON
|
||||
const req = new NextRequest('http://localhost:3000/api/copilot/confirm', {
|
||||
method: 'POST',
|
||||
body: '{invalid-json',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/confirm/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(500)
|
||||
const responseData = await response.json()
|
||||
expect(responseData.error).toContain('JSON')
|
||||
})
|
||||
|
||||
it('should validate empty toolCallId', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
toolCallId: '',
|
||||
status: 'success',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/confirm/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(400)
|
||||
const responseData = await response.json()
|
||||
expect(responseData.error).toContain('Tool call ID is required')
|
||||
})
|
||||
|
||||
it('should handle all valid status types', async () => {
|
||||
const authMocks = mockAuth()
|
||||
authMocks.setAuthenticated()
|
||||
|
||||
const validStatuses = ['success', 'error', 'accepted', 'rejected', 'background']
|
||||
|
||||
for (const status of validStatuses) {
|
||||
const req = createMockRequest('POST', {
|
||||
toolCallId: `tool-call-${status}`,
|
||||
status,
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/confirm/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
const responseData = await response.json()
|
||||
expect(responseData.success).toBe(true)
|
||||
expect(responseData.status).toBe(status)
|
||||
expect(responseData.toolCallId).toBe(`tool-call-${status}`)
|
||||
}
|
||||
})
|
||||
})
|
||||
})
|
||||
193
apps/sim/app/api/copilot/confirm/route.ts
Normal file
193
apps/sim/app/api/copilot/confirm/route.ts
Normal file
@@ -0,0 +1,193 @@
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { z } from 'zod'
|
||||
import {
|
||||
authenticateCopilotRequestSessionOnly,
|
||||
createBadRequestResponse,
|
||||
createInternalServerErrorResponse,
|
||||
createRequestTracker,
|
||||
createUnauthorizedResponse,
|
||||
type NotificationStatus,
|
||||
} from '@/lib/copilot/auth'
|
||||
import { createLogger } from '@/lib/logs/console/logger'
|
||||
import { getRedisClient } from '@/lib/redis'
|
||||
|
||||
const logger = createLogger('CopilotConfirmAPI')
|
||||
|
||||
// Schema for confirmation request
|
||||
const ConfirmationSchema = z.object({
|
||||
toolCallId: z.string().min(1, 'Tool call ID is required'),
|
||||
status: z.enum(['success', 'error', 'accepted', 'rejected', 'background'] as const, {
|
||||
errorMap: () => ({ message: 'Invalid notification status' }),
|
||||
}),
|
||||
message: z.string().optional(), // Optional message for background moves or additional context
|
||||
})
|
||||
|
||||
/**
|
||||
* Update tool call status in Redis
|
||||
*/
|
||||
async function updateToolCallStatus(
|
||||
toolCallId: string,
|
||||
status: NotificationStatus,
|
||||
message?: string
|
||||
): Promise<boolean> {
|
||||
const redis = getRedisClient()
|
||||
if (!redis) {
|
||||
logger.warn('updateToolCallStatus: Redis client not available')
|
||||
return false
|
||||
}
|
||||
|
||||
try {
|
||||
const key = `tool_call:${toolCallId}`
|
||||
const timeout = 60000 // 1 minute timeout
|
||||
const pollInterval = 100 // Poll every 100ms
|
||||
const startTime = Date.now()
|
||||
|
||||
logger.info('Polling for tool call in Redis', { toolCallId, key, timeout })
|
||||
|
||||
// Poll until the key exists or timeout
|
||||
while (Date.now() - startTime < timeout) {
|
||||
const exists = await redis.exists(key)
|
||||
if (exists) {
|
||||
logger.info('Tool call found in Redis, updating status', {
|
||||
toolCallId,
|
||||
key,
|
||||
pollDuration: Date.now() - startTime,
|
||||
})
|
||||
break
|
||||
}
|
||||
|
||||
// Wait before next poll
|
||||
await new Promise((resolve) => setTimeout(resolve, pollInterval))
|
||||
}
|
||||
|
||||
// Final check if key exists after polling
|
||||
const exists = await redis.exists(key)
|
||||
if (!exists) {
|
||||
logger.warn('Tool call not found in Redis after polling timeout', {
|
||||
toolCallId,
|
||||
key,
|
||||
timeout,
|
||||
pollDuration: Date.now() - startTime,
|
||||
})
|
||||
return false
|
||||
}
|
||||
|
||||
// Store both status and message as JSON
|
||||
const toolCallData = {
|
||||
status,
|
||||
message: message || null,
|
||||
timestamp: new Date().toISOString(),
|
||||
}
|
||||
|
||||
// Log what we're about to update in Redis
|
||||
logger.info('About to update Redis with tool call data', {
|
||||
toolCallId,
|
||||
key,
|
||||
toolCallData,
|
||||
serializedData: JSON.stringify(toolCallData),
|
||||
providedStatus: status,
|
||||
providedMessage: message,
|
||||
messageIsUndefined: message === undefined,
|
||||
messageIsNull: message === null,
|
||||
})
|
||||
|
||||
await redis.set(key, JSON.stringify(toolCallData), 'EX', 86400) // Keep 24 hour expiry
|
||||
|
||||
logger.info('Tool call status updated in Redis', {
|
||||
toolCallId,
|
||||
key,
|
||||
status,
|
||||
message,
|
||||
pollDuration: Date.now() - startTime,
|
||||
})
|
||||
return true
|
||||
} catch (error) {
|
||||
logger.error('Failed to update tool call status in Redis', {
|
||||
toolCallId,
|
||||
status,
|
||||
message,
|
||||
error: error instanceof Error ? error.message : 'Unknown error',
|
||||
})
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* POST /api/copilot/confirm
|
||||
* Update tool call status (Accept/Reject)
|
||||
*/
|
||||
export async function POST(req: NextRequest) {
|
||||
const tracker = createRequestTracker()
|
||||
|
||||
try {
|
||||
// Authenticate user using consolidated helper
|
||||
const { userId: authenticatedUserId, isAuthenticated } =
|
||||
await authenticateCopilotRequestSessionOnly()
|
||||
|
||||
if (!isAuthenticated) {
|
||||
return createUnauthorizedResponse()
|
||||
}
|
||||
|
||||
const body = await req.json()
|
||||
const { toolCallId, status, message } = ConfirmationSchema.parse(body)
|
||||
|
||||
logger.info(`[${tracker.requestId}] Tool call confirmation request`, {
|
||||
userId: authenticatedUserId,
|
||||
toolCallId,
|
||||
status,
|
||||
message,
|
||||
})
|
||||
|
||||
// Update the tool call status in Redis
|
||||
const updated = await updateToolCallStatus(toolCallId, status, message)
|
||||
|
||||
if (!updated) {
|
||||
logger.error(`[${tracker.requestId}] Failed to update tool call status`, {
|
||||
userId: authenticatedUserId,
|
||||
toolCallId,
|
||||
status,
|
||||
internalStatus: status,
|
||||
message,
|
||||
})
|
||||
return createBadRequestResponse('Failed to update tool call status or tool call not found')
|
||||
}
|
||||
|
||||
const duration = tracker.getDuration()
|
||||
logger.info(`[${tracker.requestId}] Tool call confirmation completed`, {
|
||||
userId: authenticatedUserId,
|
||||
toolCallId,
|
||||
status,
|
||||
internalStatus: status,
|
||||
duration,
|
||||
})
|
||||
|
||||
return NextResponse.json({
|
||||
success: true,
|
||||
message: message || `Tool call ${toolCallId} has been ${status.toLowerCase()}`,
|
||||
toolCallId,
|
||||
status,
|
||||
})
|
||||
} catch (error) {
|
||||
const duration = tracker.getDuration()
|
||||
|
||||
if (error instanceof z.ZodError) {
|
||||
logger.error(`[${tracker.requestId}] Request validation error:`, {
|
||||
duration,
|
||||
errors: error.errors,
|
||||
})
|
||||
return createBadRequestResponse(
|
||||
`Invalid request data: ${error.errors.map((e) => e.message).join(', ')}`
|
||||
)
|
||||
}
|
||||
|
||||
logger.error(`[${tracker.requestId}] Unexpected error:`, {
|
||||
duration,
|
||||
error: error instanceof Error ? error.message : 'Unknown error',
|
||||
stack: error instanceof Error ? error.stack : undefined,
|
||||
})
|
||||
|
||||
return createInternalServerErrorResponse(
|
||||
error instanceof Error ? error.message : 'Internal server error'
|
||||
)
|
||||
}
|
||||
}
|
||||
155
apps/sim/app/api/copilot/feedback/route.ts
Normal file
155
apps/sim/app/api/copilot/feedback/route.ts
Normal file
@@ -0,0 +1,155 @@
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { z } from 'zod'
|
||||
import {
|
||||
authenticateCopilotRequestSessionOnly,
|
||||
createBadRequestResponse,
|
||||
createInternalServerErrorResponse,
|
||||
createRequestTracker,
|
||||
createUnauthorizedResponse,
|
||||
} from '@/lib/copilot/auth'
|
||||
import { createLogger } from '@/lib/logs/console/logger'
|
||||
import { db } from '@/db'
|
||||
import { copilotFeedback } from '@/db/schema'
|
||||
|
||||
const logger = createLogger('CopilotFeedbackAPI')
|
||||
|
||||
// Schema for feedback submission
|
||||
const FeedbackSchema = z.object({
|
||||
chatId: z.string().uuid('Chat ID must be a valid UUID'),
|
||||
userQuery: z.string().min(1, 'User query is required'),
|
||||
agentResponse: z.string().min(1, 'Agent response is required'),
|
||||
isPositiveFeedback: z.boolean(),
|
||||
feedback: z.string().optional(),
|
||||
workflowYaml: z.string().optional(), // Optional workflow YAML when edit/build workflow tools were used
|
||||
})
|
||||
|
||||
/**
|
||||
* POST /api/copilot/feedback
|
||||
* Submit feedback for a copilot interaction
|
||||
*/
|
||||
export async function POST(req: NextRequest) {
|
||||
const tracker = createRequestTracker()
|
||||
|
||||
try {
|
||||
// Authenticate user using the same pattern as other copilot routes
|
||||
const { userId: authenticatedUserId, isAuthenticated } =
|
||||
await authenticateCopilotRequestSessionOnly()
|
||||
|
||||
if (!isAuthenticated || !authenticatedUserId) {
|
||||
return createUnauthorizedResponse()
|
||||
}
|
||||
|
||||
const body = await req.json()
|
||||
const { chatId, userQuery, agentResponse, isPositiveFeedback, feedback, workflowYaml } =
|
||||
FeedbackSchema.parse(body)
|
||||
|
||||
logger.info(`[${tracker.requestId}] Processing copilot feedback submission`, {
|
||||
userId: authenticatedUserId,
|
||||
chatId,
|
||||
isPositiveFeedback,
|
||||
userQueryLength: userQuery.length,
|
||||
agentResponseLength: agentResponse.length,
|
||||
hasFeedback: !!feedback,
|
||||
hasWorkflowYaml: !!workflowYaml,
|
||||
workflowYamlLength: workflowYaml?.length || 0,
|
||||
})
|
||||
|
||||
// Insert feedback into the database
|
||||
const [feedbackRecord] = await db
|
||||
.insert(copilotFeedback)
|
||||
.values({
|
||||
userId: authenticatedUserId,
|
||||
chatId,
|
||||
userQuery,
|
||||
agentResponse,
|
||||
isPositive: isPositiveFeedback,
|
||||
feedback: feedback || null,
|
||||
workflowYaml: workflowYaml || null,
|
||||
})
|
||||
.returning()
|
||||
|
||||
logger.info(`[${tracker.requestId}] Successfully saved copilot feedback`, {
|
||||
feedbackId: feedbackRecord.feedbackId,
|
||||
userId: authenticatedUserId,
|
||||
isPositive: isPositiveFeedback,
|
||||
duration: tracker.getDuration(),
|
||||
})
|
||||
|
||||
return NextResponse.json({
|
||||
success: true,
|
||||
feedbackId: feedbackRecord.feedbackId,
|
||||
message: 'Feedback submitted successfully',
|
||||
metadata: {
|
||||
requestId: tracker.requestId,
|
||||
duration: tracker.getDuration(),
|
||||
},
|
||||
})
|
||||
} catch (error) {
|
||||
const duration = tracker.getDuration()
|
||||
|
||||
if (error instanceof z.ZodError) {
|
||||
logger.error(`[${tracker.requestId}] Validation error:`, {
|
||||
duration,
|
||||
errors: error.errors,
|
||||
})
|
||||
return createBadRequestResponse(
|
||||
`Invalid request data: ${error.errors.map((e) => e.message).join(', ')}`
|
||||
)
|
||||
}
|
||||
|
||||
logger.error(`[${tracker.requestId}] Error submitting copilot feedback:`, {
|
||||
duration,
|
||||
error: error instanceof Error ? error.message : 'Unknown error',
|
||||
stack: error instanceof Error ? error.stack : undefined,
|
||||
})
|
||||
|
||||
return createInternalServerErrorResponse('Failed to submit feedback')
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* GET /api/copilot/feedback
|
||||
* Get all feedback records (for analytics)
|
||||
*/
|
||||
export async function GET(req: NextRequest) {
|
||||
const tracker = createRequestTracker()
|
||||
|
||||
try {
|
||||
// Authenticate user
|
||||
const { userId: authenticatedUserId, isAuthenticated } =
|
||||
await authenticateCopilotRequestSessionOnly()
|
||||
|
||||
if (!isAuthenticated || !authenticatedUserId) {
|
||||
return createUnauthorizedResponse()
|
||||
}
|
||||
|
||||
// Get all feedback records
|
||||
const feedbackRecords = await db
|
||||
.select({
|
||||
feedbackId: copilotFeedback.feedbackId,
|
||||
userId: copilotFeedback.userId,
|
||||
chatId: copilotFeedback.chatId,
|
||||
userQuery: copilotFeedback.userQuery,
|
||||
agentResponse: copilotFeedback.agentResponse,
|
||||
isPositive: copilotFeedback.isPositive,
|
||||
feedback: copilotFeedback.feedback,
|
||||
workflowYaml: copilotFeedback.workflowYaml,
|
||||
createdAt: copilotFeedback.createdAt,
|
||||
})
|
||||
.from(copilotFeedback)
|
||||
|
||||
logger.info(`[${tracker.requestId}] Retrieved ${feedbackRecords.length} feedback records`)
|
||||
|
||||
return NextResponse.json({
|
||||
success: true,
|
||||
feedback: feedbackRecords,
|
||||
metadata: {
|
||||
requestId: tracker.requestId,
|
||||
duration: tracker.getDuration(),
|
||||
},
|
||||
})
|
||||
} catch (error) {
|
||||
logger.error(`[${tracker.requestId}] Error retrieving copilot feedback:`, error)
|
||||
return createInternalServerErrorResponse('Failed to retrieve feedback')
|
||||
}
|
||||
}
|
||||
751
apps/sim/app/api/copilot/methods/route.test.ts
Normal file
751
apps/sim/app/api/copilot/methods/route.test.ts
Normal file
@@ -0,0 +1,751 @@
|
||||
/**
|
||||
* Tests for copilot methods API route
|
||||
*
|
||||
* @vitest-environment node
|
||||
*/
|
||||
import { NextRequest } from 'next/server'
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
|
||||
import {
|
||||
createMockRequest,
|
||||
mockCryptoUuid,
|
||||
setupCommonApiMocks,
|
||||
} from '@/app/api/__test-utils__/utils'
|
||||
|
||||
describe('Copilot Methods API Route', () => {
|
||||
const mockRedisGet = vi.fn()
|
||||
const mockRedisSet = vi.fn()
|
||||
const mockGetRedisClient = vi.fn()
|
||||
const mockToolRegistryHas = vi.fn()
|
||||
const mockToolRegistryGet = vi.fn()
|
||||
const mockToolRegistryExecute = vi.fn()
|
||||
const mockToolRegistryGetAvailableIds = vi.fn()
|
||||
|
||||
beforeEach(() => {
|
||||
vi.resetModules()
|
||||
setupCommonApiMocks()
|
||||
mockCryptoUuid()
|
||||
|
||||
// Mock Redis client
|
||||
const mockRedisClient = {
|
||||
get: mockRedisGet,
|
||||
set: mockRedisSet,
|
||||
}
|
||||
|
||||
mockGetRedisClient.mockReturnValue(mockRedisClient)
|
||||
mockRedisGet.mockResolvedValue(null)
|
||||
mockRedisSet.mockResolvedValue('OK')
|
||||
|
||||
vi.doMock('@/lib/redis', () => ({
|
||||
getRedisClient: mockGetRedisClient,
|
||||
}))
|
||||
|
||||
// Mock tool registry
|
||||
const mockToolRegistry = {
|
||||
has: mockToolRegistryHas,
|
||||
get: mockToolRegistryGet,
|
||||
execute: mockToolRegistryExecute,
|
||||
getAvailableIds: mockToolRegistryGetAvailableIds,
|
||||
}
|
||||
|
||||
mockToolRegistryHas.mockReturnValue(true)
|
||||
mockToolRegistryGet.mockReturnValue({ requiresInterrupt: false })
|
||||
mockToolRegistryExecute.mockResolvedValue({ success: true, data: 'Tool executed successfully' })
|
||||
mockToolRegistryGetAvailableIds.mockReturnValue(['test-tool', 'another-tool'])
|
||||
|
||||
vi.doMock('@/lib/copilot/tools/server-tools/registry', () => ({
|
||||
copilotToolRegistry: mockToolRegistry,
|
||||
}))
|
||||
|
||||
// Mock environment variables
|
||||
vi.doMock('@/lib/env', () => ({
|
||||
env: {
|
||||
INTERNAL_API_SECRET: 'test-secret-key',
|
||||
},
|
||||
}))
|
||||
|
||||
// Mock setTimeout for polling
|
||||
vi.spyOn(global, 'setTimeout').mockImplementation((callback, _delay) => {
|
||||
if (typeof callback === 'function') {
|
||||
setImmediate(callback)
|
||||
}
|
||||
return setTimeout(() => {}, 0) as any
|
||||
})
|
||||
|
||||
// Mock Date.now for timeout control
|
||||
let mockTime = 1640995200000
|
||||
vi.spyOn(Date, 'now').mockImplementation(() => {
|
||||
mockTime += 1000 // Add 1 second each call
|
||||
return mockTime
|
||||
})
|
||||
|
||||
// Mock crypto.randomUUID for request IDs
|
||||
vi.spyOn(crypto, 'randomUUID').mockReturnValue('test-request-id')
|
||||
})
|
||||
|
||||
afterEach(() => {
|
||||
vi.clearAllMocks()
|
||||
vi.restoreAllMocks()
|
||||
})
|
||||
|
||||
describe('POST', () => {
|
||||
it('should return 401 when API key is missing', async () => {
|
||||
const req = createMockRequest('POST', {
|
||||
methodId: 'test-tool',
|
||||
params: {},
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/methods/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(401)
|
||||
const responseData = await response.json()
|
||||
expect(responseData).toEqual({
|
||||
success: false,
|
||||
error: 'API key required',
|
||||
})
|
||||
})
|
||||
|
||||
it('should return 401 when API key is invalid', async () => {
|
||||
const req = new NextRequest('http://localhost:3000/api/copilot/methods', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'x-api-key': 'invalid-key',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
methodId: 'test-tool',
|
||||
params: {},
|
||||
}),
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/methods/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(401)
|
||||
const responseData = await response.json()
|
||||
expect(responseData).toEqual({
|
||||
success: false,
|
||||
error: 'Invalid API key',
|
||||
})
|
||||
})
|
||||
|
||||
it('should return 401 when internal API key is not configured', async () => {
|
||||
// Mock environment with no API key
|
||||
vi.doMock('@/lib/env', () => ({
|
||||
env: {
|
||||
INTERNAL_API_SECRET: undefined,
|
||||
},
|
||||
}))
|
||||
|
||||
const req = new NextRequest('http://localhost:3000/api/copilot/methods', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'x-api-key': 'any-key',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
methodId: 'test-tool',
|
||||
params: {},
|
||||
}),
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/methods/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(401)
|
||||
const responseData = await response.json()
|
||||
expect(responseData).toEqual({
|
||||
success: false,
|
||||
error: 'Internal API key not configured',
|
||||
})
|
||||
})
|
||||
|
||||
it('should return 400 for invalid request body - missing methodId', async () => {
|
||||
const req = new NextRequest('http://localhost:3000/api/copilot/methods', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'x-api-key': 'test-secret-key',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
params: {},
|
||||
// Missing methodId
|
||||
}),
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/methods/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(400)
|
||||
const responseData = await response.json()
|
||||
expect(responseData.success).toBe(false)
|
||||
expect(responseData.error).toContain('Required')
|
||||
})
|
||||
|
||||
it('should return 400 for empty methodId', async () => {
|
||||
const req = new NextRequest('http://localhost:3000/api/copilot/methods', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'x-api-key': 'test-secret-key',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
methodId: '',
|
||||
params: {},
|
||||
}),
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/methods/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(400)
|
||||
const responseData = await response.json()
|
||||
expect(responseData.success).toBe(false)
|
||||
expect(responseData.error).toContain('Method ID is required')
|
||||
})
|
||||
|
||||
it('should return 400 when tool is not found in registry', async () => {
|
||||
mockToolRegistryHas.mockReturnValue(false)
|
||||
|
||||
const req = new NextRequest('http://localhost:3000/api/copilot/methods', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'x-api-key': 'test-secret-key',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
methodId: 'unknown-tool',
|
||||
params: {},
|
||||
}),
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/methods/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(400)
|
||||
const responseData = await response.json()
|
||||
expect(responseData.success).toBe(false)
|
||||
expect(responseData.error).toContain('Unknown method: unknown-tool')
|
||||
expect(responseData.error).toContain('Available methods: test-tool, another-tool')
|
||||
})
|
||||
|
||||
it('should successfully execute a tool without interruption', async () => {
|
||||
const req = new NextRequest('http://localhost:3000/api/copilot/methods', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'x-api-key': 'test-secret-key',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
methodId: 'test-tool',
|
||||
params: { key: 'value' },
|
||||
}),
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/methods/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
const responseData = await response.json()
|
||||
expect(responseData).toEqual({
|
||||
success: true,
|
||||
data: 'Tool executed successfully',
|
||||
})
|
||||
|
||||
expect(mockToolRegistryExecute).toHaveBeenCalledWith('test-tool', { key: 'value' })
|
||||
})
|
||||
|
||||
it('should handle tool execution with default empty params', async () => {
|
||||
const req = new NextRequest('http://localhost:3000/api/copilot/methods', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'x-api-key': 'test-secret-key',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
methodId: 'test-tool',
|
||||
// No params provided
|
||||
}),
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/methods/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
const responseData = await response.json()
|
||||
expect(responseData).toEqual({
|
||||
success: true,
|
||||
data: 'Tool executed successfully',
|
||||
})
|
||||
|
||||
expect(mockToolRegistryExecute).toHaveBeenCalledWith('test-tool', {})
|
||||
})
|
||||
|
||||
it('should return 400 when tool requires interrupt but no toolCallId provided', async () => {
|
||||
mockToolRegistryGet.mockReturnValue({ requiresInterrupt: true })
|
||||
|
||||
const req = new NextRequest('http://localhost:3000/api/copilot/methods', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'x-api-key': 'test-secret-key',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
methodId: 'interrupt-tool',
|
||||
params: {},
|
||||
// No toolCallId provided
|
||||
}),
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/methods/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(400)
|
||||
const responseData = await response.json()
|
||||
expect(responseData.success).toBe(false)
|
||||
expect(responseData.error).toBe(
|
||||
'This tool requires approval but no tool call ID was provided'
|
||||
)
|
||||
})
|
||||
|
||||
it('should handle tool execution with interrupt - user approval', async () => {
|
||||
mockToolRegistryGet.mockReturnValue({ requiresInterrupt: true })
|
||||
|
||||
// Mock Redis to return accepted status immediately (simulate quick approval)
|
||||
mockRedisGet.mockResolvedValue(
|
||||
JSON.stringify({ status: 'accepted', message: 'User approved' })
|
||||
)
|
||||
|
||||
// Reset Date.now mock to not trigger timeout
|
||||
let mockTime = 1640995200000
|
||||
vi.spyOn(Date, 'now').mockImplementation(() => {
|
||||
mockTime += 100 // Small increment to avoid timeout
|
||||
return mockTime
|
||||
})
|
||||
|
||||
const req = new NextRequest('http://localhost:3000/api/copilot/methods', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'x-api-key': 'test-secret-key',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
methodId: 'interrupt-tool',
|
||||
params: { key: 'value' },
|
||||
toolCallId: 'tool-call-123',
|
||||
}),
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/methods/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
const responseData = await response.json()
|
||||
expect(responseData).toEqual({
|
||||
success: true,
|
||||
data: 'Tool executed successfully',
|
||||
})
|
||||
|
||||
// Verify Redis operations
|
||||
expect(mockRedisSet).toHaveBeenCalledWith(
|
||||
'tool_call:tool-call-123',
|
||||
expect.stringContaining('"status":"pending"'),
|
||||
'EX',
|
||||
86400
|
||||
)
|
||||
expect(mockRedisGet).toHaveBeenCalledWith('tool_call:tool-call-123')
|
||||
expect(mockToolRegistryExecute).toHaveBeenCalledWith('interrupt-tool', { key: 'value' })
|
||||
})
|
||||
|
||||
it('should handle tool execution with interrupt - user rejection', async () => {
|
||||
mockToolRegistryGet.mockReturnValue({ requiresInterrupt: true })
|
||||
|
||||
// Mock Redis to return rejected status
|
||||
mockRedisGet.mockResolvedValue(
|
||||
JSON.stringify({ status: 'rejected', message: 'User rejected' })
|
||||
)
|
||||
|
||||
const req = new NextRequest('http://localhost:3000/api/copilot/methods', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'x-api-key': 'test-secret-key',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
methodId: 'interrupt-tool',
|
||||
params: {},
|
||||
toolCallId: 'tool-call-456',
|
||||
}),
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/methods/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(200) // User rejection returns 200
|
||||
const responseData = await response.json()
|
||||
expect(responseData.success).toBe(false)
|
||||
expect(responseData.error).toBe(
|
||||
'The user decided to skip running this tool. This was a user decision.'
|
||||
)
|
||||
|
||||
// Tool should not be executed when rejected
|
||||
expect(mockToolRegistryExecute).not.toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it('should handle tool execution with interrupt - error status', async () => {
|
||||
mockToolRegistryGet.mockReturnValue({ requiresInterrupt: true })
|
||||
|
||||
// Mock Redis to return error status
|
||||
mockRedisGet.mockResolvedValue(
|
||||
JSON.stringify({ status: 'error', message: 'Tool execution failed' })
|
||||
)
|
||||
|
||||
const req = new NextRequest('http://localhost:3000/api/copilot/methods', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'x-api-key': 'test-secret-key',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
methodId: 'interrupt-tool',
|
||||
params: {},
|
||||
toolCallId: 'tool-call-error',
|
||||
}),
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/methods/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(500)
|
||||
const responseData = await response.json()
|
||||
expect(responseData.success).toBe(false)
|
||||
expect(responseData.error).toBe('Tool execution failed')
|
||||
})
|
||||
|
||||
it('should handle tool execution with interrupt - background status', async () => {
|
||||
mockToolRegistryGet.mockReturnValue({ requiresInterrupt: true })
|
||||
|
||||
// Mock Redis to return background status
|
||||
mockRedisGet.mockResolvedValue(
|
||||
JSON.stringify({ status: 'background', message: 'Running in background' })
|
||||
)
|
||||
|
||||
const req = new NextRequest('http://localhost:3000/api/copilot/methods', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'x-api-key': 'test-secret-key',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
methodId: 'interrupt-tool',
|
||||
params: {},
|
||||
toolCallId: 'tool-call-bg',
|
||||
}),
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/methods/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
const responseData = await response.json()
|
||||
expect(responseData).toEqual({
|
||||
success: true,
|
||||
data: 'Tool executed successfully',
|
||||
})
|
||||
|
||||
expect(mockToolRegistryExecute).toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it('should handle tool execution with interrupt - success status', async () => {
|
||||
mockToolRegistryGet.mockReturnValue({ requiresInterrupt: true })
|
||||
|
||||
// Mock Redis to return success status
|
||||
mockRedisGet.mockResolvedValue(
|
||||
JSON.stringify({ status: 'success', message: 'Completed successfully' })
|
||||
)
|
||||
|
||||
const req = new NextRequest('http://localhost:3000/api/copilot/methods', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'x-api-key': 'test-secret-key',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
methodId: 'interrupt-tool',
|
||||
params: {},
|
||||
toolCallId: 'tool-call-success',
|
||||
}),
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/methods/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
const responseData = await response.json()
|
||||
expect(responseData).toEqual({
|
||||
success: true,
|
||||
data: 'Tool executed successfully',
|
||||
})
|
||||
|
||||
expect(mockToolRegistryExecute).toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it('should handle tool execution with interrupt - timeout', async () => {
|
||||
mockToolRegistryGet.mockReturnValue({ requiresInterrupt: true })
|
||||
|
||||
// Mock Redis to never return a status (timeout scenario)
|
||||
mockRedisGet.mockResolvedValue(null)
|
||||
|
||||
// Mock Date.now to trigger timeout quickly
|
||||
let mockTime = 1640995200000
|
||||
vi.spyOn(Date, 'now').mockImplementation(() => {
|
||||
mockTime += 100000 // Add 100 seconds each call to trigger timeout
|
||||
return mockTime
|
||||
})
|
||||
|
||||
const req = new NextRequest('http://localhost:3000/api/copilot/methods', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'x-api-key': 'test-secret-key',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
methodId: 'interrupt-tool',
|
||||
params: {},
|
||||
toolCallId: 'tool-call-timeout',
|
||||
}),
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/methods/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(408) // Request Timeout
|
||||
const responseData = await response.json()
|
||||
expect(responseData.success).toBe(false)
|
||||
expect(responseData.error).toBe('Tool execution request timed out')
|
||||
|
||||
expect(mockToolRegistryExecute).not.toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it('should handle unexpected status in interrupt flow', async () => {
|
||||
mockToolRegistryGet.mockReturnValue({ requiresInterrupt: true })
|
||||
|
||||
// Mock Redis to return unexpected status
|
||||
mockRedisGet.mockResolvedValue(
|
||||
JSON.stringify({ status: 'unknown-status', message: 'Unknown' })
|
||||
)
|
||||
|
||||
const req = new NextRequest('http://localhost:3000/api/copilot/methods', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'x-api-key': 'test-secret-key',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
methodId: 'interrupt-tool',
|
||||
params: {},
|
||||
toolCallId: 'tool-call-unknown',
|
||||
}),
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/methods/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(500)
|
||||
const responseData = await response.json()
|
||||
expect(responseData.success).toBe(false)
|
||||
expect(responseData.error).toBe('Unexpected tool call status: unknown-status')
|
||||
})
|
||||
|
||||
it('should handle Redis client unavailable for interrupt flow', async () => {
|
||||
mockToolRegistryGet.mockReturnValue({ requiresInterrupt: true })
|
||||
mockGetRedisClient.mockReturnValue(null)
|
||||
|
||||
const req = new NextRequest('http://localhost:3000/api/copilot/methods', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'x-api-key': 'test-secret-key',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
methodId: 'interrupt-tool',
|
||||
params: {},
|
||||
toolCallId: 'tool-call-no-redis',
|
||||
}),
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/methods/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(408) // Timeout due to Redis unavailable
|
||||
const responseData = await response.json()
|
||||
expect(responseData.success).toBe(false)
|
||||
expect(responseData.error).toBe('Tool execution request timed out')
|
||||
})
|
||||
|
||||
it('should handle no_op tool with confirmation message', async () => {
|
||||
mockToolRegistryGet.mockReturnValue({ requiresInterrupt: true })
|
||||
|
||||
// Mock Redis to return accepted status with message
|
||||
mockRedisGet.mockResolvedValue(
|
||||
JSON.stringify({ status: 'accepted', message: 'Confirmation message' })
|
||||
)
|
||||
|
||||
const req = new NextRequest('http://localhost:3000/api/copilot/methods', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'x-api-key': 'test-secret-key',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
methodId: 'no_op',
|
||||
params: { existing: 'param' },
|
||||
toolCallId: 'tool-call-noop',
|
||||
}),
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/methods/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
|
||||
// Verify confirmation message was added to params
|
||||
expect(mockToolRegistryExecute).toHaveBeenCalledWith('no_op', {
|
||||
existing: 'param',
|
||||
confirmationMessage: 'Confirmation message',
|
||||
})
|
||||
})
|
||||
|
||||
it('should handle Redis errors in interrupt flow', async () => {
|
||||
mockToolRegistryGet.mockReturnValue({ requiresInterrupt: true })
|
||||
|
||||
// Mock Redis to throw an error
|
||||
mockRedisGet.mockRejectedValue(new Error('Redis connection failed'))
|
||||
|
||||
const req = new NextRequest('http://localhost:3000/api/copilot/methods', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'x-api-key': 'test-secret-key',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
methodId: 'interrupt-tool',
|
||||
params: {},
|
||||
toolCallId: 'tool-call-redis-error',
|
||||
}),
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/methods/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(408) // Timeout due to Redis error
|
||||
const responseData = await response.json()
|
||||
expect(responseData.success).toBe(false)
|
||||
expect(responseData.error).toBe('Tool execution request timed out')
|
||||
})
|
||||
|
||||
it('should handle tool execution failure', async () => {
|
||||
mockToolRegistryExecute.mockResolvedValue({
|
||||
success: false,
|
||||
error: 'Tool execution failed',
|
||||
})
|
||||
|
||||
const req = new NextRequest('http://localhost:3000/api/copilot/methods', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'x-api-key': 'test-secret-key',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
methodId: 'failing-tool',
|
||||
params: {},
|
||||
}),
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/methods/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(200) // Still returns 200, but with success: false
|
||||
const responseData = await response.json()
|
||||
expect(responseData).toEqual({
|
||||
success: false,
|
||||
error: 'Tool execution failed',
|
||||
})
|
||||
})
|
||||
|
||||
it('should handle JSON parsing errors in request body', async () => {
|
||||
const req = new NextRequest('http://localhost:3000/api/copilot/methods', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'x-api-key': 'test-secret-key',
|
||||
},
|
||||
body: '{invalid-json',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/methods/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(500)
|
||||
const responseData = await response.json()
|
||||
expect(responseData.success).toBe(false)
|
||||
expect(responseData.error).toContain('JSON')
|
||||
})
|
||||
|
||||
it('should handle tool registry execution throwing an error', async () => {
|
||||
mockToolRegistryExecute.mockRejectedValue(new Error('Registry execution failed'))
|
||||
|
||||
const req = new NextRequest('http://localhost:3000/api/copilot/methods', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'x-api-key': 'test-secret-key',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
methodId: 'error-tool',
|
||||
params: {},
|
||||
}),
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/methods/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(500)
|
||||
const responseData = await response.json()
|
||||
expect(responseData.success).toBe(false)
|
||||
expect(responseData.error).toBe('Registry execution failed')
|
||||
})
|
||||
|
||||
it('should handle old format Redis status (string instead of JSON)', async () => {
|
||||
mockToolRegistryGet.mockReturnValue({ requiresInterrupt: true })
|
||||
|
||||
// Mock Redis to return old format (direct status string)
|
||||
mockRedisGet.mockResolvedValue('accepted')
|
||||
|
||||
const req = new NextRequest('http://localhost:3000/api/copilot/methods', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'x-api-key': 'test-secret-key',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
methodId: 'interrupt-tool',
|
||||
params: {},
|
||||
toolCallId: 'tool-call-old-format',
|
||||
}),
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/copilot/methods/route')
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
const responseData = await response.json()
|
||||
expect(responseData).toEqual({
|
||||
success: true,
|
||||
data: 'Tool executed successfully',
|
||||
})
|
||||
|
||||
expect(mockToolRegistryExecute).toHaveBeenCalled()
|
||||
})
|
||||
})
|
||||
})
|
||||
427
apps/sim/app/api/copilot/methods/route.ts
Normal file
427
apps/sim/app/api/copilot/methods/route.ts
Normal file
@@ -0,0 +1,427 @@
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { z } from 'zod'
|
||||
import { copilotToolRegistry } from '@/lib/copilot/tools/server-tools/registry'
|
||||
import type { NotificationStatus } from '@/lib/copilot/types'
|
||||
import { env } from '@/lib/env'
|
||||
import { createLogger } from '@/lib/logs/console/logger'
|
||||
import { getRedisClient } from '@/lib/redis'
|
||||
import { createErrorResponse } from '@/app/api/copilot/methods/utils'
|
||||
|
||||
const logger = createLogger('CopilotMethodsAPI')
|
||||
|
||||
/**
|
||||
* Add a tool call to Redis with 'pending' status
|
||||
*/
|
||||
async function addToolToRedis(toolCallId: string): Promise<void> {
|
||||
if (!toolCallId) {
|
||||
logger.warn('addToolToRedis: No tool call ID provided')
|
||||
return
|
||||
}
|
||||
|
||||
const redis = getRedisClient()
|
||||
if (!redis) {
|
||||
logger.warn('addToolToRedis: Redis client not available')
|
||||
return
|
||||
}
|
||||
|
||||
try {
|
||||
const key = `tool_call:${toolCallId}`
|
||||
const status: NotificationStatus = 'pending'
|
||||
|
||||
// Store as JSON object for consistency with confirm API
|
||||
const toolCallData = {
|
||||
status,
|
||||
message: null,
|
||||
timestamp: new Date().toISOString(),
|
||||
}
|
||||
|
||||
// Set with 24 hour expiry (86400 seconds)
|
||||
await redis.set(key, JSON.stringify(toolCallData), 'EX', 86400)
|
||||
|
||||
logger.info('Tool call added to Redis', {
|
||||
toolCallId,
|
||||
key,
|
||||
status,
|
||||
})
|
||||
} catch (error) {
|
||||
logger.error('Failed to add tool call to Redis', {
|
||||
toolCallId,
|
||||
error: error instanceof Error ? error.message : 'Unknown error',
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Poll Redis for tool call status updates
|
||||
* Returns when status changes to 'Accepted' or 'Rejected', or times out after 60 seconds
|
||||
*/
|
||||
async function pollRedisForTool(
|
||||
toolCallId: string
|
||||
): Promise<{ status: NotificationStatus; message?: string } | null> {
|
||||
const redis = getRedisClient()
|
||||
if (!redis) {
|
||||
logger.warn('pollRedisForTool: Redis client not available')
|
||||
return null
|
||||
}
|
||||
|
||||
const key = `tool_call:${toolCallId}`
|
||||
const timeout = 300000 // 5 minutes
|
||||
const pollInterval = 1000 // 1 second
|
||||
const startTime = Date.now()
|
||||
|
||||
logger.info('Starting to poll Redis for tool call status', {
|
||||
toolCallId,
|
||||
timeout,
|
||||
pollInterval,
|
||||
})
|
||||
|
||||
while (Date.now() - startTime < timeout) {
|
||||
try {
|
||||
const redisValue = await redis.get(key)
|
||||
if (!redisValue) {
|
||||
// Wait before next poll
|
||||
await new Promise((resolve) => setTimeout(resolve, pollInterval))
|
||||
continue
|
||||
}
|
||||
|
||||
let status: NotificationStatus | null = null
|
||||
let message: string | undefined
|
||||
|
||||
// Try to parse as JSON (new format), fallback to string (old format)
|
||||
try {
|
||||
const parsedData = JSON.parse(redisValue)
|
||||
status = parsedData.status as NotificationStatus
|
||||
message = parsedData.message || undefined
|
||||
} catch {
|
||||
// Fallback to old format (direct status string)
|
||||
status = redisValue as NotificationStatus
|
||||
}
|
||||
|
||||
if (status !== 'pending') {
|
||||
// Log the message found in redis prominently - always log, even if message is null/undefined
|
||||
logger.info('Redis poller found non-pending status', {
|
||||
toolCallId,
|
||||
foundMessage: message,
|
||||
messageType: typeof message,
|
||||
messageIsNull: message === null,
|
||||
messageIsUndefined: message === undefined,
|
||||
status,
|
||||
duration: Date.now() - startTime,
|
||||
rawRedisValue: redisValue,
|
||||
})
|
||||
|
||||
logger.info('Tool call status resolved', {
|
||||
toolCallId,
|
||||
status,
|
||||
message,
|
||||
duration: Date.now() - startTime,
|
||||
rawRedisValue: redisValue,
|
||||
parsedAsJSON: redisValue
|
||||
? (() => {
|
||||
try {
|
||||
return JSON.parse(redisValue)
|
||||
} catch {
|
||||
return 'failed-to-parse'
|
||||
}
|
||||
})()
|
||||
: null,
|
||||
})
|
||||
|
||||
// Special logging for set environment variables tool when Redis status is found
|
||||
if (toolCallId && (status === 'accepted' || status === 'rejected')) {
|
||||
logger.info('SET_ENV_VARS: Redis polling found status update', {
|
||||
toolCallId,
|
||||
foundStatus: status,
|
||||
redisMessage: message,
|
||||
pollDuration: Date.now() - startTime,
|
||||
redisKey: `tool_call:${toolCallId}`,
|
||||
})
|
||||
}
|
||||
|
||||
return { status, message }
|
||||
}
|
||||
|
||||
// Wait before next poll
|
||||
await new Promise((resolve) => setTimeout(resolve, pollInterval))
|
||||
} catch (error) {
|
||||
logger.error('Error polling Redis for tool call status', {
|
||||
toolCallId,
|
||||
error: error instanceof Error ? error.message : 'Unknown error',
|
||||
})
|
||||
return null
|
||||
}
|
||||
}
|
||||
|
||||
logger.warn('Tool call polling timed out', {
|
||||
toolCallId,
|
||||
timeout,
|
||||
})
|
||||
return null
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle tool calls that require user interruption/approval
|
||||
* Returns { approved: boolean, rejected: boolean, error?: boolean, message?: string } to distinguish between rejection, timeout, and error
|
||||
*/
|
||||
async function interruptHandler(
|
||||
toolCallId: string
|
||||
): Promise<{ approved: boolean; rejected: boolean; error?: boolean; message?: string }> {
|
||||
if (!toolCallId) {
|
||||
logger.error('interruptHandler: No tool call ID provided')
|
||||
return { approved: false, rejected: false, error: true, message: 'No tool call ID provided' }
|
||||
}
|
||||
|
||||
logger.info('Starting interrupt handler for tool call', { toolCallId })
|
||||
|
||||
try {
|
||||
// Step 1: Add tool to Redis with 'pending' status
|
||||
await addToolToRedis(toolCallId)
|
||||
|
||||
// Step 2: Poll Redis for status update
|
||||
const result = await pollRedisForTool(toolCallId)
|
||||
|
||||
if (!result) {
|
||||
logger.error('Failed to get tool call status or timed out', { toolCallId })
|
||||
return { approved: false, rejected: false }
|
||||
}
|
||||
|
||||
const { status, message } = result
|
||||
|
||||
if (status === 'rejected') {
|
||||
logger.info('Tool execution rejected by user', { toolCallId, message })
|
||||
return { approved: false, rejected: true, message }
|
||||
}
|
||||
|
||||
if (status === 'accepted') {
|
||||
logger.info('Tool execution approved by user', { toolCallId, message })
|
||||
return { approved: true, rejected: false, message }
|
||||
}
|
||||
|
||||
if (status === 'error') {
|
||||
logger.error('Tool execution failed with error', { toolCallId, message })
|
||||
return { approved: false, rejected: false, error: true, message }
|
||||
}
|
||||
|
||||
if (status === 'background') {
|
||||
logger.info('Tool execution moved to background', { toolCallId, message })
|
||||
return { approved: true, rejected: false, message }
|
||||
}
|
||||
|
||||
if (status === 'success') {
|
||||
logger.info('Tool execution completed successfully', { toolCallId, message })
|
||||
return { approved: true, rejected: false, message }
|
||||
}
|
||||
|
||||
logger.warn('Unexpected tool call status', { toolCallId, status, message })
|
||||
return {
|
||||
approved: false,
|
||||
rejected: false,
|
||||
error: true,
|
||||
message: `Unexpected tool call status: ${status}`,
|
||||
}
|
||||
} catch (error) {
|
||||
const errorMessage = error instanceof Error ? error.message : 'Unknown error'
|
||||
logger.error('Error in interrupt handler', {
|
||||
toolCallId,
|
||||
error: errorMessage,
|
||||
})
|
||||
return {
|
||||
approved: false,
|
||||
rejected: false,
|
||||
error: true,
|
||||
message: `Interrupt handler error: ${errorMessage}`,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Schema for method execution
// - methodId:   registry ID of the server tool to run (required, non-empty)
// - params:     arbitrary tool parameters, defaults to {}
// - toolCallId: correlates the Redis approval flow for interrupt tools;
//               defaults to null for tools that need no confirmation
const MethodExecutionSchema = z.object({
  methodId: z.string().min(1, 'Method ID is required'),
  params: z.record(z.any()).optional().default({}),
  toolCallId: z.string().nullable().optional().default(null),
})
|
||||
|
||||
// Simple internal API key authentication
|
||||
function checkInternalApiKey(req: NextRequest) {
|
||||
const apiKey = req.headers.get('x-api-key')
|
||||
const expectedApiKey = env.INTERNAL_API_SECRET
|
||||
|
||||
if (!expectedApiKey) {
|
||||
return { success: false, error: 'Internal API key not configured' }
|
||||
}
|
||||
|
||||
if (!apiKey) {
|
||||
return { success: false, error: 'API key required' }
|
||||
}
|
||||
|
||||
if (apiKey !== expectedApiKey) {
|
||||
return { success: false, error: 'Invalid API key' }
|
||||
}
|
||||
|
||||
return { success: true }
|
||||
}
|
||||
|
||||
/**
 * POST /api/copilot/methods
 * Execute a method based on methodId with internal API key auth
 *
 * Flow: authenticate via x-api-key -> validate body against
 * MethodExecutionSchema -> verify the tool exists in the registry ->
 * (for interrupt tools) wait for user approval via Redis -> execute the
 * tool and return its result as JSON.
 */
export async function POST(req: NextRequest) {
  // Request-scoped ID and start time for log correlation / duration metrics.
  const requestId = crypto.randomUUID()
  const startTime = Date.now()

  try {
    // Check authentication (internal API key)
    const authResult = checkInternalApiKey(req)
    if (!authResult.success) {
      return NextResponse.json(createErrorResponse(authResult.error || 'Authentication failed'), {
        status: 401,
      })
    }

    // Zod parse throws on invalid input; handled in the catch block below.
    const body = await req.json()
    const { methodId, params, toolCallId } = MethodExecutionSchema.parse(body)

    logger.info(`[${requestId}] Method execution request: ${methodId}`, {
      methodId,
      toolCallId,
      hasParams: !!params && Object.keys(params).length > 0,
    })

    // Check if tool exists in registry
    if (!copilotToolRegistry.has(methodId)) {
      logger.error(`[${requestId}] Tool not found in registry: ${methodId}`, {
        methodId,
        toolCallId,
        availableTools: copilotToolRegistry.getAvailableIds(),
        registrySize: copilotToolRegistry.getAvailableIds().length,
      })
      return NextResponse.json(
        createErrorResponse(
          `Unknown method: ${methodId}. Available methods: ${copilotToolRegistry.getAvailableIds().join(', ')}`
        ),
        { status: 400 }
      )
    }

    logger.info(`[${requestId}] Tool found in registry: ${methodId}`, {
      toolCallId,
    })

    // Check if the tool requires interrupt/approval
    const tool = copilotToolRegistry.get(methodId)
    if (tool?.requiresInterrupt) {
      // Interrupt tools need a toolCallId to track the approval in Redis.
      if (!toolCallId) {
        logger.warn(`[${requestId}] Tool requires interrupt but no toolCallId provided`, {
          methodId,
        })
        return NextResponse.json(
          createErrorResponse('This tool requires approval but no tool call ID was provided'),
          { status: 400 }
        )
      }

      logger.info(`[${requestId}] Tool requires interrupt, starting approval process`, {
        methodId,
        toolCallId,
      })

      // Handle interrupt flow — blocks until the user decides or polling
      // times out (see interruptHandler/pollRedisForTool).
      const { approved, rejected, error, message } = await interruptHandler(toolCallId)

      if (rejected) {
        logger.info(`[${requestId}] Tool execution rejected by user`, {
          methodId,
          toolCallId,
          message,
        })
        return NextResponse.json(
          createErrorResponse(
            'The user decided to skip running this tool. This was a user decision.'
          ),
          { status: 200 } // Changed to 200 - user rejection is a valid response
        )
      }

      if (error) {
        logger.error(`[${requestId}] Tool execution failed with error`, {
          methodId,
          toolCallId,
          message,
        })
        return NextResponse.json(
          createErrorResponse(message || 'Tool execution failed with unknown error'),
          { status: 500 } // 500 Internal Server Error
        )
      }

      // Not approved, not rejected, no error => approval never arrived.
      if (!approved) {
        logger.warn(`[${requestId}] Tool execution timed out`, {
          methodId,
          toolCallId,
        })
        return NextResponse.json(
          createErrorResponse('Tool execution request timed out'),
          { status: 408 } // 408 Request Timeout
        )
      }

      logger.info(`[${requestId}] Tool execution approved by user`, {
        methodId,
        toolCallId,
        message,
      })

      // For noop tool, pass the confirmation message as a parameter
      if (methodId === 'no_op' && message) {
        params.confirmationMessage = message
      }
    }

    // Execute the tool directly via registry
    const result = await copilotToolRegistry.execute(methodId, params)

    logger.info(`[${requestId}] Tool execution result:`, {
      methodId,
      toolCallId,
      success: result.success,
      hasData: !!result.data,
      hasError: !!result.error,
    })

    const duration = Date.now() - startTime
    logger.info(`[${requestId}] Method execution completed: ${methodId}`, {
      methodId,
      toolCallId,
      duration,
      success: result.success,
    })

    // The registry result already matches the API's response envelope.
    return NextResponse.json(result)
  } catch (error) {
    const duration = Date.now() - startTime

    // Body validation failures surface as 400s with the Zod messages joined.
    if (error instanceof z.ZodError) {
      logger.error(`[${requestId}] Request validation error:`, {
        duration,
        errors: error.errors,
      })
      return NextResponse.json(
        createErrorResponse(
          `Invalid request data: ${error.errors.map((e) => e.message).join(', ')}`
        ),
        { status: 400 }
      )
    }

    logger.error(`[${requestId}] Unexpected error:`, {
      duration,
      error: error instanceof Error ? error.message : 'Unknown error',
      stack: error instanceof Error ? error.stack : undefined,
    })

    return NextResponse.json(
      createErrorResponse(error instanceof Error ? error.message : 'Internal server error'),
      { status: 500 }
    )
  }
}
|
||||
14
apps/sim/app/api/copilot/methods/utils.ts
Normal file
14
apps/sim/app/api/copilot/methods/utils.ts
Normal file
@@ -0,0 +1,14 @@
|
||||
import type { CopilotToolResponse } from '@/lib/copilot/tools/server-tools/base'
|
||||
import { createLogger } from '@/lib/logs/console/logger'
|
||||
|
||||
const logger = createLogger('CopilotMethodsUtils')
|
||||
|
||||
/**
|
||||
* Create a standardized error response
|
||||
*/
|
||||
export function createErrorResponse(error: string): CopilotToolResponse {
|
||||
return {
|
||||
success: false,
|
||||
error,
|
||||
}
|
||||
}
|
||||
@@ -1,429 +0,0 @@
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { z } from 'zod'
|
||||
import { getSession } from '@/lib/auth'
|
||||
import {
|
||||
createChat,
|
||||
deleteChat,
|
||||
generateChatTitle,
|
||||
getChat,
|
||||
listChats,
|
||||
sendMessage,
|
||||
updateChat,
|
||||
} from '@/lib/copilot/service'
|
||||
import { createLogger } from '@/lib/logs/console/logger'
|
||||
|
||||
const logger = createLogger('CopilotAPI')
|
||||
|
||||
// Interface for StreamingExecution response
|
||||
interface StreamingExecution {
|
||||
stream: ReadableStream
|
||||
execution: Promise<any>
|
||||
}
|
||||
|
||||
// Schema for sending messages
|
||||
const SendMessageSchema = z.object({
|
||||
message: z.string().min(1, 'Message is required'),
|
||||
chatId: z.string().optional(),
|
||||
workflowId: z.string().optional(),
|
||||
mode: z.enum(['ask', 'agent']).optional().default('ask'),
|
||||
createNewChat: z.boolean().optional().default(false),
|
||||
stream: z.boolean().optional().default(false),
|
||||
})
|
||||
|
||||
// Schema for docs queries
|
||||
const DocsQuerySchema = z.object({
|
||||
query: z.string().min(1, 'Query is required'),
|
||||
topK: z.number().min(1).max(20).default(5),
|
||||
provider: z.string().optional(),
|
||||
model: z.string().optional(),
|
||||
stream: z.boolean().optional().default(false),
|
||||
chatId: z.string().optional(),
|
||||
workflowId: z.string().optional(),
|
||||
createNewChat: z.boolean().optional().default(false),
|
||||
})
|
||||
|
||||
// Schema for creating chats
|
||||
const CreateChatSchema = z.object({
|
||||
workflowId: z.string().min(1, 'Workflow ID is required'),
|
||||
title: z.string().optional(),
|
||||
initialMessage: z.string().optional(),
|
||||
})
|
||||
|
||||
// Schema for updating chats
|
||||
const UpdateChatSchema = z.object({
|
||||
chatId: z.string().min(1, 'Chat ID is required'),
|
||||
messages: z
|
||||
.array(
|
||||
z.object({
|
||||
id: z.string(),
|
||||
role: z.enum(['user', 'assistant', 'system']),
|
||||
content: z.string(),
|
||||
timestamp: z.string(),
|
||||
citations: z
|
||||
.array(
|
||||
z.object({
|
||||
id: z.number(),
|
||||
title: z.string(),
|
||||
url: z.string(),
|
||||
similarity: z.number().optional(),
|
||||
})
|
||||
)
|
||||
.optional(),
|
||||
})
|
||||
)
|
||||
.optional(),
|
||||
title: z.string().optional(),
|
||||
})
|
||||
|
||||
// Schema for listing chats
|
||||
const ListChatsSchema = z.object({
|
||||
workflowId: z.string().min(1, 'Workflow ID is required'),
|
||||
limit: z.number().min(1).max(100).optional().default(50),
|
||||
offset: z.number().min(0).optional().default(0),
|
||||
})
|
||||
|
||||
/**
|
||||
* POST /api/copilot
|
||||
* Send a message to the copilot
|
||||
*/
|
||||
export async function POST(req: NextRequest) {
|
||||
const requestId = crypto.randomUUID()
|
||||
|
||||
try {
|
||||
const body = await req.json()
|
||||
const { message, chatId, workflowId, mode, createNewChat, stream } =
|
||||
SendMessageSchema.parse(body)
|
||||
|
||||
const session = await getSession()
|
||||
if (!session?.user?.id) {
|
||||
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
|
||||
}
|
||||
|
||||
logger.info(`[${requestId}] Copilot message: "${message}"`, {
|
||||
chatId,
|
||||
workflowId,
|
||||
mode,
|
||||
createNewChat,
|
||||
stream,
|
||||
userId: session.user.id,
|
||||
})
|
||||
|
||||
// Send message using the service
|
||||
const result = await sendMessage({
|
||||
message,
|
||||
chatId,
|
||||
workflowId,
|
||||
mode,
|
||||
createNewChat,
|
||||
stream,
|
||||
userId: session.user.id,
|
||||
})
|
||||
|
||||
// Handle streaming response (ReadableStream or StreamingExecution)
|
||||
let streamToRead: ReadableStream | null = null
|
||||
|
||||
// Debug logging to see what we actually got
|
||||
logger.info(`[${requestId}] Response type analysis:`, {
|
||||
responseType: typeof result.response,
|
||||
isReadableStream: result.response instanceof ReadableStream,
|
||||
hasStreamProperty:
|
||||
typeof result.response === 'object' && result.response && 'stream' in result.response,
|
||||
hasExecutionProperty:
|
||||
typeof result.response === 'object' && result.response && 'execution' in result.response,
|
||||
responseKeys:
|
||||
typeof result.response === 'object' && result.response ? Object.keys(result.response) : [],
|
||||
})
|
||||
|
||||
if (result.response instanceof ReadableStream) {
|
||||
logger.info(`[${requestId}] Direct ReadableStream detected`)
|
||||
streamToRead = result.response
|
||||
} else if (
|
||||
typeof result.response === 'object' &&
|
||||
result.response &&
|
||||
'stream' in result.response &&
|
||||
'execution' in result.response
|
||||
) {
|
||||
// Handle StreamingExecution (from providers with tool calls)
|
||||
logger.info(`[${requestId}] StreamingExecution detected`)
|
||||
const streamingExecution = result.response as StreamingExecution
|
||||
streamToRead = streamingExecution.stream
|
||||
|
||||
// No need to extract citations - LLM generates direct markdown links
|
||||
}
|
||||
|
||||
if (streamToRead) {
|
||||
logger.info(`[${requestId}] Returning streaming response`)
|
||||
|
||||
const encoder = new TextEncoder()
|
||||
|
||||
return new Response(
|
||||
new ReadableStream({
|
||||
async start(controller) {
|
||||
const reader = streamToRead!.getReader()
|
||||
let accumulatedResponse = ''
|
||||
|
||||
// Send initial metadata
|
||||
const metadata = {
|
||||
type: 'metadata',
|
||||
chatId: result.chatId,
|
||||
metadata: {
|
||||
requestId,
|
||||
message,
|
||||
},
|
||||
}
|
||||
controller.enqueue(encoder.encode(`data: ${JSON.stringify(metadata)}\n\n`))
|
||||
|
||||
try {
|
||||
while (true) {
|
||||
const { done, value } = await reader.read()
|
||||
if (done) break
|
||||
|
||||
const chunkText = new TextDecoder().decode(value)
|
||||
accumulatedResponse += chunkText
|
||||
|
||||
const contentChunk = {
|
||||
type: 'content',
|
||||
content: chunkText,
|
||||
}
|
||||
controller.enqueue(encoder.encode(`data: ${JSON.stringify(contentChunk)}\n\n`))
|
||||
}
|
||||
|
||||
// Send completion signal
|
||||
const completion = {
|
||||
type: 'complete',
|
||||
finalContent: accumulatedResponse,
|
||||
}
|
||||
controller.enqueue(encoder.encode(`data: ${JSON.stringify(completion)}\n\n`))
|
||||
controller.close()
|
||||
} catch (error) {
|
||||
logger.error(`[${requestId}] Streaming error:`, error)
|
||||
const errorChunk = {
|
||||
type: 'error',
|
||||
error: 'Streaming failed',
|
||||
}
|
||||
controller.enqueue(encoder.encode(`data: ${JSON.stringify(errorChunk)}\n\n`))
|
||||
controller.close()
|
||||
}
|
||||
},
|
||||
}),
|
||||
{
|
||||
headers: {
|
||||
'Content-Type': 'text/event-stream',
|
||||
'Cache-Control': 'no-cache',
|
||||
Connection: 'keep-alive',
|
||||
},
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
// Handle non-streaming response
|
||||
logger.info(`[${requestId}] Chat response generated successfully`)
|
||||
|
||||
return NextResponse.json({
|
||||
success: true,
|
||||
response: result.response,
|
||||
chatId: result.chatId,
|
||||
metadata: {
|
||||
requestId,
|
||||
message,
|
||||
},
|
||||
})
|
||||
} catch (error) {
|
||||
if (error instanceof z.ZodError) {
|
||||
return NextResponse.json(
|
||||
{ error: 'Invalid request data', details: error.errors },
|
||||
{ status: 400 }
|
||||
)
|
||||
}
|
||||
|
||||
logger.error(`[${requestId}] Copilot error:`, error)
|
||||
return NextResponse.json({ error: 'Internal server error' }, { status: 500 })
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* GET /api/copilot
|
||||
* List chats or get a specific chat
|
||||
*/
|
||||
export async function GET(req: NextRequest) {
|
||||
try {
|
||||
const session = await getSession()
|
||||
if (!session?.user?.id) {
|
||||
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
|
||||
}
|
||||
|
||||
const { searchParams } = new URL(req.url)
|
||||
const chatId = searchParams.get('chatId')
|
||||
|
||||
// If chatId is provided, get specific chat
|
||||
if (chatId) {
|
||||
const chat = await getChat(chatId, session.user.id)
|
||||
if (!chat) {
|
||||
return NextResponse.json({ error: 'Chat not found' }, { status: 404 })
|
||||
}
|
||||
|
||||
return NextResponse.json({
|
||||
success: true,
|
||||
chat,
|
||||
})
|
||||
}
|
||||
|
||||
// Otherwise, list chats
|
||||
const workflowId = searchParams.get('workflowId')
|
||||
const limit = Number.parseInt(searchParams.get('limit') || '50')
|
||||
const offset = Number.parseInt(searchParams.get('offset') || '0')
|
||||
|
||||
if (!workflowId) {
|
||||
return NextResponse.json(
|
||||
{ error: 'workflowId is required for listing chats' },
|
||||
{ status: 400 }
|
||||
)
|
||||
}
|
||||
|
||||
const chats = await listChats(session.user.id, workflowId, { limit, offset })
|
||||
|
||||
return NextResponse.json({
|
||||
success: true,
|
||||
chats,
|
||||
})
|
||||
} catch (error) {
|
||||
logger.error('Failed to handle GET request:', error)
|
||||
return NextResponse.json({ error: 'Internal server error' }, { status: 500 })
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* PUT /api/copilot
|
||||
* Create a new chat
|
||||
*/
|
||||
export async function PUT(req: NextRequest) {
|
||||
try {
|
||||
const session = await getSession()
|
||||
if (!session?.user?.id) {
|
||||
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
|
||||
}
|
||||
|
||||
const body = await req.json()
|
||||
const { workflowId, title, initialMessage } = CreateChatSchema.parse(body)
|
||||
|
||||
logger.info(`Creating new chat for user ${session.user.id}, workflow ${workflowId}`)
|
||||
|
||||
const chat = await createChat(session.user.id, workflowId, {
|
||||
title,
|
||||
initialMessage,
|
||||
})
|
||||
|
||||
logger.info(`Created chat ${chat.id} for user ${session.user.id}`)
|
||||
|
||||
return NextResponse.json({
|
||||
success: true,
|
||||
chat,
|
||||
})
|
||||
} catch (error) {
|
||||
if (error instanceof z.ZodError) {
|
||||
return NextResponse.json(
|
||||
{ error: 'Invalid request data', details: error.errors },
|
||||
{ status: 400 }
|
||||
)
|
||||
}
|
||||
|
||||
logger.error('Failed to create chat:', error)
|
||||
return NextResponse.json({ error: 'Internal server error' }, { status: 500 })
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* PATCH /api/copilot
|
||||
* Update a chat with new messages
|
||||
*/
|
||||
export async function PATCH(req: NextRequest) {
|
||||
try {
|
||||
const session = await getSession()
|
||||
if (!session?.user?.id) {
|
||||
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
|
||||
}
|
||||
|
||||
const body = await req.json()
|
||||
const { chatId, messages, title } = UpdateChatSchema.parse(body)
|
||||
|
||||
logger.info(`Updating chat ${chatId} for user ${session.user.id}`)
|
||||
|
||||
// Get the current chat to check if it has a title
|
||||
const existingChat = await getChat(chatId, session.user.id)
|
||||
|
||||
let titleToUse = title
|
||||
|
||||
// Generate title if chat doesn't have one and we have messages
|
||||
if (!titleToUse && existingChat && !existingChat.title && messages && messages.length > 0) {
|
||||
const firstUserMessage = messages.find((msg) => msg.role === 'user')
|
||||
if (firstUserMessage) {
|
||||
logger.info('Generating LLM-based title for chat without title')
|
||||
try {
|
||||
titleToUse = await generateChatTitle(firstUserMessage.content)
|
||||
logger.info(`Generated title: ${titleToUse}`)
|
||||
} catch (error) {
|
||||
logger.error('Failed to generate chat title:', error)
|
||||
titleToUse = 'New Chat'
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const chat = await updateChat(chatId, session.user.id, {
|
||||
messages,
|
||||
title: titleToUse,
|
||||
})
|
||||
|
||||
if (!chat) {
|
||||
return NextResponse.json({ error: 'Chat not found or access denied' }, { status: 404 })
|
||||
}
|
||||
|
||||
return NextResponse.json({
|
||||
success: true,
|
||||
chat,
|
||||
})
|
||||
} catch (error) {
|
||||
if (error instanceof z.ZodError) {
|
||||
return NextResponse.json(
|
||||
{ error: 'Invalid request data', details: error.errors },
|
||||
{ status: 400 }
|
||||
)
|
||||
}
|
||||
|
||||
logger.error('Failed to update chat:', error)
|
||||
return NextResponse.json({ error: 'Internal server error' }, { status: 500 })
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* DELETE /api/copilot
|
||||
* Delete a chat
|
||||
*/
|
||||
export async function DELETE(req: NextRequest) {
|
||||
try {
|
||||
const session = await getSession()
|
||||
if (!session?.user?.id) {
|
||||
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
|
||||
}
|
||||
|
||||
const { searchParams } = new URL(req.url)
|
||||
const chatId = searchParams.get('chatId')
|
||||
|
||||
if (!chatId) {
|
||||
return NextResponse.json({ error: 'chatId is required' }, { status: 400 })
|
||||
}
|
||||
|
||||
const success = await deleteChat(chatId, session.user.id)
|
||||
|
||||
if (!success) {
|
||||
return NextResponse.json({ error: 'Chat not found or access denied' }, { status: 404 })
|
||||
}
|
||||
|
||||
return NextResponse.json({
|
||||
success: true,
|
||||
message: 'Chat deleted successfully',
|
||||
})
|
||||
} catch (error) {
|
||||
logger.error('Failed to delete chat:', error)
|
||||
return NextResponse.json({ error: 'Internal server error' }, { status: 500 })
|
||||
}
|
||||
}
|
||||
@@ -1,76 +0,0 @@
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { searchDocumentation } from '@/lib/copilot/service'
|
||||
import { createLogger } from '@/lib/logs/console/logger'
|
||||
|
||||
const logger = createLogger('DocsSearchAPI')
|
||||
|
||||
// Request and response type definitions
|
||||
interface DocsSearchRequest {
|
||||
query: string
|
||||
topK?: number
|
||||
}
|
||||
|
||||
interface DocsSearchResult {
|
||||
id: number
|
||||
title: string
|
||||
url: string
|
||||
content: string
|
||||
similarity: number
|
||||
}
|
||||
|
||||
interface DocsSearchSuccessResponse {
|
||||
success: true
|
||||
results: DocsSearchResult[]
|
||||
query: string
|
||||
totalResults: number
|
||||
searchTime?: number
|
||||
}
|
||||
|
||||
interface DocsSearchErrorResponse {
|
||||
success: false
|
||||
error: string
|
||||
}
|
||||
|
||||
export async function POST(
|
||||
request: NextRequest
|
||||
): Promise<NextResponse<DocsSearchSuccessResponse | DocsSearchErrorResponse>> {
|
||||
try {
|
||||
const requestBody: DocsSearchRequest = await request.json()
|
||||
const { query, topK = 10 } = requestBody
|
||||
|
||||
if (!query) {
|
||||
const errorResponse: DocsSearchErrorResponse = {
|
||||
success: false,
|
||||
error: 'Query is required',
|
||||
}
|
||||
return NextResponse.json(errorResponse, { status: 400 })
|
||||
}
|
||||
|
||||
logger.info('Executing documentation search', { query, topK })
|
||||
|
||||
const startTime = Date.now()
|
||||
const results = await searchDocumentation(query, { topK })
|
||||
const searchTime = Date.now() - startTime
|
||||
|
||||
logger.info(`Found ${results.length} documentation results`, { query })
|
||||
|
||||
const successResponse: DocsSearchSuccessResponse = {
|
||||
success: true,
|
||||
results,
|
||||
query,
|
||||
totalResults: results.length,
|
||||
searchTime,
|
||||
}
|
||||
|
||||
return NextResponse.json(successResponse)
|
||||
} catch (error) {
|
||||
logger.error('Documentation search API failed', error)
|
||||
|
||||
const errorResponse: DocsSearchErrorResponse = {
|
||||
success: false,
|
||||
error: `Documentation search failed: ${error instanceof Error ? error.message : 'Unknown error'}`,
|
||||
}
|
||||
|
||||
return NextResponse.json(errorResponse, { status: 500 })
|
||||
}
|
||||
}
|
||||
223
apps/sim/app/api/environment/variables/route.ts
Normal file
223
apps/sim/app/api/environment/variables/route.ts
Normal file
@@ -0,0 +1,223 @@
|
||||
import { eq } from 'drizzle-orm'
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { z } from 'zod'
|
||||
import { getEnvironmentVariableKeys } from '@/lib/environment/utils'
|
||||
import { createLogger } from '@/lib/logs/console/logger'
|
||||
import { decryptSecret, encryptSecret } from '@/lib/utils'
|
||||
import { getUserId } from '@/app/api/auth/oauth/utils'
|
||||
import { db } from '@/db'
|
||||
import { environment } from '@/db/schema'
|
||||
|
||||
const logger = createLogger('EnvironmentVariablesAPI')
|
||||
|
||||
// Schema for environment variable updates
// A flat map of variable name -> plaintext value; values are encrypted
// before being persisted (see PUT handler below).
const EnvVarSchema = z.object({
  variables: z.record(z.string()),
})
||||
|
||||
export async function GET(request: NextRequest) {
|
||||
const requestId = crypto.randomUUID().slice(0, 8)
|
||||
|
||||
try {
|
||||
// For GET requests, check for workflowId in query params
|
||||
const { searchParams } = new URL(request.url)
|
||||
const workflowId = searchParams.get('workflowId')
|
||||
|
||||
// Use dual authentication pattern like other copilot tools
|
||||
const userId = await getUserId(requestId, workflowId || undefined)
|
||||
|
||||
if (!userId) {
|
||||
logger.warn(`[${requestId}] Unauthorized environment variables access attempt`)
|
||||
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
|
||||
}
|
||||
|
||||
// Get only the variable names (keys), not values
|
||||
const result = await getEnvironmentVariableKeys(userId)
|
||||
|
||||
return NextResponse.json(
|
||||
{
|
||||
success: true,
|
||||
output: result,
|
||||
},
|
||||
{ status: 200 }
|
||||
)
|
||||
} catch (error: any) {
|
||||
logger.error(`[${requestId}] Environment variables fetch error`, error)
|
||||
return NextResponse.json(
|
||||
{
|
||||
success: false,
|
||||
error: error.message || 'Failed to get environment variables',
|
||||
},
|
||||
{ status: 500 }
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
export async function PUT(request: NextRequest) {
|
||||
const requestId = crypto.randomUUID().slice(0, 8)
|
||||
|
||||
try {
|
||||
const body = await request.json()
|
||||
const { workflowId, variables } = body
|
||||
|
||||
// Use dual authentication pattern like other copilot tools
|
||||
const userId = await getUserId(requestId, workflowId)
|
||||
|
||||
if (!userId) {
|
||||
logger.warn(`[${requestId}] Unauthorized environment variables set attempt`)
|
||||
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
|
||||
}
|
||||
|
||||
try {
|
||||
const { variables: validatedVariables } = EnvVarSchema.parse({ variables })
|
||||
|
||||
// Get existing environment variables for this user
|
||||
const existingData = await db
|
||||
.select()
|
||||
.from(environment)
|
||||
.where(eq(environment.userId, userId))
|
||||
.limit(1)
|
||||
|
||||
// Start with existing encrypted variables or empty object
|
||||
const existingEncryptedVariables =
|
||||
(existingData[0]?.variables as Record<string, string>) || {}
|
||||
|
||||
// Determine which variables are new or changed by comparing with decrypted existing values
|
||||
const variablesToEncrypt: Record<string, string> = {}
|
||||
const addedVariables: string[] = []
|
||||
const updatedVariables: string[] = []
|
||||
|
||||
for (const [key, newValue] of Object.entries(validatedVariables)) {
|
||||
if (!(key in existingEncryptedVariables)) {
|
||||
// New variable
|
||||
variablesToEncrypt[key] = newValue
|
||||
addedVariables.push(key)
|
||||
} else {
|
||||
// Check if the value has actually changed by decrypting the existing value
|
||||
try {
|
||||
const { decrypted: existingValue } = await decryptSecret(
|
||||
existingEncryptedVariables[key]
|
||||
)
|
||||
|
||||
if (existingValue !== newValue) {
|
||||
// Value changed, needs re-encryption
|
||||
variablesToEncrypt[key] = newValue
|
||||
updatedVariables.push(key)
|
||||
}
|
||||
// If values are the same, keep the existing encrypted value
|
||||
} catch (decryptError) {
|
||||
// If we can't decrypt the existing value, treat as changed and re-encrypt
|
||||
logger.warn(
|
||||
`[${requestId}] Could not decrypt existing variable ${key}, re-encrypting`,
|
||||
{ error: decryptError }
|
||||
)
|
||||
variablesToEncrypt[key] = newValue
|
||||
updatedVariables.push(key)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Only encrypt the variables that are new or changed
|
||||
const newlyEncryptedVariables = await Object.entries(variablesToEncrypt).reduce(
|
||||
async (accPromise, [key, value]) => {
|
||||
const acc = await accPromise
|
||||
const { encrypted } = await encryptSecret(value)
|
||||
return { ...acc, [key]: encrypted }
|
||||
},
|
||||
Promise.resolve({})
|
||||
)
|
||||
|
||||
// Merge existing encrypted variables with newly encrypted ones
|
||||
const finalEncryptedVariables = { ...existingEncryptedVariables, ...newlyEncryptedVariables }
|
||||
|
||||
// Update or insert environment variables for user
|
||||
await db
|
||||
.insert(environment)
|
||||
.values({
|
||||
id: crypto.randomUUID(),
|
||||
userId: userId,
|
||||
variables: finalEncryptedVariables,
|
||||
updatedAt: new Date(),
|
||||
})
|
||||
.onConflictDoUpdate({
|
||||
target: [environment.userId],
|
||||
set: {
|
||||
variables: finalEncryptedVariables,
|
||||
updatedAt: new Date(),
|
||||
},
|
||||
})
|
||||
|
||||
return NextResponse.json(
|
||||
{
|
||||
success: true,
|
||||
output: {
|
||||
message: `Successfully processed ${Object.keys(validatedVariables).length} environment variable(s): ${addedVariables.length} added, ${updatedVariables.length} updated`,
|
||||
variableCount: Object.keys(validatedVariables).length,
|
||||
variableNames: Object.keys(validatedVariables),
|
||||
totalVariableCount: Object.keys(finalEncryptedVariables).length,
|
||||
addedVariables,
|
||||
updatedVariables,
|
||||
},
|
||||
},
|
||||
{ status: 200 }
|
||||
)
|
||||
} catch (validationError) {
|
||||
if (validationError instanceof z.ZodError) {
|
||||
logger.warn(`[${requestId}] Invalid environment variables data`, {
|
||||
errors: validationError.errors,
|
||||
})
|
||||
return NextResponse.json(
|
||||
{ error: 'Invalid request data', details: validationError.errors },
|
||||
{ status: 400 }
|
||||
)
|
||||
}
|
||||
throw validationError
|
||||
}
|
||||
} catch (error: any) {
|
||||
logger.error(`[${requestId}] Environment variables set error`, error)
|
||||
return NextResponse.json(
|
||||
{
|
||||
success: false,
|
||||
error: error.message || 'Failed to set environment variables',
|
||||
},
|
||||
{ status: 500 }
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
export async function POST(request: NextRequest) {
|
||||
const requestId = crypto.randomUUID().slice(0, 8)
|
||||
|
||||
try {
|
||||
const body = await request.json()
|
||||
const { workflowId } = body
|
||||
|
||||
// Use dual authentication pattern like other copilot tools
|
||||
const userId = await getUserId(requestId, workflowId)
|
||||
|
||||
if (!userId) {
|
||||
logger.warn(`[${requestId}] Unauthorized environment variables access attempt`)
|
||||
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
|
||||
}
|
||||
|
||||
// Get only the variable names (keys), not values
|
||||
const result = await getEnvironmentVariableKeys(userId)
|
||||
|
||||
return NextResponse.json(
|
||||
{
|
||||
success: true,
|
||||
output: result,
|
||||
},
|
||||
{ status: 200 }
|
||||
)
|
||||
} catch (error: any) {
|
||||
logger.error(`[${requestId}] Environment variables fetch error`, error)
|
||||
return NextResponse.json(
|
||||
{
|
||||
success: false,
|
||||
error: error.message || 'Failed to get environment variables',
|
||||
},
|
||||
{ status: 500 }
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -204,6 +204,32 @@ describe('/api/files/presigned', () => {
|
||||
expect(data.directUploadSupported).toBe(true)
|
||||
})
|
||||
|
||||
it('should generate chat S3 presigned URL with chat prefix and direct path', async () => {
|
||||
setupFileApiMocks({
|
||||
cloudEnabled: true,
|
||||
storageProvider: 's3',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/files/presigned/route')
|
||||
|
||||
const request = new NextRequest('http://localhost:3000/api/files/presigned?type=chat', {
|
||||
method: 'POST',
|
||||
body: JSON.stringify({
|
||||
fileName: 'chat-logo.png',
|
||||
contentType: 'image/png',
|
||||
fileSize: 4096,
|
||||
}),
|
||||
})
|
||||
|
||||
const response = await POST(request)
|
||||
const data = await response.json()
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
expect(data.fileInfo.key).toMatch(/^chat\/.*chat-logo\.png$/)
|
||||
expect(data.fileInfo.path).toMatch(/^https:\/\/.*\.s3\..*\.amazonaws\.com\/chat\//)
|
||||
expect(data.directUploadSupported).toBe(true)
|
||||
})
|
||||
|
||||
it('should generate Azure Blob presigned URL successfully', async () => {
|
||||
setupFileApiMocks({
|
||||
cloudEnabled: true,
|
||||
@@ -225,7 +251,9 @@ describe('/api/files/presigned', () => {
|
||||
const data = await response.json()
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
expect(data.presignedUrl).toContain('https://example.com/presigned-url')
|
||||
expect(data.presignedUrl).toContain(
|
||||
'https://testaccount.blob.core.windows.net/test-container'
|
||||
)
|
||||
expect(data.presignedUrl).toContain('sas-token-string')
|
||||
expect(data.fileInfo).toMatchObject({
|
||||
path: expect.stringContaining('/api/files/serve/blob/'),
|
||||
@@ -243,6 +271,41 @@ describe('/api/files/presigned', () => {
|
||||
})
|
||||
})
|
||||
|
||||
it('should generate chat Azure Blob presigned URL with chat prefix and direct path', async () => {
|
||||
setupFileApiMocks({
|
||||
cloudEnabled: true,
|
||||
storageProvider: 'blob',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/files/presigned/route')
|
||||
|
||||
const request = new NextRequest('http://localhost:3000/api/files/presigned?type=chat', {
|
||||
method: 'POST',
|
||||
body: JSON.stringify({
|
||||
fileName: 'chat-logo.png',
|
||||
contentType: 'image/png',
|
||||
fileSize: 4096,
|
||||
}),
|
||||
})
|
||||
|
||||
const response = await POST(request)
|
||||
const data = await response.json()
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
expect(data.fileInfo.key).toMatch(/^chat\/.*chat-logo\.png$/)
|
||||
expect(data.fileInfo.path).toContain(
|
||||
'https://testaccount.blob.core.windows.net/test-container'
|
||||
)
|
||||
expect(data.directUploadSupported).toBe(true)
|
||||
expect(data.uploadHeaders).toMatchObject({
|
||||
'x-ms-blob-type': 'BlockBlob',
|
||||
'x-ms-blob-content-type': 'image/png',
|
||||
'x-ms-meta-originalname': expect.any(String),
|
||||
'x-ms-meta-uploadedat': '2024-01-01T00:00:00.000Z',
|
||||
'x-ms-meta-purpose': 'chat',
|
||||
})
|
||||
})
|
||||
|
||||
it('should return error for unknown storage provider', async () => {
|
||||
// For unknown provider, we'll need to mock manually since our helper doesn't support it
|
||||
vi.doMock('@/lib/uploads', () => ({
|
||||
|
||||
@@ -6,7 +6,14 @@ import { createLogger } from '@/lib/logs/console/logger'
|
||||
import { getStorageProvider, isUsingCloudStorage } from '@/lib/uploads'
|
||||
import { getBlobServiceClient } from '@/lib/uploads/blob/blob-client'
|
||||
import { getS3Client, sanitizeFilenameForMetadata } from '@/lib/uploads/s3/s3-client'
|
||||
import { BLOB_CONFIG, BLOB_KB_CONFIG, S3_CONFIG, S3_KB_CONFIG } from '@/lib/uploads/setup'
|
||||
import {
|
||||
BLOB_CHAT_CONFIG,
|
||||
BLOB_CONFIG,
|
||||
BLOB_KB_CONFIG,
|
||||
S3_CHAT_CONFIG,
|
||||
S3_CONFIG,
|
||||
S3_KB_CONFIG,
|
||||
} from '@/lib/uploads/setup'
|
||||
import { createErrorResponse, createOptionsResponse } from '@/app/api/files/utils'
|
||||
|
||||
const logger = createLogger('PresignedUploadAPI')
|
||||
@@ -17,7 +24,7 @@ interface PresignedUrlRequest {
|
||||
fileSize: number
|
||||
}
|
||||
|
||||
type UploadType = 'general' | 'knowledge-base'
|
||||
type UploadType = 'general' | 'knowledge-base' | 'chat'
|
||||
|
||||
class PresignedUrlError extends Error {
|
||||
constructor(
|
||||
@@ -72,7 +79,11 @@ export async function POST(request: NextRequest) {
|
||||
|
||||
const uploadTypeParam = request.nextUrl.searchParams.get('type')
|
||||
const uploadType: UploadType =
|
||||
uploadTypeParam === 'knowledge-base' ? 'knowledge-base' : 'general'
|
||||
uploadTypeParam === 'knowledge-base'
|
||||
? 'knowledge-base'
|
||||
: uploadTypeParam === 'chat'
|
||||
? 'chat'
|
||||
: 'general'
|
||||
|
||||
if (!isUsingCloudStorage()) {
|
||||
throw new StorageConfigError(
|
||||
@@ -118,14 +129,19 @@ async function handleS3PresignedUrl(
|
||||
uploadType: UploadType
|
||||
) {
|
||||
try {
|
||||
const config = uploadType === 'knowledge-base' ? S3_KB_CONFIG : S3_CONFIG
|
||||
const config =
|
||||
uploadType === 'knowledge-base'
|
||||
? S3_KB_CONFIG
|
||||
: uploadType === 'chat'
|
||||
? S3_CHAT_CONFIG
|
||||
: S3_CONFIG
|
||||
|
||||
if (!config.bucket || !config.region) {
|
||||
throw new StorageConfigError(`S3 configuration missing for ${uploadType} uploads`)
|
||||
}
|
||||
|
||||
const safeFileName = fileName.replace(/\s+/g, '-').replace(/[^a-zA-Z0-9.-]/g, '_')
|
||||
const prefix = uploadType === 'knowledge-base' ? 'kb/' : ''
|
||||
const prefix = uploadType === 'knowledge-base' ? 'kb/' : uploadType === 'chat' ? 'chat/' : ''
|
||||
const uniqueKey = `${prefix}${Date.now()}-${uuidv4()}-${safeFileName}`
|
||||
|
||||
const sanitizedOriginalName = sanitizeFilenameForMetadata(fileName)
|
||||
@@ -137,6 +153,8 @@ async function handleS3PresignedUrl(
|
||||
|
||||
if (uploadType === 'knowledge-base') {
|
||||
metadata.purpose = 'knowledge-base'
|
||||
} else if (uploadType === 'chat') {
|
||||
metadata.purpose = 'chat'
|
||||
}
|
||||
|
||||
const command = new PutObjectCommand({
|
||||
@@ -156,14 +174,22 @@ async function handleS3PresignedUrl(
|
||||
)
|
||||
}
|
||||
|
||||
const servePath = `/api/files/serve/s3/${encodeURIComponent(uniqueKey)}`
|
||||
// For chat images, use direct S3 URLs since they need to be permanently accessible
|
||||
// For other files, use serve path for access control
|
||||
const finalPath =
|
||||
uploadType === 'chat'
|
||||
? `https://${config.bucket}.s3.${config.region}.amazonaws.com/${uniqueKey}`
|
||||
: `/api/files/serve/s3/${encodeURIComponent(uniqueKey)}`
|
||||
|
||||
logger.info(`Generated ${uploadType} S3 presigned URL for ${fileName} (${uniqueKey})`)
|
||||
logger.info(`Presigned URL: ${presignedUrl}`)
|
||||
logger.info(`Final path: ${finalPath}`)
|
||||
|
||||
return NextResponse.json({
|
||||
presignedUrl,
|
||||
uploadUrl: presignedUrl, // Make sure we're returning the uploadUrl field
|
||||
fileInfo: {
|
||||
path: servePath,
|
||||
path: finalPath,
|
||||
key: uniqueKey,
|
||||
name: fileName,
|
||||
size: fileSize,
|
||||
@@ -187,7 +213,12 @@ async function handleBlobPresignedUrl(
|
||||
uploadType: UploadType
|
||||
) {
|
||||
try {
|
||||
const config = uploadType === 'knowledge-base' ? BLOB_KB_CONFIG : BLOB_CONFIG
|
||||
const config =
|
||||
uploadType === 'knowledge-base'
|
||||
? BLOB_KB_CONFIG
|
||||
: uploadType === 'chat'
|
||||
? BLOB_CHAT_CONFIG
|
||||
: BLOB_CONFIG
|
||||
|
||||
if (
|
||||
!config.accountName ||
|
||||
@@ -198,7 +229,7 @@ async function handleBlobPresignedUrl(
|
||||
}
|
||||
|
||||
const safeFileName = fileName.replace(/\s+/g, '-').replace(/[^a-zA-Z0-9.-]/g, '_')
|
||||
const prefix = uploadType === 'knowledge-base' ? 'kb/' : ''
|
||||
const prefix = uploadType === 'knowledge-base' ? 'kb/' : uploadType === 'chat' ? 'chat/' : ''
|
||||
const uniqueKey = `${prefix}${Date.now()}-${uuidv4()}-${safeFileName}`
|
||||
|
||||
const blobServiceClient = getBlobServiceClient()
|
||||
@@ -231,7 +262,12 @@ async function handleBlobPresignedUrl(
|
||||
|
||||
const presignedUrl = `${blockBlobClient.url}?${sasToken}`
|
||||
|
||||
const servePath = `/api/files/serve/blob/${encodeURIComponent(uniqueKey)}`
|
||||
// For chat images, use direct Blob URLs since they need to be permanently accessible
|
||||
// For other files, use serve path for access control
|
||||
const finalPath =
|
||||
uploadType === 'chat'
|
||||
? blockBlobClient.url
|
||||
: `/api/files/serve/blob/${encodeURIComponent(uniqueKey)}`
|
||||
|
||||
logger.info(`Generated ${uploadType} Azure Blob presigned URL for ${fileName} (${uniqueKey})`)
|
||||
|
||||
@@ -244,12 +280,14 @@ async function handleBlobPresignedUrl(
|
||||
|
||||
if (uploadType === 'knowledge-base') {
|
||||
uploadHeaders['x-ms-meta-purpose'] = 'knowledge-base'
|
||||
} else if (uploadType === 'chat') {
|
||||
uploadHeaders['x-ms-meta-purpose'] = 'chat'
|
||||
}
|
||||
|
||||
return NextResponse.json({
|
||||
presignedUrl,
|
||||
fileInfo: {
|
||||
path: servePath,
|
||||
path: finalPath,
|
||||
key: uniqueKey,
|
||||
name: fileName,
|
||||
size: fileSize,
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
/**
|
||||
* Tests for knowledge search API route
|
||||
* Focuses on route-specific functionality: authentication, validation, API contract, error handling
|
||||
* Search logic is tested in utils.test.ts
|
||||
*
|
||||
* @vitest-environment node
|
||||
*/
|
||||
@@ -56,6 +58,27 @@ vi.mock('@/app/api/knowledge/utils', () => ({
|
||||
checkKnowledgeBaseAccess: mockCheckKnowledgeBaseAccess,
|
||||
}))
|
||||
|
||||
const mockHandleTagOnlySearch = vi.fn()
|
||||
const mockHandleVectorOnlySearch = vi.fn()
|
||||
const mockHandleTagAndVectorSearch = vi.fn()
|
||||
const mockGetQueryStrategy = vi.fn()
|
||||
const mockGenerateSearchEmbedding = vi.fn()
|
||||
vi.mock('./utils', () => ({
|
||||
handleTagOnlySearch: mockHandleTagOnlySearch,
|
||||
handleVectorOnlySearch: mockHandleVectorOnlySearch,
|
||||
handleTagAndVectorSearch: mockHandleTagAndVectorSearch,
|
||||
getQueryStrategy: mockGetQueryStrategy,
|
||||
generateSearchEmbedding: mockGenerateSearchEmbedding,
|
||||
APIError: class APIError extends Error {
|
||||
public status: number
|
||||
constructor(message: string, status: number) {
|
||||
super(message)
|
||||
this.name = 'APIError'
|
||||
this.status = status
|
||||
}
|
||||
},
|
||||
}))
|
||||
|
||||
mockConsoleLogger()
|
||||
|
||||
describe('Knowledge Search API Route', () => {
|
||||
@@ -65,6 +88,10 @@ describe('Knowledge Search API Route', () => {
|
||||
where: vi.fn().mockReturnThis(),
|
||||
orderBy: vi.fn().mockReturnThis(),
|
||||
limit: vi.fn().mockReturnThis(),
|
||||
innerJoin: vi.fn().mockReturnThis(),
|
||||
leftJoin: vi.fn().mockReturnThis(),
|
||||
groupBy: vi.fn().mockReturnThis(),
|
||||
having: vi.fn().mockReturnThis(),
|
||||
}
|
||||
|
||||
const mockGetUserId = vi.fn()
|
||||
@@ -107,6 +134,17 @@ describe('Knowledge Search API Route', () => {
|
||||
}
|
||||
})
|
||||
|
||||
mockHandleTagOnlySearch.mockClear()
|
||||
mockHandleVectorOnlySearch.mockClear()
|
||||
mockHandleTagAndVectorSearch.mockClear()
|
||||
mockGetQueryStrategy.mockClear().mockReturnValue({
|
||||
useParallel: false,
|
||||
distanceThreshold: 1.0,
|
||||
parallelLimit: 15,
|
||||
singleQueryOptimized: true,
|
||||
})
|
||||
mockGenerateSearchEmbedding.mockClear().mockResolvedValue([0.1, 0.2, 0.3, 0.4, 0.5])
|
||||
|
||||
vi.stubGlobal('crypto', {
|
||||
randomUUID: vi.fn().mockReturnValue('mock-uuid-1234-5678'),
|
||||
})
|
||||
@@ -137,13 +175,19 @@ describe('Knowledge Search API Route', () => {
|
||||
it('should perform search successfully with single knowledge base', async () => {
|
||||
mockGetUserId.mockResolvedValue('user-123')
|
||||
|
||||
// Mock knowledge base access check to return success
|
||||
mockCheckKnowledgeBaseAccess.mockResolvedValue({
|
||||
hasAccess: true,
|
||||
knowledgeBase: mockKnowledgeBases[0],
|
||||
knowledgeBase: {
|
||||
id: 'kb-123',
|
||||
userId: 'user-123',
|
||||
name: 'Test KB',
|
||||
deletedAt: null,
|
||||
},
|
||||
})
|
||||
|
||||
mockDbChain.limit.mockResolvedValueOnce(mockSearchResults)
|
||||
mockDbChain.limit.mockResolvedValue([])
|
||||
|
||||
mockHandleVectorOnlySearch.mockResolvedValue(mockSearchResults)
|
||||
|
||||
mockFetch.mockResolvedValue({
|
||||
ok: true,
|
||||
@@ -168,7 +212,12 @@ describe('Knowledge Search API Route', () => {
|
||||
expect(data.data.results[0].similarity).toBe(0.8) // 1 - 0.2
|
||||
expect(data.data.query).toBe(validSearchData.query)
|
||||
expect(data.data.knowledgeBaseIds).toEqual(['kb-123'])
|
||||
expect(mockDbChain.select).toHaveBeenCalled()
|
||||
expect(mockHandleVectorOnlySearch).toHaveBeenCalledWith({
|
||||
knowledgeBaseIds: ['kb-123'],
|
||||
topK: 10,
|
||||
queryVector: JSON.stringify(mockEmbedding),
|
||||
distanceThreshold: expect.any(Number),
|
||||
})
|
||||
})
|
||||
|
||||
it('should perform search successfully with multiple knowledge bases', async () => {
|
||||
@@ -184,12 +233,13 @@ describe('Knowledge Search API Route', () => {
|
||||
|
||||
mockGetUserId.mockResolvedValue('user-123')
|
||||
|
||||
// Mock knowledge base access check to return success for both KBs
|
||||
mockCheckKnowledgeBaseAccess
|
||||
.mockResolvedValueOnce({ hasAccess: true, knowledgeBase: multiKbs[0] })
|
||||
.mockResolvedValueOnce({ hasAccess: true, knowledgeBase: multiKbs[1] })
|
||||
|
||||
mockDbChain.limit.mockResolvedValueOnce(mockSearchResults)
|
||||
mockDbChain.limit.mockResolvedValue([])
|
||||
|
||||
mockHandleVectorOnlySearch.mockResolvedValue(mockSearchResults)
|
||||
|
||||
mockFetch.mockResolvedValue({
|
||||
ok: true,
|
||||
@@ -207,6 +257,12 @@ describe('Knowledge Search API Route', () => {
|
||||
expect(response.status).toBe(200)
|
||||
expect(data.success).toBe(true)
|
||||
expect(data.data.knowledgeBaseIds).toEqual(['kb-123', 'kb-456'])
|
||||
expect(mockHandleVectorOnlySearch).toHaveBeenCalledWith({
|
||||
knowledgeBaseIds: ['kb-123', 'kb-456'],
|
||||
topK: 10,
|
||||
queryVector: JSON.stringify(mockEmbedding),
|
||||
distanceThreshold: expect.any(Number),
|
||||
})
|
||||
})
|
||||
|
||||
it('should handle workflow-based authentication', async () => {
|
||||
@@ -217,13 +273,19 @@ describe('Knowledge Search API Route', () => {
|
||||
|
||||
mockGetUserId.mockResolvedValue('user-123')
|
||||
|
||||
// Mock knowledge base access check to return success
|
||||
mockCheckKnowledgeBaseAccess.mockResolvedValue({
|
||||
hasAccess: true,
|
||||
knowledgeBase: mockKnowledgeBases[0],
|
||||
knowledgeBase: {
|
||||
id: 'kb-123',
|
||||
userId: 'user-123',
|
||||
name: 'Test KB',
|
||||
deletedAt: null,
|
||||
},
|
||||
})
|
||||
|
||||
mockDbChain.limit.mockResolvedValueOnce(mockSearchResults) // Search results
|
||||
mockDbChain.limit.mockResolvedValue([])
|
||||
|
||||
mockHandleVectorOnlySearch.mockResolvedValue(mockSearchResults)
|
||||
|
||||
mockFetch.mockResolvedValue({
|
||||
ok: true,
|
||||
@@ -275,7 +337,6 @@ describe('Knowledge Search API Route', () => {
|
||||
it('should return not found for non-existent knowledge base', async () => {
|
||||
mockGetUserId.mockResolvedValue('user-123')
|
||||
|
||||
// Mock knowledge base access check to return no access
|
||||
mockCheckKnowledgeBaseAccess.mockResolvedValue({
|
||||
hasAccess: false,
|
||||
notFound: true,
|
||||
@@ -340,7 +401,12 @@ describe('Knowledge Search API Route', () => {
|
||||
// Mock knowledge base access check to return success
|
||||
mockCheckKnowledgeBaseAccess.mockResolvedValue({
|
||||
hasAccess: true,
|
||||
knowledgeBase: mockKnowledgeBases[0],
|
||||
knowledgeBase: {
|
||||
id: 'kb-123',
|
||||
userId: 'user-123',
|
||||
name: 'Test KB',
|
||||
deletedAt: null,
|
||||
},
|
||||
})
|
||||
|
||||
mockDbChain.limit.mockResolvedValueOnce(mockSearchResults) // Search results
|
||||
@@ -366,12 +432,10 @@ describe('Knowledge Search API Route', () => {
|
||||
mockGetUserId.mockResolvedValue('user-123')
|
||||
mockDbChain.limit.mockResolvedValueOnce(mockKnowledgeBases)
|
||||
|
||||
mockFetch.mockResolvedValue({
|
||||
ok: false,
|
||||
status: 401,
|
||||
statusText: 'Unauthorized',
|
||||
text: () => Promise.resolve('Invalid API key'),
|
||||
})
|
||||
// Mock generateSearchEmbedding to throw an error
|
||||
mockGenerateSearchEmbedding.mockRejectedValueOnce(
|
||||
new Error('OpenAI API error: 401 Unauthorized - Invalid API key')
|
||||
)
|
||||
|
||||
const req = createMockRequest('POST', validSearchData)
|
||||
const { POST } = await import('@/app/api/knowledge/search/route')
|
||||
@@ -383,15 +447,12 @@ describe('Knowledge Search API Route', () => {
|
||||
})
|
||||
|
||||
it.concurrent('should handle missing OpenAI API key', async () => {
|
||||
vi.doMock('@/lib/env', () => ({
|
||||
env: {
|
||||
OPENAI_API_KEY: undefined,
|
||||
},
|
||||
}))
|
||||
|
||||
mockGetUserId.mockResolvedValue('user-123')
|
||||
mockDbChain.limit.mockResolvedValueOnce(mockKnowledgeBases)
|
||||
|
||||
// Mock generateSearchEmbedding to throw missing API key error
|
||||
mockGenerateSearchEmbedding.mockRejectedValueOnce(new Error('OPENAI_API_KEY not configured'))
|
||||
|
||||
const req = createMockRequest('POST', validSearchData)
|
||||
const { POST } = await import('@/app/api/knowledge/search/route')
|
||||
const response = await POST(req)
|
||||
@@ -404,15 +465,9 @@ describe('Knowledge Search API Route', () => {
|
||||
it.concurrent('should handle database errors during search', async () => {
|
||||
mockGetUserId.mockResolvedValue('user-123')
|
||||
mockDbChain.limit.mockResolvedValueOnce(mockKnowledgeBases)
|
||||
mockDbChain.limit.mockRejectedValueOnce(new Error('Database error'))
|
||||
|
||||
mockFetch.mockResolvedValue({
|
||||
ok: true,
|
||||
json: () =>
|
||||
Promise.resolve({
|
||||
data: [{ embedding: mockEmbedding }],
|
||||
}),
|
||||
})
|
||||
// Mock the search handler to throw a database error
|
||||
mockHandleVectorOnlySearch.mockRejectedValueOnce(new Error('Database error'))
|
||||
|
||||
const req = createMockRequest('POST', validSearchData)
|
||||
const { POST } = await import('@/app/api/knowledge/search/route')
|
||||
@@ -427,13 +482,10 @@ describe('Knowledge Search API Route', () => {
|
||||
mockGetUserId.mockResolvedValue('user-123')
|
||||
mockDbChain.limit.mockResolvedValueOnce(mockKnowledgeBases)
|
||||
|
||||
mockFetch.mockResolvedValue({
|
||||
ok: true,
|
||||
json: () =>
|
||||
Promise.resolve({
|
||||
data: [], // Empty data array
|
||||
}),
|
||||
})
|
||||
// Mock generateSearchEmbedding to throw invalid response format error
|
||||
mockGenerateSearchEmbedding.mockRejectedValueOnce(
|
||||
new Error('Invalid response format from OpenAI embeddings API')
|
||||
)
|
||||
|
||||
const req = createMockRequest('POST', validSearchData)
|
||||
const { POST } = await import('@/app/api/knowledge/search/route')
|
||||
@@ -451,7 +503,12 @@ describe('Knowledge Search API Route', () => {
|
||||
// Mock knowledge base access check to return success
|
||||
mockCheckKnowledgeBaseAccess.mockResolvedValue({
|
||||
hasAccess: true,
|
||||
knowledgeBase: mockKnowledgeBases[0],
|
||||
knowledgeBase: {
|
||||
id: 'kb-123',
|
||||
userId: 'user-123',
|
||||
name: 'Test KB',
|
||||
deletedAt: null,
|
||||
},
|
||||
})
|
||||
|
||||
mockDbChain.limit.mockResolvedValueOnce(mockSearchResults)
|
||||
@@ -499,7 +556,12 @@ describe('Knowledge Search API Route', () => {
|
||||
// Mock knowledge base access check to return success
|
||||
mockCheckKnowledgeBaseAccess.mockResolvedValue({
|
||||
hasAccess: true,
|
||||
knowledgeBase: mockKnowledgeBases[0],
|
||||
knowledgeBase: {
|
||||
id: 'kb-123',
|
||||
userId: 'user-123',
|
||||
name: 'Test KB',
|
||||
deletedAt: null,
|
||||
},
|
||||
})
|
||||
|
||||
mockDbChain.limit.mockResolvedValueOnce(mockSearchResults)
|
||||
@@ -556,7 +618,12 @@ describe('Knowledge Search API Route', () => {
|
||||
// Mock knowledge base access check to return success
|
||||
mockCheckKnowledgeBaseAccess.mockResolvedValue({
|
||||
hasAccess: true,
|
||||
knowledgeBase: mockKnowledgeBases[0],
|
||||
knowledgeBase: {
|
||||
id: 'kb-123',
|
||||
userId: 'user-123',
|
||||
name: 'Test KB',
|
||||
deletedAt: null,
|
||||
},
|
||||
})
|
||||
|
||||
mockDbChain.limit.mockResolvedValueOnce(mockSearchResults)
|
||||
@@ -581,4 +648,350 @@ describe('Knowledge Search API Route', () => {
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe('Optional Query Search', () => {
|
||||
const mockTagDefinitions = [
|
||||
{ tagSlot: 'tag1', displayName: 'category' },
|
||||
{ tagSlot: 'tag2', displayName: 'priority' },
|
||||
]
|
||||
|
||||
const mockTaggedResults = [
|
||||
{
|
||||
id: 'chunk-1',
|
||||
content: 'Tagged content 1',
|
||||
documentId: 'doc-1',
|
||||
chunkIndex: 0,
|
||||
tag1: 'api',
|
||||
tag2: 'high',
|
||||
distance: 0,
|
||||
knowledgeBaseId: 'kb-123',
|
||||
},
|
||||
{
|
||||
id: 'chunk-2',
|
||||
content: 'Tagged content 2',
|
||||
documentId: 'doc-2',
|
||||
chunkIndex: 1,
|
||||
tag1: 'docs',
|
||||
tag2: 'medium',
|
||||
distance: 0,
|
||||
knowledgeBaseId: 'kb-123',
|
||||
},
|
||||
]
|
||||
|
||||
it('should perform tag-only search without query', async () => {
|
||||
const tagOnlyData = {
|
||||
knowledgeBaseIds: 'kb-123',
|
||||
filters: {
|
||||
category: 'api',
|
||||
},
|
||||
topK: 10,
|
||||
}
|
||||
|
||||
mockGetUserId.mockResolvedValue('user-123')
|
||||
mockCheckKnowledgeBaseAccess.mockResolvedValue({
|
||||
hasAccess: true,
|
||||
knowledgeBase: {
|
||||
id: 'kb-123',
|
||||
userId: 'user-123',
|
||||
name: 'Test KB',
|
||||
deletedAt: null,
|
||||
},
|
||||
})
|
||||
|
||||
// Mock tag definitions queries for filter mapping and display mapping
|
||||
mockDbChain.limit
|
||||
.mockResolvedValueOnce(mockTagDefinitions) // Tag definitions for filter mapping
|
||||
.mockResolvedValueOnce(mockTagDefinitions) // Tag definitions for display mapping
|
||||
|
||||
// Mock the tag-only search handler
|
||||
mockHandleTagOnlySearch.mockResolvedValue(mockTaggedResults)
|
||||
|
||||
const req = createMockRequest('POST', tagOnlyData)
|
||||
const { POST } = await import('@/app/api/knowledge/search/route')
|
||||
const response = await POST(req)
|
||||
const data = await response.json()
|
||||
|
||||
if (response.status !== 200) {
|
||||
console.log('Tag-only search test error:', data)
|
||||
}
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
expect(data.success).toBe(true)
|
||||
expect(data.data.results).toHaveLength(2)
|
||||
expect(data.data.results[0].similarity).toBe(1) // Perfect similarity for tag-only
|
||||
expect(data.data.query).toBe('') // Empty query
|
||||
expect(data.data.cost).toBeUndefined() // No cost for tag-only search
|
||||
expect(mockGenerateSearchEmbedding).not.toHaveBeenCalled() // No embedding API call
|
||||
expect(mockHandleTagOnlySearch).toHaveBeenCalledWith({
|
||||
knowledgeBaseIds: ['kb-123'],
|
||||
topK: 10,
|
||||
filters: { category: 'api' }, // Note: When no tag definitions are found, it uses the original filter key
|
||||
})
|
||||
})
|
||||
|
||||
it('should perform query + tag combination search', async () => {
|
||||
const combinedData = {
|
||||
knowledgeBaseIds: 'kb-123',
|
||||
query: 'test search',
|
||||
filters: {
|
||||
category: 'api',
|
||||
},
|
||||
topK: 10,
|
||||
}
|
||||
|
||||
mockGetUserId.mockResolvedValue('user-123')
|
||||
mockCheckKnowledgeBaseAccess.mockResolvedValue({
|
||||
hasAccess: true,
|
||||
knowledgeBase: {
|
||||
id: 'kb-123',
|
||||
userId: 'user-123',
|
||||
name: 'Test KB',
|
||||
deletedAt: null,
|
||||
},
|
||||
})
|
||||
|
||||
// Mock tag definitions queries for filter mapping and display mapping
|
||||
mockDbChain.limit
|
||||
.mockResolvedValueOnce(mockTagDefinitions) // Tag definitions for filter mapping
|
||||
.mockResolvedValueOnce(mockTagDefinitions) // Tag definitions for display mapping
|
||||
|
||||
// Mock the tag + vector search handler
|
||||
mockHandleTagAndVectorSearch.mockResolvedValue(mockSearchResults)
|
||||
|
||||
mockFetch.mockResolvedValue({
|
||||
ok: true,
|
||||
json: () =>
|
||||
Promise.resolve({
|
||||
data: [{ embedding: mockEmbedding }],
|
||||
}),
|
||||
})
|
||||
|
||||
const req = createMockRequest('POST', combinedData)
|
||||
const { POST } = await import('@/app/api/knowledge/search/route')
|
||||
const response = await POST(req)
|
||||
const data = await response.json()
|
||||
|
||||
if (response.status !== 200) {
|
||||
console.log('Query+tag combination test error:', data)
|
||||
}
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
expect(data.success).toBe(true)
|
||||
expect(data.data.results).toHaveLength(2)
|
||||
expect(data.data.query).toBe('test search')
|
||||
expect(data.data.cost).toBeDefined() // Cost included for vector search
|
||||
expect(mockGenerateSearchEmbedding).toHaveBeenCalled() // Embedding API called
|
||||
expect(mockHandleTagAndVectorSearch).toHaveBeenCalledWith({
|
||||
knowledgeBaseIds: ['kb-123'],
|
||||
topK: 10,
|
||||
filters: { category: 'api' }, // Note: When no tag definitions are found, it uses the original filter key
|
||||
queryVector: JSON.stringify(mockEmbedding),
|
||||
distanceThreshold: 1, // Single KB uses threshold of 1.0
|
||||
})
|
||||
})
|
||||
|
||||
it('should validate that either query or filters are provided', async () => {
|
||||
const emptyData = {
|
||||
knowledgeBaseIds: 'kb-123',
|
||||
topK: 10,
|
||||
}
|
||||
|
||||
const req = createMockRequest('POST', emptyData)
|
||||
const { POST } = await import('@/app/api/knowledge/search/route')
|
||||
const response = await POST(req)
|
||||
const data = await response.json()
|
||||
|
||||
expect(response.status).toBe(400)
|
||||
expect(data.error).toBe('Invalid request data')
|
||||
expect(data.details).toEqual(
|
||||
expect.arrayContaining([
|
||||
expect.objectContaining({
|
||||
message:
|
||||
'Please provide either a search query or tag filters to search your knowledge base',
|
||||
}),
|
||||
])
|
||||
)
|
||||
})
|
||||
|
||||
it('should validate that empty query with empty filters fails', async () => {
|
||||
const emptyFiltersData = {
|
||||
knowledgeBaseIds: 'kb-123',
|
||||
query: '',
|
||||
filters: {},
|
||||
topK: 10,
|
||||
}
|
||||
|
||||
const req = createMockRequest('POST', emptyFiltersData)
|
||||
const { POST } = await import('@/app/api/knowledge/search/route')
|
||||
const response = await POST(req)
|
||||
const data = await response.json()
|
||||
|
||||
expect(response.status).toBe(400)
|
||||
expect(data.error).toBe('Invalid request data')
|
||||
})
|
||||
|
||||
it('should handle empty tag values gracefully', async () => {
|
||||
// This simulates what happens when the frontend sends empty tag values
|
||||
// The tool transformation should filter out empty values, resulting in no filters
|
||||
const emptyTagValueData = {
|
||||
knowledgeBaseIds: 'kb-123',
|
||||
query: '',
|
||||
topK: 10,
|
||||
// This would result in no filters after tool transformation
|
||||
}
|
||||
|
||||
const req = createMockRequest('POST', emptyTagValueData)
|
||||
const { POST } = await import('@/app/api/knowledge/search/route')
|
||||
const response = await POST(req)
|
||||
const data = await response.json()
|
||||
|
||||
expect(response.status).toBe(400)
|
||||
expect(data.error).toBe('Invalid request data')
|
||||
expect(data.details).toEqual(
|
||||
expect.arrayContaining([
|
||||
expect.objectContaining({
|
||||
message:
|
||||
'Please provide either a search query or tag filters to search your knowledge base',
|
||||
}),
|
||||
])
|
||||
)
|
||||
})
|
||||
|
||||
it('should handle null values from frontend gracefully', async () => {
|
||||
// This simulates the exact scenario the user reported
|
||||
// Null values should be transformed to undefined and then trigger validation
|
||||
const nullValuesData = {
|
||||
knowledgeBaseIds: 'kb-123',
|
||||
topK: null,
|
||||
query: null,
|
||||
filters: null,
|
||||
}
|
||||
|
||||
const req = createMockRequest('POST', nullValuesData)
|
||||
const { POST } = await import('@/app/api/knowledge/search/route')
|
||||
const response = await POST(req)
|
||||
const data = await response.json()
|
||||
|
||||
expect(response.status).toBe(400)
|
||||
expect(data.error).toBe('Invalid request data')
|
||||
expect(data.details).toEqual(
|
||||
expect.arrayContaining([
|
||||
expect.objectContaining({
|
||||
message:
|
||||
'Please provide either a search query or tag filters to search your knowledge base',
|
||||
}),
|
||||
])
|
||||
)
|
||||
})
|
||||
|
||||
it('should perform query-only search (existing behavior)', async () => {
|
||||
const queryOnlyData = {
|
||||
knowledgeBaseIds: 'kb-123',
|
||||
query: 'test search query',
|
||||
topK: 10,
|
||||
}
|
||||
|
||||
mockGetUserId.mockResolvedValue('user-123')
|
||||
mockCheckKnowledgeBaseAccess.mockResolvedValue({
|
||||
hasAccess: true,
|
||||
knowledgeBase: {
|
||||
id: 'kb-123',
|
||||
userId: 'user-123',
|
||||
name: 'Test KB',
|
||||
deletedAt: null,
|
||||
},
|
||||
})
|
||||
|
||||
mockDbChain.limit.mockResolvedValueOnce(mockSearchResults)
|
||||
|
||||
mockFetch.mockResolvedValue({
|
||||
ok: true,
|
||||
json: () =>
|
||||
Promise.resolve({
|
||||
data: [{ embedding: mockEmbedding }],
|
||||
}),
|
||||
})
|
||||
|
||||
const req = createMockRequest('POST', queryOnlyData)
|
||||
const { POST } = await import('@/app/api/knowledge/search/route')
|
||||
const response = await POST(req)
|
||||
const data = await response.json()
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
expect(data.success).toBe(true)
|
||||
expect(data.data.results).toHaveLength(2)
|
||||
expect(data.data.query).toBe('test search query')
|
||||
expect(data.data.cost).toBeDefined() // Cost included for vector search
|
||||
expect(mockGenerateSearchEmbedding).toHaveBeenCalled() // Embedding API called
|
||||
})
|
||||
|
||||
it('should handle tag-only search with multiple knowledge bases', async () => {
|
||||
const multiKbTagData = {
|
||||
knowledgeBaseIds: ['kb-123', 'kb-456'],
|
||||
filters: {
|
||||
category: 'docs',
|
||||
priority: 'high',
|
||||
},
|
||||
topK: 10,
|
||||
}
|
||||
|
||||
mockGetUserId.mockResolvedValue('user-123')
|
||||
mockCheckKnowledgeBaseAccess
|
||||
.mockResolvedValueOnce({
|
||||
hasAccess: true,
|
||||
knowledgeBase: {
|
||||
id: 'kb-123',
|
||||
userId: 'user-123',
|
||||
name: 'Test KB',
|
||||
deletedAt: null,
|
||||
},
|
||||
})
|
||||
.mockResolvedValueOnce({
|
||||
hasAccess: true,
|
||||
knowledgeBase: { id: 'kb-456', userId: 'user-123', name: 'Test KB 2' },
|
||||
})
|
||||
|
||||
// Reset all mocks before setting up specific behavior
|
||||
Object.values(mockDbChain).forEach((fn) => {
|
||||
if (typeof fn === 'function') {
|
||||
fn.mockClear().mockReturnThis()
|
||||
}
|
||||
})
|
||||
|
||||
// Create fresh mocks for multiple database calls needed for multi-KB tag search
|
||||
const mockTagDefsQuery1 = {
|
||||
...mockDbChain,
|
||||
limit: vi.fn().mockResolvedValue(mockTagDefinitions),
|
||||
}
|
||||
const mockTagSearchQuery = {
|
||||
...mockDbChain,
|
||||
limit: vi.fn().mockResolvedValue(mockTaggedResults),
|
||||
}
|
||||
const mockTagDefsQuery2 = {
|
||||
...mockDbChain,
|
||||
limit: vi.fn().mockResolvedValue(mockTagDefinitions),
|
||||
}
|
||||
const mockTagDefsQuery3 = {
|
||||
...mockDbChain,
|
||||
limit: vi.fn().mockResolvedValue(mockTagDefinitions),
|
||||
}
|
||||
|
||||
// Chain the mocks for: tag defs, search, display mapping KB1, display mapping KB2
|
||||
mockDbChain.select
|
||||
.mockReturnValueOnce(mockTagDefsQuery1)
|
||||
.mockReturnValueOnce(mockTagSearchQuery)
|
||||
.mockReturnValueOnce(mockTagDefsQuery2)
|
||||
.mockReturnValueOnce(mockTagDefsQuery3)
|
||||
|
||||
const req = createMockRequest('POST', multiKbTagData)
|
||||
const { POST } = await import('@/app/api/knowledge/search/route')
|
||||
const response = await POST(req)
|
||||
const data = await response.json()
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
expect(data.success).toBe(true)
|
||||
expect(data.data.knowledgeBaseIds).toEqual(['kb-123', 'kb-456'])
|
||||
expect(mockGenerateSearchEmbedding).not.toHaveBeenCalled() // No embedding for tag-only
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
@@ -1,235 +1,61 @@
|
||||
import { and, eq, inArray, sql } from 'drizzle-orm'
|
||||
import { eq } from 'drizzle-orm'
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { z } from 'zod'
|
||||
import { TAG_SLOTS } from '@/lib/constants/knowledge'
|
||||
import { retryWithExponentialBackoff } from '@/lib/documents/utils'
|
||||
import { env } from '@/lib/env'
|
||||
import { createLogger } from '@/lib/logs/console/logger'
|
||||
import { estimateTokenCount } from '@/lib/tokenization/estimators'
|
||||
import { getUserId } from '@/app/api/auth/oauth/utils'
|
||||
import { checkKnowledgeBaseAccess } from '@/app/api/knowledge/utils'
|
||||
import { db } from '@/db'
|
||||
import { embedding, knowledgeBaseTagDefinitions } from '@/db/schema'
|
||||
import { knowledgeBaseTagDefinitions } from '@/db/schema'
|
||||
import { calculateCost } from '@/providers/utils'
|
||||
import {
|
||||
generateSearchEmbedding,
|
||||
getQueryStrategy,
|
||||
handleTagAndVectorSearch,
|
||||
handleTagOnlySearch,
|
||||
handleVectorOnlySearch,
|
||||
type SearchResult,
|
||||
} from './utils'
|
||||
|
||||
const logger = createLogger('VectorSearchAPI')
|
||||
|
||||
function getTagFilters(filters: Record<string, string>, embedding: any) {
|
||||
return Object.entries(filters).map(([key, value]) => {
|
||||
// Handle OR logic within same tag
|
||||
const values = value.includes('|OR|') ? value.split('|OR|') : [value]
|
||||
logger.debug(`[getTagFilters] Processing ${key}="${value}" -> values:`, values)
|
||||
|
||||
const getColumnForKey = (key: string) => {
|
||||
switch (key) {
|
||||
case 'tag1':
|
||||
return embedding.tag1
|
||||
case 'tag2':
|
||||
return embedding.tag2
|
||||
case 'tag3':
|
||||
return embedding.tag3
|
||||
case 'tag4':
|
||||
return embedding.tag4
|
||||
case 'tag5':
|
||||
return embedding.tag5
|
||||
case 'tag6':
|
||||
return embedding.tag6
|
||||
case 'tag7':
|
||||
return embedding.tag7
|
||||
default:
|
||||
return null
|
||||
}
|
||||
}
|
||||
|
||||
const column = getColumnForKey(key)
|
||||
if (!column) return sql`1=1` // No-op for unknown keys
|
||||
|
||||
if (values.length === 1) {
|
||||
// Single value - simple equality
|
||||
logger.debug(`[getTagFilters] Single value filter: ${key} = ${values[0]}`)
|
||||
return sql`LOWER(${column}) = LOWER(${values[0]})`
|
||||
}
|
||||
// Multiple values - OR logic
|
||||
logger.debug(`[getTagFilters] OR filter: ${key} IN (${values.join(', ')})`)
|
||||
const orConditions = values.map((v) => sql`LOWER(${column}) = LOWER(${v})`)
|
||||
return sql`(${sql.join(orConditions, sql` OR `)})`
|
||||
const VectorSearchSchema = z
|
||||
.object({
|
||||
knowledgeBaseIds: z.union([
|
||||
z.string().min(1, 'Knowledge base ID is required'),
|
||||
z.array(z.string().min(1)).min(1, 'At least one knowledge base ID is required'),
|
||||
]),
|
||||
query: z
|
||||
.string()
|
||||
.optional()
|
||||
.nullable()
|
||||
.transform((val) => val || undefined),
|
||||
topK: z
|
||||
.number()
|
||||
.min(1)
|
||||
.max(100)
|
||||
.optional()
|
||||
.nullable()
|
||||
.default(10)
|
||||
.transform((val) => val ?? 10),
|
||||
filters: z
|
||||
.record(z.string())
|
||||
.optional()
|
||||
.nullable()
|
||||
.transform((val) => val || undefined), // Allow dynamic filter keys (display names)
|
||||
})
|
||||
}
|
||||
|
||||
class APIError extends Error {
|
||||
public status: number
|
||||
|
||||
constructor(message: string, status: number) {
|
||||
super(message)
|
||||
this.name = 'APIError'
|
||||
this.status = status
|
||||
}
|
||||
}
|
||||
|
||||
const VectorSearchSchema = z.object({
|
||||
knowledgeBaseIds: z.union([
|
||||
z.string().min(1, 'Knowledge base ID is required'),
|
||||
z.array(z.string().min(1)).min(1, 'At least one knowledge base ID is required'),
|
||||
]),
|
||||
query: z.string().min(1, 'Search query is required'),
|
||||
topK: z.number().min(1).max(100).default(10),
|
||||
filters: z.record(z.string()).optional(), // Allow dynamic filter keys (display names)
|
||||
})
|
||||
|
||||
async function generateSearchEmbedding(query: string): Promise<number[]> {
|
||||
const openaiApiKey = env.OPENAI_API_KEY
|
||||
if (!openaiApiKey) {
|
||||
throw new Error('OPENAI_API_KEY not configured')
|
||||
}
|
||||
|
||||
try {
|
||||
const embedding = await retryWithExponentialBackoff(
|
||||
async () => {
|
||||
const response = await fetch('https://api.openai.com/v1/embeddings', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
Authorization: `Bearer ${openaiApiKey}`,
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
input: query,
|
||||
model: 'text-embedding-3-small',
|
||||
encoding_format: 'float',
|
||||
}),
|
||||
})
|
||||
|
||||
if (!response.ok) {
|
||||
const errorText = await response.text()
|
||||
const error = new APIError(
|
||||
`OpenAI API error: ${response.status} ${response.statusText} - ${errorText}`,
|
||||
response.status
|
||||
)
|
||||
throw error
|
||||
}
|
||||
|
||||
const data = await response.json()
|
||||
|
||||
if (!data.data || !Array.isArray(data.data) || data.data.length === 0) {
|
||||
throw new Error('Invalid response format from OpenAI embeddings API')
|
||||
}
|
||||
|
||||
return data.data[0].embedding
|
||||
},
|
||||
{
|
||||
maxRetries: 5,
|
||||
initialDelayMs: 1000,
|
||||
maxDelayMs: 30000,
|
||||
backoffMultiplier: 2,
|
||||
}
|
||||
)
|
||||
|
||||
return embedding
|
||||
} catch (error) {
|
||||
logger.error('Failed to generate search embedding:', error)
|
||||
throw new Error(
|
||||
`Embedding generation failed: ${error instanceof Error ? error.message : 'Unknown error'}`
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
function getQueryStrategy(kbCount: number, topK: number) {
|
||||
const useParallel = kbCount > 4 || (kbCount > 2 && topK > 50)
|
||||
const distanceThreshold = kbCount > 3 ? 0.8 : 1.0
|
||||
const parallelLimit = Math.ceil(topK / kbCount) + 5
|
||||
|
||||
return {
|
||||
useParallel,
|
||||
distanceThreshold,
|
||||
parallelLimit,
|
||||
singleQueryOptimized: kbCount <= 2,
|
||||
}
|
||||
}
|
||||
|
||||
async function executeParallelQueries(
|
||||
knowledgeBaseIds: string[],
|
||||
queryVector: string,
|
||||
topK: number,
|
||||
distanceThreshold: number,
|
||||
filters?: Record<string, string>
|
||||
) {
|
||||
const parallelLimit = Math.ceil(topK / knowledgeBaseIds.length) + 5
|
||||
|
||||
const queryPromises = knowledgeBaseIds.map(async (kbId) => {
|
||||
const results = await db
|
||||
.select({
|
||||
id: embedding.id,
|
||||
content: embedding.content,
|
||||
documentId: embedding.documentId,
|
||||
chunkIndex: embedding.chunkIndex,
|
||||
tag1: embedding.tag1,
|
||||
tag2: embedding.tag2,
|
||||
tag3: embedding.tag3,
|
||||
tag4: embedding.tag4,
|
||||
tag5: embedding.tag5,
|
||||
tag6: embedding.tag6,
|
||||
tag7: embedding.tag7,
|
||||
distance: sql<number>`${embedding.embedding} <=> ${queryVector}::vector`.as('distance'),
|
||||
knowledgeBaseId: embedding.knowledgeBaseId,
|
||||
})
|
||||
.from(embedding)
|
||||
.where(
|
||||
and(
|
||||
eq(embedding.knowledgeBaseId, kbId),
|
||||
eq(embedding.enabled, true),
|
||||
sql`${embedding.embedding} <=> ${queryVector}::vector < ${distanceThreshold}`,
|
||||
...(filters ? getTagFilters(filters, embedding) : [])
|
||||
)
|
||||
)
|
||||
.orderBy(sql`${embedding.embedding} <=> ${queryVector}::vector`)
|
||||
.limit(parallelLimit)
|
||||
|
||||
return results
|
||||
})
|
||||
|
||||
const parallelResults = await Promise.all(queryPromises)
|
||||
return parallelResults.flat()
|
||||
}
|
||||
|
||||
async function executeSingleQuery(
|
||||
knowledgeBaseIds: string[],
|
||||
queryVector: string,
|
||||
topK: number,
|
||||
distanceThreshold: number,
|
||||
filters?: Record<string, string>
|
||||
) {
|
||||
logger.debug(`[executeSingleQuery] Called with filters:`, filters)
|
||||
return await db
|
||||
.select({
|
||||
id: embedding.id,
|
||||
content: embedding.content,
|
||||
documentId: embedding.documentId,
|
||||
chunkIndex: embedding.chunkIndex,
|
||||
tag1: embedding.tag1,
|
||||
tag2: embedding.tag2,
|
||||
tag3: embedding.tag3,
|
||||
tag4: embedding.tag4,
|
||||
tag5: embedding.tag5,
|
||||
tag6: embedding.tag6,
|
||||
tag7: embedding.tag7,
|
||||
distance: sql<number>`${embedding.embedding} <=> ${queryVector}::vector`.as('distance'),
|
||||
knowledgeBaseId: embedding.knowledgeBaseId,
|
||||
})
|
||||
.from(embedding)
|
||||
.where(
|
||||
and(
|
||||
inArray(embedding.knowledgeBaseId, knowledgeBaseIds),
|
||||
eq(embedding.enabled, true),
|
||||
sql`${embedding.embedding} <=> ${queryVector}::vector < ${distanceThreshold}`,
|
||||
...(filters ? getTagFilters(filters, embedding) : [])
|
||||
)
|
||||
)
|
||||
.orderBy(sql`${embedding.embedding} <=> ${queryVector}::vector`)
|
||||
.limit(topK)
|
||||
}
|
||||
|
||||
function mergeAndRankResults(results: any[], topK: number) {
|
||||
return results.sort((a, b) => a.distance - b.distance).slice(0, topK)
|
||||
}
|
||||
.refine(
|
||||
(data) => {
|
||||
// Ensure at least query or filters are provided
|
||||
const hasQuery = data.query && data.query.trim().length > 0
|
||||
const hasFilters = data.filters && Object.keys(data.filters).length > 0
|
||||
return hasQuery || hasFilters
|
||||
},
|
||||
{
|
||||
message: 'Please provide either a search query or tag filters to search your knowledge base',
|
||||
}
|
||||
)
|
||||
|
||||
export async function POST(request: NextRequest) {
|
||||
const requestId = crypto.randomUUID().slice(0, 8)
|
||||
@@ -317,8 +143,9 @@ export async function POST(request: NextRequest) {
|
||||
)
|
||||
}
|
||||
|
||||
// Generate query embedding in parallel with access checks
|
||||
const queryEmbedding = await generateSearchEmbedding(validatedData.query)
|
||||
// Generate query embedding only if query is provided
|
||||
const hasQuery = validatedData.query && validatedData.query.trim().length > 0
|
||||
const queryEmbedding = hasQuery ? await generateSearchEmbedding(validatedData.query!) : null
|
||||
|
||||
// Check if any requested knowledge bases were not accessible
|
||||
const inaccessibleKbIds = knowledgeBaseIds.filter((id) => !accessibleKbIds.includes(id))
|
||||
@@ -330,46 +157,67 @@ export async function POST(request: NextRequest) {
|
||||
)
|
||||
}
|
||||
|
||||
// Adaptive query strategy based on accessible KB count and parameters
|
||||
const strategy = getQueryStrategy(accessibleKbIds.length, validatedData.topK)
|
||||
const queryVector = JSON.stringify(queryEmbedding)
|
||||
let results: SearchResult[]
|
||||
|
||||
let results: any[]
|
||||
const hasFilters = mappedFilters && Object.keys(mappedFilters).length > 0
|
||||
|
||||
if (strategy.useParallel) {
|
||||
// Execute parallel queries for better performance with many KBs
|
||||
logger.debug(`[${requestId}] Executing parallel queries with filters:`, mappedFilters)
|
||||
const parallelResults = await executeParallelQueries(
|
||||
accessibleKbIds,
|
||||
if (!hasQuery && hasFilters) {
|
||||
// Tag-only search without vector similarity
|
||||
logger.debug(`[${requestId}] Executing tag-only search with filters:`, mappedFilters)
|
||||
results = await handleTagOnlySearch({
|
||||
knowledgeBaseIds: accessibleKbIds,
|
||||
topK: validatedData.topK,
|
||||
filters: mappedFilters,
|
||||
})
|
||||
} else if (hasQuery && hasFilters) {
|
||||
// Tag + Vector search
|
||||
logger.debug(`[${requestId}] Executing tag + vector search with filters:`, mappedFilters)
|
||||
const strategy = getQueryStrategy(accessibleKbIds.length, validatedData.topK)
|
||||
const queryVector = JSON.stringify(queryEmbedding)
|
||||
|
||||
results = await handleTagAndVectorSearch({
|
||||
knowledgeBaseIds: accessibleKbIds,
|
||||
topK: validatedData.topK,
|
||||
filters: mappedFilters,
|
||||
queryVector,
|
||||
validatedData.topK,
|
||||
strategy.distanceThreshold,
|
||||
mappedFilters
|
||||
)
|
||||
results = mergeAndRankResults(parallelResults, validatedData.topK)
|
||||
distanceThreshold: strategy.distanceThreshold,
|
||||
})
|
||||
} else if (hasQuery && !hasFilters) {
|
||||
// Vector-only search
|
||||
logger.debug(`[${requestId}] Executing vector-only search`)
|
||||
const strategy = getQueryStrategy(accessibleKbIds.length, validatedData.topK)
|
||||
const queryVector = JSON.stringify(queryEmbedding)
|
||||
|
||||
results = await handleVectorOnlySearch({
|
||||
knowledgeBaseIds: accessibleKbIds,
|
||||
topK: validatedData.topK,
|
||||
queryVector,
|
||||
distanceThreshold: strategy.distanceThreshold,
|
||||
})
|
||||
} else {
|
||||
// Execute single optimized query for fewer KBs
|
||||
logger.debug(`[${requestId}] Executing single query with filters:`, mappedFilters)
|
||||
results = await executeSingleQuery(
|
||||
accessibleKbIds,
|
||||
queryVector,
|
||||
validatedData.topK,
|
||||
strategy.distanceThreshold,
|
||||
mappedFilters
|
||||
// This should never happen due to schema validation, but just in case
|
||||
return NextResponse.json(
|
||||
{
|
||||
error:
|
||||
'Please provide either a search query or tag filters to search your knowledge base',
|
||||
},
|
||||
{ status: 400 }
|
||||
)
|
||||
}
|
||||
|
||||
// Calculate cost for the embedding (with fallback if calculation fails)
|
||||
let cost = null
|
||||
let tokenCount = null
|
||||
try {
|
||||
tokenCount = estimateTokenCount(validatedData.query, 'openai')
|
||||
cost = calculateCost('text-embedding-3-small', tokenCount.count, 0, false)
|
||||
} catch (error) {
|
||||
logger.warn(`[${requestId}] Failed to calculate cost for search query`, {
|
||||
error: error instanceof Error ? error.message : 'Unknown error',
|
||||
})
|
||||
// Continue without cost information rather than failing the search
|
||||
if (hasQuery) {
|
||||
try {
|
||||
tokenCount = estimateTokenCount(validatedData.query!, 'openai')
|
||||
cost = calculateCost('text-embedding-3-small', tokenCount.count, 0, false)
|
||||
} catch (error) {
|
||||
logger.warn(`[${requestId}] Failed to calculate cost for search query`, {
|
||||
error: error instanceof Error ? error.message : 'Unknown error',
|
||||
})
|
||||
// Continue without cost information rather than failing the search
|
||||
}
|
||||
}
|
||||
|
||||
// Fetch tag definitions for display name mapping (reuse the same fetch from filtering)
|
||||
@@ -412,12 +260,13 @@ export async function POST(request: NextRequest) {
|
||||
const tags: Record<string, any> = {}
|
||||
|
||||
TAG_SLOTS.forEach((slot) => {
|
||||
if (result[slot]) {
|
||||
const tagValue = (result as any)[slot]
|
||||
if (tagValue) {
|
||||
const displayName = kbTagMap[slot] || slot
|
||||
logger.debug(
|
||||
`[${requestId}] Mapping ${slot}="${result[slot]}" -> "${displayName}"="${result[slot]}"`
|
||||
`[${requestId}] Mapping ${slot}="${tagValue}" -> "${displayName}"="${tagValue}"`
|
||||
)
|
||||
tags[displayName] = result[slot]
|
||||
tags[displayName] = tagValue
|
||||
}
|
||||
})
|
||||
|
||||
@@ -427,10 +276,10 @@ export async function POST(request: NextRequest) {
|
||||
documentId: result.documentId,
|
||||
chunkIndex: result.chunkIndex,
|
||||
tags, // Clean display name mapped tags
|
||||
similarity: 1 - result.distance,
|
||||
similarity: hasQuery ? 1 - result.distance : 1, // Perfect similarity for tag-only searches
|
||||
}
|
||||
}),
|
||||
query: validatedData.query,
|
||||
query: validatedData.query || '',
|
||||
knowledgeBaseIds: accessibleKbIds,
|
||||
knowledgeBaseId: accessibleKbIds[0],
|
||||
topK: validatedData.topK,
|
||||
|
||||
143
apps/sim/app/api/knowledge/search/utils.test.ts
Normal file
143
apps/sim/app/api/knowledge/search/utils.test.ts
Normal file
@@ -0,0 +1,143 @@
|
||||
/**
|
||||
* Tests for knowledge search utility functions
|
||||
* Focuses on testing core functionality with simplified mocking
|
||||
*
|
||||
* @vitest-environment node
|
||||
*/
|
||||
import { describe, expect, it, vi } from 'vitest'
|
||||
|
||||
vi.mock('drizzle-orm')
|
||||
vi.mock('@/lib/logs/console/logger')
|
||||
vi.mock('@/db')
|
||||
|
||||
import { handleTagAndVectorSearch, handleTagOnlySearch, handleVectorOnlySearch } from './utils'
|
||||
|
||||
describe('Knowledge Search Utils', () => {
|
||||
describe('handleTagOnlySearch', () => {
|
||||
it('should throw error when no filters provided', async () => {
|
||||
const params = {
|
||||
knowledgeBaseIds: ['kb-123'],
|
||||
topK: 10,
|
||||
filters: {},
|
||||
}
|
||||
|
||||
await expect(handleTagOnlySearch(params)).rejects.toThrow(
|
||||
'Tag filters are required for tag-only search'
|
||||
)
|
||||
})
|
||||
|
||||
it('should accept valid parameters for tag-only search', async () => {
|
||||
const params = {
|
||||
knowledgeBaseIds: ['kb-123'],
|
||||
topK: 10,
|
||||
filters: { tag1: 'api' },
|
||||
}
|
||||
|
||||
// This test validates the function accepts the right parameters
|
||||
// The actual database interaction is tested via route tests
|
||||
expect(params.knowledgeBaseIds).toEqual(['kb-123'])
|
||||
expect(params.topK).toBe(10)
|
||||
expect(params.filters).toEqual({ tag1: 'api' })
|
||||
})
|
||||
})
|
||||
|
||||
describe('handleVectorOnlySearch', () => {
|
||||
it('should throw error when queryVector not provided', async () => {
|
||||
const params = {
|
||||
knowledgeBaseIds: ['kb-123'],
|
||||
topK: 10,
|
||||
distanceThreshold: 0.8,
|
||||
}
|
||||
|
||||
await expect(handleVectorOnlySearch(params)).rejects.toThrow(
|
||||
'Query vector and distance threshold are required for vector-only search'
|
||||
)
|
||||
})
|
||||
|
||||
it('should throw error when distanceThreshold not provided', async () => {
|
||||
const params = {
|
||||
knowledgeBaseIds: ['kb-123'],
|
||||
topK: 10,
|
||||
queryVector: JSON.stringify([0.1, 0.2, 0.3]),
|
||||
}
|
||||
|
||||
await expect(handleVectorOnlySearch(params)).rejects.toThrow(
|
||||
'Query vector and distance threshold are required for vector-only search'
|
||||
)
|
||||
})
|
||||
|
||||
it('should accept valid parameters for vector-only search', async () => {
|
||||
const params = {
|
||||
knowledgeBaseIds: ['kb-123'],
|
||||
topK: 10,
|
||||
queryVector: JSON.stringify([0.1, 0.2, 0.3]),
|
||||
distanceThreshold: 0.8,
|
||||
}
|
||||
|
||||
// This test validates the function accepts the right parameters
|
||||
expect(params.knowledgeBaseIds).toEqual(['kb-123'])
|
||||
expect(params.topK).toBe(10)
|
||||
expect(params.queryVector).toBe(JSON.stringify([0.1, 0.2, 0.3]))
|
||||
expect(params.distanceThreshold).toBe(0.8)
|
||||
})
|
||||
})
|
||||
|
||||
describe('handleTagAndVectorSearch', () => {
|
||||
it('should throw error when no filters provided', async () => {
|
||||
const params = {
|
||||
knowledgeBaseIds: ['kb-123'],
|
||||
topK: 10,
|
||||
filters: {},
|
||||
queryVector: JSON.stringify([0.1, 0.2, 0.3]),
|
||||
distanceThreshold: 0.8,
|
||||
}
|
||||
|
||||
await expect(handleTagAndVectorSearch(params)).rejects.toThrow(
|
||||
'Tag filters are required for tag and vector search'
|
||||
)
|
||||
})
|
||||
|
||||
it('should throw error when queryVector not provided', async () => {
|
||||
const params = {
|
||||
knowledgeBaseIds: ['kb-123'],
|
||||
topK: 10,
|
||||
filters: { tag1: 'api' },
|
||||
distanceThreshold: 0.8,
|
||||
}
|
||||
|
||||
await expect(handleTagAndVectorSearch(params)).rejects.toThrow(
|
||||
'Query vector and distance threshold are required for tag and vector search'
|
||||
)
|
||||
})
|
||||
|
||||
it('should throw error when distanceThreshold not provided', async () => {
|
||||
const params = {
|
||||
knowledgeBaseIds: ['kb-123'],
|
||||
topK: 10,
|
||||
filters: { tag1: 'api' },
|
||||
queryVector: JSON.stringify([0.1, 0.2, 0.3]),
|
||||
}
|
||||
|
||||
await expect(handleTagAndVectorSearch(params)).rejects.toThrow(
|
||||
'Query vector and distance threshold are required for tag and vector search'
|
||||
)
|
||||
})
|
||||
|
||||
it('should accept valid parameters for tag and vector search', async () => {
|
||||
const params = {
|
||||
knowledgeBaseIds: ['kb-123'],
|
||||
topK: 10,
|
||||
filters: { tag1: 'api' },
|
||||
queryVector: JSON.stringify([0.1, 0.2, 0.3]),
|
||||
distanceThreshold: 0.8,
|
||||
}
|
||||
|
||||
// This test validates the function accepts the right parameters
|
||||
expect(params.knowledgeBaseIds).toEqual(['kb-123'])
|
||||
expect(params.topK).toBe(10)
|
||||
expect(params.filters).toEqual({ tag1: 'api' })
|
||||
expect(params.queryVector).toBe(JSON.stringify([0.1, 0.2, 0.3]))
|
||||
expect(params.distanceThreshold).toBe(0.8)
|
||||
})
|
||||
})
|
||||
})
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user