Mirror of https://github.com/simstudioai/sim.git, synced 2026-01-11 07:58:06 -05:00

Compare commits

147 Commits
Commits in this comparison:

7192cdef6f, 8a9bc4e929, d65bdaf546, 348b524d86, 0361397879, ff2b1d33c8, db22e26662, 38f5aae0fb,
36eb04dab5, f8000a747a, 9a954d1830, f16d759d8d, dad72e3100, 4285b36a42, 6967ac0417, 027614f373,
b4b6895efa, 5b7c07c283, db4ad80a4c, 0023e8df80, c81f881855, 2f726fa9f3, 4d3dee7f0f, 7860894007,
52ffc39194, f666ccad43, 95dfe9e6d2, 91a4c6d588, 5f9bfdde06, 0bd480cfe4, 1afdeed244, e216b176ac,
f0e605f5fa, 4c85e34d2b, 7e174f239a, af0dcf7efd, 617ea25ab4, b7d536b7bc, e83745fcaf, 3887733da5,
614d826217, a0a4b21000, 1f6dcd8465, 30538d9380, 6149489483, 9ede001202, 209d822ce9, 31d9e2a4a8,
e5080febd5, 529fd44405, 717b4dd2ff, 8aa86e0e9d, 148f0a6da3, 14f422ef5e, f27cb18883, e102b6cf17,
50595c5c49, 3c61bc167a, ef681d8a04, df4971a876, f269fc9776, c65384d715, 24e19a83a5, 5c487f59f9,
c45da7b93e, cfc261d646, 763d0de5d5, eade867d98, 4a26b061a4, 8176b37d89, 610ea0b689, 3c1914c566,
218041dba3, a2827a52c0, 6ca8311a76, 37c4f835dd, 0b01d4bc78, a5883171f9, c2f786e40b, 3421eaec27,
f6b25bf727, aa343fb62f, cc249c2dd0, f1734766c3, e37f362459, bb9291aecc, 5dc3ba3379, 684a8020d4,
9097c520a5, bacb6f3831, 2a0224f6ae, 6cb15a620a, c7b77bd303, c0b8e1aca3, 82cb609bb7, 07cd6f9e49,
c53e950269, 2ce68aedf5, 88282378ea, 1b3b85f4c4, 4b60bba992, 4aaa68d21b, 776ae06671, ccf5c2f6d8,
02c41127c2, d1fe209d29, ee66c15ed9, d9046042af, 4fffc66ee0, a3159bcebc, 2354909ef9, caccb61362,
3c7e7949d9, 537fbdb2ce, 3460a7b39e, d75751bbe6, 2c9a4f4c3e, 767b63c57d, b58d8773c9, 3af1a6e100,
840a028f92, 7bc644a478, 70a51006f6, 17513d77ea, 6dc8b17bed, 70a5f4ec31, b9fa50b4de, 97021559cc,
76c0c56689, 850447a604, 0f21fbf705, 3e45d793f1, 5167deb75c, 02b7899861, 7e4669108f, ede224a15f,
5cf7d025db, b4eda8fe6a, 60e2e6c735, c635b19548, f3bc1fc250, 0bf9ce0b9e, e22f0123a3, 231bfb9add,
cac9ad250d, 78b5ae7b3d, 016cd6750c
.github/CONTRIBUTING.md (vendored): 74 lines changed

````diff
@@ -15,8 +15,6 @@ Thank you for your interest in contributing to Sim Studio! Our goal is to provid
 - [Commit Message Guidelines](#commit-message-guidelines)
 - [Local Development Setup](#local-development-setup)
 - [Adding New Blocks and Tools](#adding-new-blocks-and-tools)
 - [Local Storage Mode](#local-storage-mode)
 - [Standalone Build](#standalone-build)
 - [License](#license)
 - [Contributor License Agreement (CLA)](#contributor-license-agreement-cla)
@@ -57,7 +55,7 @@ We strive to keep our workflow as simple as possible. To contribute:
    ```
 
 7. **Create a Pull Request**
-   Open a pull request against the `main` branch on GitHub. Please provide a clear description of the changes and reference any relevant issues (e.g., `fixes #123`).
+   Open a pull request against the `staging` branch on GitHub. Please provide a clear description of the changes and reference any relevant issues (e.g., `fixes #123`).
 
 ---
@@ -85,7 +83,7 @@ If you discover a bug or have a feature request, please open an issue in our Git
 Before creating a pull request:
 
 - **Ensure Your Branch Is Up-to-Date:**
-  Rebase your branch onto the latest `main` branch to prevent merge conflicts.
+  Rebase your branch onto the latest `staging` branch to prevent merge conflicts.
 - **Follow the Guidelines:**
   Make sure your changes are well-tested, follow our coding standards, and include relevant documentation if necessary.
@@ -209,13 +207,14 @@ Dev Containers provide a consistent and easy-to-use development environment:
 3. **Start Developing:**
 
-   - Run `bun run dev` in the terminal or use the `sim-start` alias
+   - Run `bun run dev:full` in the terminal or use the `sim-start` alias
+   - This starts both the main application and the realtime socket server
    - All dependencies and configurations are automatically set up
    - Your changes will be automatically hot-reloaded
 
 4. **GitHub Codespaces:**
    - This setup also works with GitHub Codespaces if you prefer development in the browser
-   - Just click "Code" → "Codespaces" → "Create codespace on main"
+   - Just click "Code" → "Codespaces" → "Create codespace on staging"
 
 ### Option 4: Manual Setup
@@ -246,9 +245,11 @@ If you prefer not to use Docker or Dev Containers:
 4. **Run the Development Server:**
 
    ```bash
-   bun run dev
+   bun run dev:full
    ```
 
+   This command starts both the main application and the realtime socket server required for full functionality.
+
 5. **Make Your Changes and Test Locally.**
 
 ### Email Template Development
@@ -379,7 +380,18 @@ In addition, you will need to update the registries:
     provider: 'pinecone', // ID of the OAuth provider
 
     params: {
       // Tool parameters
       parameterName: {
         type: 'string',
         required: true,
         visibility: 'user-or-llm', // Controls parameter visibility
         description: 'Description of the parameter',
       },
       optionalParam: {
         type: 'string',
         required: false,
         visibility: 'user-only',
         description: 'Optional parameter only user can set',
       },
     },
     request: {
       // Request configuration
@@ -429,11 +441,57 @@ Maintaining consistent naming across the codebase is critical for auto-generatio
 - **Tool Exports:** Should be named `{toolName}Tool` (e.g., `fetchTool`)
 - **Tool IDs:** Should follow the format `{provider}_{tool_name}` (e.g., `pinecone_fetch`)
 
+### Parameter Visibility System
+
+Sim Studio implements a sophisticated parameter visibility system that controls how parameters are exposed to users and LLMs in agent workflows. Each parameter can have one of four visibility levels:
+
+| Visibility | User Sees | LLM Sees | How It Gets Set |
+|-------------|-----------|----------|--------------------------------|
+| `user-only` | ✅ Yes | ❌ No | User provides in UI |
+| `user-or-llm` | ✅ Yes | ✅ Yes | User provides OR LLM generates |
+| `llm-only` | ❌ No | ✅ Yes | LLM generates only |
+| `hidden` | ❌ No | ❌ No | Application injects at runtime |
+
+#### Visibility Guidelines
+
+- **`user-or-llm`**: Use for core parameters that can be provided by users or intelligently filled by the LLM (e.g., search queries, email subjects)
+- **`user-only`**: Use for configuration parameters, API keys, and settings that only users should control (e.g., number of results, authentication credentials)
+- **`llm-only`**: Use for computed values that the LLM should handle internally (e.g., dynamic calculations, contextual data)
+- **`hidden`**: Use for system-level parameters injected at runtime (e.g., OAuth tokens, internal identifiers)
+
+#### Example Implementation
+
+```typescript
+params: {
+  query: {
+    type: 'string',
+    required: true,
+    visibility: 'user-or-llm', // User can provide or LLM can generate
+    description: 'Search query to execute',
+  },
+  apiKey: {
+    type: 'string',
+    required: true,
+    visibility: 'user-only', // Only user provides this
+    description: 'API key for authentication',
+  },
+  internalId: {
+    type: 'string',
+    required: false,
+    visibility: 'hidden', // System provides this at runtime
+    description: 'Internal tracking identifier',
+  },
+}
+```
+
+This visibility system ensures clean user interfaces while maintaining full flexibility for LLM-driven workflows.
+
 ### Guidelines & Best Practices
 
 - **Code Style:** Follow the project's ESLint and Prettier configurations. Use meaningful variable names and small, focused functions.
 - **Documentation:** Clearly document the purpose, inputs, outputs, and any special behavior for your block/tool.
 - **Error Handling:** Implement robust error handling and provide user-friendly error messages.
+- **Parameter Visibility:** Always specify the appropriate visibility level for each parameter to ensure proper UI behavior and LLM integration.
 - **Testing:** Add unit or integration tests to verify your changes when possible.
 - **Commit Changes:** Update all related components and registries, and describe your changes in your pull request.
````
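The four visibility levels introduced above lend themselves to a narrow union type. A minimal TypeScript sketch, assuming hypothetical type and function names (the actual definitions in the Sim codebase may differ):

```typescript
// Hypothetical modeling of the visibility levels; type and function
// names are illustrative, not Sim's actual definitions.
type ParamVisibility = 'user-only' | 'user-or-llm' | 'llm-only' | 'hidden'

interface ToolParam {
  type: 'string' | 'number' | 'boolean' | 'json'
  required: boolean
  visibility: ParamVisibility
  description: string
}

// Only these two levels expose a parameter to the LLM.
const isLlmVisible = (p: ToolParam): boolean =>
  p.visibility === 'user-or-llm' || p.visibility === 'llm-only'

// Only these two levels surface a field in the user-facing UI.
const isUserVisible = (p: ToolParam): boolean =>
  p.visibility === 'user-only' || p.visibility === 'user-or-llm'
```

Keeping both checks next to the type mirrors the table: UI rendering and LLM schema generation each filter on their own predicate.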
````diff
@@ -1 +1 @@
-bun lint
+bunx lint-staged
````
README.md: 22 lines changed
````diff
@@ -87,6 +87,7 @@ docker compose -f docker-compose.prod.yml up -d
 1. Open VS Code with the [Remote - Containers extension](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers)
 2. Open the project and click "Reopen in Container" when prompted
 3. Run `bun run dev:full` in the terminal or use the `sim-start` alias
+   - This starts both the main application and the realtime socket server
 
 ### Option 4: Manual Setup
````
````diff
@@ -113,24 +114,27 @@ bunx drizzle-kit push
 4. Start the development servers:
 
-   Next.js app:
+   **Recommended approach - run both servers together (from project root):**
+
+   ```bash
+   bun run dev:full
+   ```
+
+   This starts both the main Next.js application and the realtime socket server required for full functionality.
+
+   **Alternative - run servers separately:**
+
+   Next.js app (from project root):
 
    ```bash
    bun run dev
    ```
 
-   Start the realtime server:
-
+   Realtime socket server (from `apps/sim` directory in a separate terminal):
 
    ```bash
    cd apps/sim
    bun run dev:sockets
    ```
 
-   Run both together (recommended):
-
-   ```bash
-   bun run dev:full
-   ```
-
 ## Tech Stack
 
 - **Framework**: [Next.js](https://nextjs.org/) (App Router)
````
````diff
@@ -88,9 +88,8 @@ For security and performance reasons, function execution has certain limitations
 
 ### Outputs
 
-- **Result**: The value returned by your function
-- **Standard Output**: Any console output from your function
-- **Execution Time**: The time taken to execute your function (in milliseconds)
+- **result**: The value returned by your function
+- **stdout**: Any console output from your function
 
 ## Example Usage
````
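As a sketch of how the renamed outputs surface, a function-block body like the following (block and variable names hypothetical) would expose its return value as `result` and its logged text as `stdout`. Per the workflow examples later in this diff, function-block code runs as a body, so a bare `return` is allowed:

```typescript
// Hypothetical function-block body: the returned value becomes
// <blockName.result> and anything logged becomes <blockName.stdout>.
const items = [1, 2, 3]
console.log(`processing ${items.length} items`) // captured in stdout
return items.map((n) => n * 2) // captured in result
```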
````diff
@@ -115,14 +115,9 @@ Headers are configured as key-value pairs:
   </Tab>
   <Tab>
     <ul className="list-disc space-y-2 pl-6">
-      <li>
-        <strong>response</strong>: Complete response object containing:
-        <ul className="list-disc space-y-1 pl-6 mt-2">
-          <li><strong>data</strong>: The response body data</li>
-          <li><strong>status</strong>: HTTP status code</li>
-          <li><strong>headers</strong>: Response headers</li>
-        </ul>
-      </li>
+      <li><strong>data</strong>: The response body data</li>
+      <li><strong>status</strong>: HTTP status code</li>
+      <li><strong>headers</strong>: Response headers</li>
     </ul>
   </Tab>
 </Tabs>
````
````diff
@@ -66,17 +66,17 @@ Define the data to pass to the child workflow:
 
 - **Single Variable Input**: Select a variable or block output to pass to the child workflow
 - **Variable References**: Use `<variable.name>` to reference workflow variables
-- **Block References**: Use `<blockName.response.field>` to reference outputs from previous blocks
-- **Automatic Mapping**: The selected data is automatically available as `start.response.input` in the child workflow
+- **Block References**: Use `<blockName.field>` to reference outputs from previous blocks
+- **Automatic Mapping**: The selected data is automatically available as `start.input` in the child workflow
 - **Optional**: The input field is optional - child workflows can run without input data
 - **Type Preservation**: Variable types (strings, numbers, objects, etc.) are preserved when passed to the child workflow
 
 ### Examples of Input References
 
 - `<variable.customerData>` - Pass a workflow variable
-- `<dataProcessor.response.result>` - Pass the result from a previous block
-- `<start.response.input>` - Pass the original workflow input
-- `<apiCall.response.data.user>` - Pass a specific field from an API response
+- `<dataProcessor.result>` - Pass the result from a previous block
+- `<start.input>` - Pass the original workflow input
+- `<apiCall.data.user>` - Pass a specific field from an API response
 
 ### Execution Context
````
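A minimal sketch of how flat references like these could be resolved, assuming block outputs are plain objects keyed by block name; the function and variable names are illustrative, not Sim's actual resolver:

```typescript
// Illustrative resolver for references like "<dataProcessor.result>" or
// "<apiCall.data.user>"; Sim's real resolver handles more cases.
function resolveReference(
  ref: string,
  outputs: Record<string, unknown>
): unknown {
  const match = ref.match(/^<([^.>]+)\.(.+)>$/)
  if (!match) return undefined
  const [, blockName, path] = match
  // Walk the dotted path into the named block's output object.
  return path.split('.').reduce<unknown>(
    (value, key) =>
      value != null && typeof value === 'object'
        ? (value as Record<string, unknown>)[key]
        : undefined,
    outputs[blockName]
  )
}

// resolveReference('<apiCall.data.user>', { apiCall: { data: { user: { id: 1 } } } })
// returns { id: 1 }
```

Note that after this change the dotted path starts directly at the output field; there is no intermediate `response` segment to traverse.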
````diff
@@ -109,7 +109,7 @@ To prevent infinite recursion and ensure system stability, the Workflow block in
   <li>
     <strong>Workflow ID</strong>: The identifier of the workflow to execute
   </li>
   <li>
-    <strong>Input Variable</strong>: Variable or block reference to pass to the child workflow (e.g., `<variable.name>` or `<block.response.field>`)
+    <strong>Input Variable</strong>: Variable or block reference to pass to the child workflow (e.g., `<variable.name>` or `<block.field>`)
   </li>
 </ul>
 </Tab>
````
````diff
@@ -150,23 +150,23 @@ blocks:
   - type: workflow
     name: "Setup Customer Account"
     workflowId: "account-setup-workflow"
-    input: "<Validate Customer Data.response.result>"
+    input: "<Validate Customer Data.result>"
 
   - type: workflow
     name: "Send Welcome Email"
     workflowId: "welcome-email-workflow"
-    input: "<Setup Customer Account.response.result.accountDetails>"
+    input: "<Setup Customer Account.result.accountDetails>"
 ```
 
 ### Child Workflow: Customer Validation
 ```yaml
 # Reusable customer validation workflow
-# Access the input data using: start.response.input
+# Access the input data using: start.input
 blocks:
   - type: function
     name: "Validate Email"
     code: |
-      const customerData = start.response.input;
+      const customerData = start.input;
       const emailRegex = /^[^\s@]+@[^\s@]+\.[^\s@]+$/;
       return emailRegex.test(customerData.email);
@@ -174,7 +174,7 @@ blocks:
     name: "Check Credit Score"
     url: "https://api.creditcheck.com/score"
     method: "POST"
-    body: "<start.response.input>"
+    body: "<start.input>"
 ```
 
 ### Variable Reference Examples
@@ -184,13 +184,13 @@ blocks:
 input: "<variable.customerInfo>"
 
 # Using block outputs
-input: "<dataProcessor.response.cleanedData>"
+input: "<dataProcessor.cleanedData>"
 
 # Using nested object properties
-input: "<apiCall.response.data.user.profile>"
+input: "<apiCall.data.user.profile>"
 
 # Using array elements (if supported by the resolver)
-input: "<listProcessor.response.items[0]>"
+input: "<listProcessor.items[0]>"
 ```
 
 ## Access Control and Permissions
````
````diff
@@ -81,4 +81,4 @@ Sim Studio provides a wide range of features designed to accelerate your develop
 
 ##
 
-Ready to get started? Check out our [Getting Started](/getting-started) guide or explore our [Blocks](/docs/blocks) and [Tools](/docs/tools) in more detail.
+Ready to get started? Check out our [Getting Started](/getting-started) guide or explore our [Blocks](/blocks) and [Tools](/tools) in more detail.
````
````diff
@@ -182,10 +182,9 @@ Update multiple existing records in an Airtable table
 
 | Output | Type | Description |
 | ------ | ---- | ----------- |
-| `response` | object | Output from response |
-| ↳ `records` | json | records of the response |
-| ↳ `record` | json | record of the response |
-| ↳ `metadata` | json | metadata of the response |
+| `records` | json | records output from the block |
+| `record` | json | record output from the block |
+| `metadata` | json | metadata output from the block |
 
 ## Notes
````
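The same flattening recurs throughout the tool reference tables below: fields previously nested under a `response` object become top-level outputs. In reference terms, roughly (the block name `airtable1` is hypothetical):

```typescript
// Block name "airtable1" is illustrative only.
const oldStyle = '<airtable1.response.records>' // nested addressing (before)
const newStyle = '<airtable1.records>'          // flattened addressing (after)
```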
````diff
@@ -1,187 +0,0 @@
----
-title: Autoblocks
-description: Manage and use versioned prompts with Autoblocks
----
-
-import { BlockInfoCard } from "@/components/ui/block-info-card"
-
-<BlockInfoCard
-  type="autoblocks"
-  color="#0D2929"
-  icon={true}
-  iconSvg={`<svg className="block-icon" version='1.1' id='Layer_1' xmlns='http://www.w3.org/2000/svg' xmlnsXlink='http://www.w3.org/1999/xlink' x='0px' y='0px' viewBox='0 0 1250 1250' enableBackground='new 0 0 1250 1250' xmlSpace='preserve'>
-    {/* three white <path> elements; long coordinate data omitted */}
-  </svg>`}
-/>
-
-{/* MANUAL-CONTENT-START:intro */}
-[Autoblocks](https://www.autoblocks.ai/) is a comprehensive platform for managing, monitoring, and optimizing AI applications. It provides robust tools for prompt management that enable teams to collaborate effectively on AI prompts while maintaining version control and type safety.
-
-With Autoblocks, you can:
-
-- **Version and manage prompts**: Track changes, roll back to previous versions, and maintain a history of prompt iterations
-- **Collaborate across teams**: Enable product, engineering, and AI teams to work together on prompt development
-- **Ensure type safety**: Get autocomplete and validation for prompt variables
-- **Monitor prompt performance**: Track metrics and analyze how changes affect outcomes
-- **Test prompts**: Compare different versions and evaluate results before deployment
-
-Autoblocks integrates seamlessly with your existing AI workflows in Sim Studio, providing a structured approach to prompt engineering that improves consistency and reduces errors.
-{/* MANUAL-CONTENT-END */}
-
-## Usage Instructions
-
-Collaborate on prompts with type safety, autocomplete, and backwards-incompatibility protection. Autoblocks prompt management allows product teams to collaborate while maintaining excellent developer experience.
-
-## Tools
-
-### `autoblocks_prompt_manager`
-
-Manage and render prompts using Autoblocks prompt management system
-
-#### Input
-
-| Parameter | Type | Required | Description |
-| --------- | ---- | -------- | ----------- |
-| `promptId` | string | Yes | The ID of the prompt to retrieve |
-| `version` | string | Yes | Version strategy \(latest or specific\) |
-| `specificVersion` | string | No | Specific version to use \(e.g., |
-| `templateParams` | object | No | Parameters to render the template with |
-| `apiKey` | string | Yes | Autoblocks API key |
-| `enableABTesting` | boolean | No | Whether to enable A/B testing between versions |
-| `abTestConfig` | object | No | Configuration for A/B testing between versions |
-| `environment` | string | Yes | Environment to use \(production, staging, development\) |
-
-#### Output
-
-| Parameter | Type |
-| --------- | ---- |
-| `promptId` | string |
-| `version` | string |
-| `renderedPrompt` | string |
-| `templates` | string |
-
-## Block Configuration
-
-### Input
-
-| Parameter | Type | Required | Description |
-| --------- | ---- | -------- | ----------- |
-| `promptId` | string | Yes | Prompt ID - Enter the Autoblocks prompt ID |
-
-### Outputs
-
-| Output | Type | Description |
-| ------ | ---- | ----------- |
-| `response` | object | Output from response |
-| ↳ `promptId` | string | promptId of the response |
-| ↳ `version` | string | version of the response |
-| ↳ `renderedPrompt` | string | renderedPrompt of the response |
-| ↳ `templates` | json | templates of the response |
-
-## Notes
-
-- Category: `tools`
-- Type: `autoblocks`
````
````diff
@@ -102,11 +102,10 @@ Runs a browser automation task using BrowserUse
 
 | Output | Type | Description |
 | ------ | ---- | ----------- |
-| `response` | object | Output from response |
-| ↳ `id` | string | id of the response |
-| ↳ `success` | boolean | success of the response |
-| ↳ `output` | any | output of the response |
-| ↳ `steps` | json | steps of the response |
+| `id` | string | id output from the block |
+| `success` | boolean | success output from the block |
+| `output` | any | output output from the block |
+| `steps` | json | steps output from the block |
 
 ## Notes
````
````diff
@@ -214,7 +214,7 @@ Populate Clay with data from a JSON file. Enables direct communication and notif
 | --------- | ---- | -------- | ----------- |
 | `webhookURL` | string | Yes | The webhook URL to populate |
 | `data` | json | Yes | The data to populate |
-| `authToken` | string | No | Optional auth token for WebhookURL |
+| `authToken` | string | Yes | Auth token for Clay webhook authentication |
 
 #### Output
@@ -238,8 +238,7 @@ Populate Clay with data from a JSON file. Enables direct communication and notif
 
 | Output | Type | Description |
 | ------ | ---- | ----------- |
-| `response` | object | Output from response |
-| ↳ `data` | any | data of the response |
+| `data` | any | data output from the block |
 
 ## Notes
````
````diff
@@ -113,12 +113,11 @@ Update a Confluence page using the Confluence API.
 
 | Output | Type | Description |
 | ------ | ---- | ----------- |
-| `response` | object | Output from response |
-| ↳ `ts` | string | ts of the response |
-| ↳ `pageId` | string | pageId of the response |
-| ↳ `content` | string | content of the response |
-| ↳ `title` | string | title of the response |
-| ↳ `success` | boolean | success of the response |
+| `ts` | string | ts output from the block |
+| `pageId` | string | pageId output from the block |
+| `content` | string | content output from the block |
+| `title` | string | title output from the block |
+| `success` | boolean | success output from the block |
 
 ## Notes
````
````diff
@@ -150,9 +150,8 @@ Retrieve information about a Discord user
 
 | Output | Type | Description |
 | ------ | ---- | ----------- |
-| `response` | object | Output from response |
-| ↳ `message` | string | message of the response |
-| ↳ `data` | any | data of the response |
+| `message` | string | message output from the block |
+| `data` | any | data output from the block |
 
 ## Notes
````
````diff
@@ -53,10 +53,10 @@ Convert TTS using ElevenLabs voices
 
 | Parameter | Type | Required | Description |
 | --------- | ---- | -------- | ----------- |
-| `apiKey` | string | Yes | Your ElevenLabs API key |
 | `text` | string | Yes | The text to convert to speech |
 | `voiceId` | string | Yes | The ID of the voice to use |
 | `modelId` | string | No | The ID of the model to use \(defaults to eleven_monolingual_v1\) |
+| `apiKey` | string | Yes | Your ElevenLabs API key |
 
 #### Output
@@ -80,8 +80,7 @@ Convert TTS using ElevenLabs voices
 
 | Output | Type | Description |
 | ------ | ---- | ----------- |
-| `response` | object | Output from response |
-| ↳ `audioUrl` | string | audioUrl of the response |
+| `audioUrl` | string | audioUrl output from the block |
 
 ## Notes
````
````diff
@@ -158,11 +158,10 @@ Get an AI-generated answer to a question with citations from the web using Exa A
 
 | Output | Type | Description |
 | ------ | ---- | ----------- |
-| `response` | object | Output from response |
-| ↳ `results` | json | results of the response |
-| ↳ `similarLinks` | json | similarLinks of the response |
-| ↳ `answer` | string | answer of the response |
-| ↳ `citations` | json | citations of the response |
+| `results` | json | results output from the block |
+| `similarLinks` | json | similarLinks output from the block |
+| `answer` | string | answer output from the block |
+| `citations` | json | citations output from the block |
 
 ## Notes
````
````diff
@@ -87,9 +87,8 @@ This tool does not produce any outputs.
 
 | Output | Type | Description |
 | ------ | ---- | ----------- |
-| `response` | object | Output from response |
-| ↳ `files` | json | files of the response |
-| ↳ `combinedContent` | string | combinedContent of the response |
+| `files` | json | files output from the block |
+| `combinedContent` | string | combinedContent output from the block |
 
 ## Notes
````
````diff
@@ -65,9 +65,9 @@ Extract structured content from web pages with comprehensive metadata support. C
 
 | Parameter | Type | Required | Description |
 | --------- | ---- | -------- | ----------- |
-| `apiKey` | string | Yes | Firecrawl API key |
 | `url` | string | Yes | The URL to scrape content from |
 | `scrapeOptions` | json | No | Options for content scraping |
+| `apiKey` | string | Yes | Firecrawl API key |
 
 #### Output
@@ -85,8 +85,8 @@ Search for information on the web using Firecrawl
 
 | Parameter | Type | Required | Description |
 | --------- | ---- | -------- | ----------- |
-| `apiKey` | string | Yes | Firecrawl API key |
 | `query` | string | Yes | The search query to use |
+| `apiKey` | string | Yes | Firecrawl API key |
 
 #### Output
@@ -111,12 +111,11 @@ Search for information on the web using Firecrawl
 
 | Output | Type | Description |
 | ------ | ---- | ----------- |
-| `response` | object | Output from response |
-| ↳ `markdown` | string | markdown of the response |
-| ↳ `html` | any | html of the response |
-| ↳ `metadata` | json | metadata of the response |
-| ↳ `data` | json | data of the response |
-| ↳ `warning` | any | warning of the response |
+| `markdown` | string | markdown output from the block |
+| `html` | any | html output from the block |
+| `metadata` | json | metadata output from the block |
+| `data` | json | data output from the block |
+| `warning` | any | warning output from the block |
 
 ## Notes
````
````diff
@@ -85,15 +85,15 @@ Create comments on GitHub PRs
 | --------- | ---- | -------- | ----------- |
 | `owner` | string | Yes | Repository owner |
 | `repo` | string | Yes | Repository name |
-| `pullNumber` | number | Yes | Pull request number |
 | `body` | string | Yes | Comment content |
+| `pullNumber` | number | Yes | Pull request number |
 | `path` | string | No | File path for review comment |
-| `position` | number | No | Line number for review comment |
-| `apiKey` | string | Yes | GitHub API token |
 | `commentType` | string | No | Type of comment \(pr_comment or file_comment\) |
+| `line` | number | No | Line number for review comment |
+| `side` | string | No | Side of the diff \(LEFT or RIGHT\) |
+| `commitId` | string | No | The SHA of the commit to comment on |
+| `apiKey` | string | Yes | GitHub API token |
 
 #### Output
@@ -174,9 +174,8 @@ Retrieve the latest commit from a GitHub repository
 
 | Output | Type | Description |
 | ------ | ---- | ----------- |
-| `response` | object | Output from response |
-| ↳ `content` | string | content of the response |
-| ↳ `metadata` | json | metadata of the response |
+| `content` | string | content output from the block |
+| `metadata` | json | metadata output from the block |
 
 ## Notes
````
````diff
@@ -79,19 +79,18 @@ Send emails using Gmail
 | `threadId` | string |
 | `labelIds` | string |
 
-### `gmail_read`
+### `gmail_draft`
 
-Read emails from Gmail
+Draft emails using Gmail
 
 #### Input
 
 | Parameter | Type | Required | Description |
 | --------- | ---- | -------- | ----------- |
 | `accessToken` | string | Yes | Access token for Gmail API |
-| `messageId` | string | No | ID of the message to read |
-| `folder` | string | No | Folder/label to read emails from |
-| `unreadOnly` | boolean | No | Only retrieve unread messages |
-| `maxResults` | number | No | Maximum number of messages to retrieve \(default: 1, max: 10\) |
+| `to` | string | Yes | Recipient email address |
+| `subject` | string | Yes | Email subject |
+| `body` | string | Yes | Email body content |
 
 #### Output
@@ -99,30 +98,19 @@ Read emails from Gmail
 | --------- | ---- |
 | `content` | string |
 | `metadata` | string |
 
-### `gmail_search`
-
-Search emails in Gmail
-
-#### Input
-
-| Parameter | Type | Required | Description |
-| --------- | ---- | -------- | ----------- |
-| `accessToken` | string | Yes | Access token for Gmail API |
-| `query` | string | Yes | Search query for emails |
-| `maxResults` | number | No | Maximum number of results to return |
-
-#### Output
-
-| Parameter | Type |
-| --------- | ---- |
-| `content` | string |
+| `message` | string |
+| `threadId` | string |
+| `labelIds` | string |
 
 ## Block Configuration
 
-No configuration parameters required.
+### Input
+
+| Parameter | Type | Required | Description |
+| --------- | ---- | -------- | ----------- |
+| `operation` | string | Yes | Operation |
@@ -130,9 +118,8 @@ No configuration parameters required.
 
 | Output | Type | Description |
 | ------ | ---- | ----------- |
-| `response` | object | Output from response |
-| ↳ `content` | string | content of the response |
-| ↳ `metadata` | json | metadata of the response |
+| `content` | string | content output from the block |
+| `metadata` | json | metadata output from the block |
 
 ## Notes
````
````diff
@@ -228,9 +228,8 @@ Invite attendees to an existing Google Calendar event
 
 | Output | Type | Description |
 | ------ | ---- | ----------- |
-| `response` | object | Output from response |
-| ↳ `content` | string | content of the response |
-| ↳ `metadata` | json | metadata of the response |
+| `content` | string | content output from the block |
+| `metadata` | json | metadata output from the block |
 
 ## Notes
````
````diff
@@ -135,7 +135,8 @@ Create a new Google Docs document
 | `accessToken` | string | Yes | The access token for the Google Docs API |
 | `title` | string | Yes | The title of the document to create |
 | `content` | string | No | The content of the document to create |
-| `folderId` | string | No | The ID of the folder to create the document in |
+| `folderSelector` | string | No | Select the folder to create the document in |
+| `folderId` | string | No | The ID of the folder to create the document in \(internal use\) |
 
 #### Output
@@ -159,10 +160,9 @@ Create a new Google Docs document
 
 | Output | Type | Description |
 | ------ | ---- | ----------- |
-| `response` | object | Output from response |
-| ↳ `content` | string | content of the response |
-| ↳ `metadata` | json | metadata of the response |
-| ↳ `updatedContent` | boolean | updatedContent of the response |
+| `content` | string | content output from the block |
+| `metadata` | json | metadata output from the block |
+| `updatedContent` | boolean | updatedContent output from the block |
 
 ## Notes
````
````diff
@@ -91,7 +91,8 @@ Upload a file to Google Drive
 | `fileName` | string | Yes | The name of the file to upload |
 | `content` | string | Yes | The content of the file to upload |
 | `mimeType` | string | No | The MIME type of the file to upload |
-| `folderId` | string | No | The ID of the folder to upload the file to |
+| `folderSelector` | string | No | Select the folder to upload the file to |
+| `folderId` | string | No | The ID of the folder to upload the file to \(internal use\) |
 
 #### Output
@@ -117,7 +118,8 @@ Create a new folder in Google Drive
 | --------- | ---- | -------- | ----------- |
 | `accessToken` | string | Yes | The access token for the Google Drive API |
 | `fileName` | string | Yes | Name of the folder to create |
-| `folderId` | string | No | ID of the parent folder \(leave empty for root folder\) |
+| `folderSelector` | string | No | Select the parent folder to create the folder in |
+| `folderId` | string | No | ID of the parent folder \(internal use\) |
 
 #### Output
@@ -142,7 +144,8 @@ List files and folders in Google Drive
 | Parameter | Type | Required | Description |
 | --------- | ---- | -------- | ----------- |
 | `accessToken` | string | Yes | The access token for the Google Drive API |
-| `folderId` | string | No | The ID of the folder to list files from |
+| `folderSelector` | string | No | Select the folder to list files from |
+| `folderId` | string | No | The ID of the folder to list files from \(internal use\) |
 | `query` | string | No | A query to filter the files |
 | `pageSize` | number | No | The number of files to return |
 | `pageToken` | string | No | The page token to use for pagination |
@@ -177,9 +180,8 @@ List files and folders in Google Drive
 
 | Output | Type | Description |
 | ------ | ---- | ----------- |
-| `response` | object | Output from response |
-| ↳ `file` | json | file of the response |
-| ↳ `files` | json | files of the response |
+| `file` | json | file output from the block |
+| `files` | json | files output from the block |
 
 ## Notes
````
````diff
@@ -73,9 +73,9 @@ Search the web with the Custom Search API
 | Parameter | Type | Required | Description |
 | --------- | ---- | -------- | ----------- |
 | `query` | string | Yes | The search query to execute |
-| `apiKey` | string | Yes | Google API key |
 | `searchEngineId` | string | Yes | Custom Search Engine ID |
 | `num` | string | No | Number of results to return \(default: 10, max: 10\) |
+| `apiKey` | string | Yes | Google API key |
 
 #### Output
@@ -101,7 +101,11 @@ Search the web with the Custom Search API
 
 ### Outputs
 
-This block does not produce any outputs.
+| Output | Type | Description |
+| ------ | ---- | ----------- |
+| `items` | json | items output from the block |
+| `searchInformation` | json | searchInformation output from the block |
 
 ## Notes
````
````diff
@@ -212,14 +212,13 @@ Append data to the end of a Google Sheets spreadsheet
 
 | Output | Type | Description |
 | ------ | ---- | ----------- |
-| `response` | object | Output from response |
-| ↳ `data` | json | data of the response |
-| ↳ `metadata` | json | metadata of the response |
-| ↳ `updatedRange` | string | updatedRange of the response |
-| ↳ `updatedRows` | number | updatedRows of the response |
-| ↳ `updatedColumns` | number | updatedColumns of the response |
-| ↳ `updatedCells` | number | updatedCells of the response |
-| ↳ `tableRange` | string | tableRange of the response |
+| `data` | json | data output from the block |
+| `metadata` | json | metadata output from the block |
+| `updatedRange` | string | updatedRange output from the block |
+| `updatedRows` | number | updatedRows output from the block |
+| `updatedColumns` | number | updatedColumns output from the block |
+| `updatedCells` | number | updatedCells output from the block |
+| `tableRange` | string | tableRange output from the block |
 
 ## Notes
````
````diff
@@ -1,124 +0,0 @@
----
-title: Guesty
-description: Interact with Guesty property management system
----
-
-import { BlockInfoCard } from "@/components/ui/block-info-card"
-
-<BlockInfoCard
-  type="guesty"
-  color="#0051F8"
-  icon={true}
-  iconSvg={`<svg className="block-icon" viewBox='0 0 101 100' fill='none' xmlns='http://www.w3.org/2000/svg'>
-    {/* single currentColor <path> element; long coordinate data omitted */}
-  </svg>`}
-/>
-
-{/* MANUAL-CONTENT-START:intro */}
-[Guesty](https://www.guesty.com) is a comprehensive property management platform designed for short-term and vacation rental property managers. It provides a centralized system to manage listings, reservations, guest communications, and operations across multiple booking channels like Airbnb, Booking.com, and VRBO.
-
-With Guesty, property managers can:
-
-- **Centralize operations**: Manage multiple properties and listings from a single dashboard
-- **Automate workflows**: Set up automated messaging, task assignments, and cleaning schedules
-- **Synchronize calendars**: Keep availability updated across all booking channels
-- **Process payments**: Handle secure payment processing and financial reporting
-- **Manage guest communications**: Streamline guest interactions through unified inbox
-- **Generate reports**: Access analytics and insights to optimize property performance
-
-In Sim Studio, the Guesty integration enables your agents to interact directly with your property management system programmatically. This allows for powerful automation scenarios such as reservation management, guest communication, and operational workflows. Your agents can retrieve detailed reservation information by ID, including guest details, booking dates, and property information. They can also search for guests by phone number to access their profiles and booking history. This integration bridges the gap between your AI workflows and your property management operations, enabling seamless handling of hospitality tasks without manual intervention. By connecting Sim Studio with Guesty, you can automate guest communications, streamline check-in processes, manage reservation details, and enhance the overall guest experience through intelligent automation.
-{/* MANUAL-CONTENT-END */}
-
-## Usage Instructions
-
-Access Guesty property management data including reservations and guest information. Retrieve reservation details by ID or search for guests by phone number.
-
-## Tools
-
-### `guesty_reservation`
-
-Fetch reservation details from Guesty by reservation ID
-
-#### Input
-
-| Parameter | Type | Required | Description |
-| --------- | ---- | -------- | ----------- |
-| `apiKey` | string | Yes | Your Guesty API token |
-| `reservationId` | string | Yes | The ID of the reservation to fetch |
-
-#### Output
-
-| Parameter | Type |
-| --------- | ---- |
-| `id` | string |
-| `guest` | string |
-| `email` | string |
-| `phone` | string |
-
-### `guesty_guest`
-
-Search for guests in Guesty by phone number
-
-#### Input
-
-| Parameter | Type | Required | Description |
-| --------- | ---- | -------- | ----------- |
-| `apiKey` | string | Yes | Your Guesty API token |
-| `phoneNumber` | string | Yes | The phone number to search for |
-
-#### Output
-
-| Parameter | Type |
-| --------- | ---- |
-| `guests` | string |
-| `fullName` | string |
-| `email` | string |
-| `phone` | string |
-| `address` | string |
-| `city` | string |
-| `country` | string |
-
-## Block Configuration
-
-### Input
-
-| Parameter | Type | Required | Description |
-| --------- | ---- | -------- | ----------- |
-| `action` | string | Yes | Action |
-
-### Outputs
-
-| Output | Type | Description |
-| ------ | ---- | ----------- |
-| `response` | object | Output from response |
-| ↳ `id` | string | id of the response |
-| ↳ `guest` | json | guest of the response |
-| ↳ `checkIn` | string | checkIn of the response |
-| ↳ `checkOut` | string | checkOut of the response |
-| ↳ `status` | string | status of the response |
-| ↳ `listing` | json | listing of the response |
-| ↳ `money` | json | money of the response |
-| ↳ `guests` | json | guests of the response |
-
-## Notes
-
-- Category: `tools`
-- Type: `guesty`
````
````diff
@@ -80,14 +80,13 @@ Generate completions using Hugging Face Inference API
 
 | Parameter | Type | Required | Description |
 | --------- | ---- | -------- | ----------- |
-| `apiKey` | string | Yes | Hugging Face API token |
-| `systemPrompt` | string | No | System prompt to guide the model behavior |
-| `content` | string | Yes | The user message content to send to the model |
 | `provider` | string | Yes | The provider to use for the API request \(e.g., novita, cerebras, etc.\) |
 | `model` | string | Yes | Model to use for chat completions \(e.g., deepseek/deepseek-v3-0324\) |
+| `content` | string | Yes | The user message content to send to the model |
+| `systemPrompt` | string | No | System prompt to guide the model behavior |
 | `maxTokens` | number | No | Maximum number of tokens to generate |
 | `temperature` | number | No | Sampling temperature \(0-2\). Higher values make output more random |
 | `stream` | boolean | No | Whether to stream the response |
+| `apiKey` | string | Yes | Hugging Face API token |
 
 #### Output
@@ -115,10 +114,9 @@ Generate completions using Hugging Face Inference API
 
 | Output | Type | Description |
 | ------ | ---- | ----------- |
-| `response` | object | Output from response |
-| ↳ `content` | string | content of the response |
-| ↳ `model` | string | model of the response |
-| ↳ `usage` | json | usage of the response |
+| `content` | string | content output from the block |
+| `model` | string | model output from the block |
+| `usage` | json | usage output from the block |
 
 ## Notes
````
````diff
@@ -60,8 +60,8 @@ Generate images using OpenAI
 
 | Parameter | Type | Required | Description |
 | --------- | ---- | -------- | ----------- |
-| `prompt` | string | Yes | A text description of the desired image |
 | `model` | string | Yes | The model to use \(gpt-image-1 or dall-e-3\) |
+| `prompt` | string | Yes | A text description of the desired image |
 | `size` | string | Yes | The size of the generated images \(1024x1024, 1024x1792, or 1792x1024\) |
 | `quality` | string | No | The quality of the image \(standard or hd\) |
 | `style` | string | No | The style of the image \(vivid or natural\) |
@@ -93,10 +93,9 @@ Generate images using OpenAI
 
 | Output | Type | Description |
 | ------ | ---- | ----------- |
-| `response` | object | Output from response |
-| ↳ `content` | string | content of the response |
-| ↳ `image` | string | image of the response |
-| ↳ `metadata` | json | metadata of the response |
+| `content` | string | content output from the block |
+| `image` | string | image output from the block |
+| `metadata` | json | metadata output from the block |
 
 ## Notes
````
````diff
@@ -78,6 +78,10 @@ Extract and process web content into clean, LLM-friendly text using Jina AI Read
 | Parameter | Type | Required | Description |
 | --------- | ---- | -------- | ----------- |
 | `url` | string | Yes | The URL to read and convert to markdown |
+| `useReaderLMv2` | boolean | No | Whether to use ReaderLM-v2 for better quality |
+| `gatherLinks` | boolean | No | Whether to gather all links at the end |
+| `jsonResponse` | boolean | No | Whether to return response in JSON format |
 | `apiKey` | string | Yes | Your Jina AI API key |
 
 #### Output
@@ -101,8 +105,7 @@ Extract and process web content into clean, LLM-friendly text using Jina AI Read
 
 | Output | Type | Description |
 | ------ | ---- | ----------- |
-| `response` | object | Output from response |
-| ↳ `content` | string | content of the response |
+| `content` | string | content output from the block |
 
 ## Notes
````
````diff
@@ -165,15 +165,14 @@ Retrieve multiple Jira issues in bulk
 
 | Output | Type | Description |
 | ------ | ---- | ----------- |
-| `response` | object | Output from response |
-| ↳ `ts` | string | ts of the response |
-| ↳ `issueKey` | string | issueKey of the response |
-| ↳ `summary` | string | summary of the response |
-| ↳ `description` | string | description of the response |
-| ↳ `created` | string | created of the response |
-| ↳ `updated` | string | updated of the response |
-| ↳ `success` | boolean | success of the response |
-| ↳ `url` | string | url of the response |
+| `ts` | string | ts output from the block |
+| `issueKey` | string | issueKey output from the block |
+| `summary` | string | summary output from the block |
+| `description` | string | description output from the block |
+| `created` | string | created output from the block |
+| `updated` | string | updated output from the block |
+| `success` | boolean | success output from the block |
+| `url` | string | url output from the block |
 
 ## Notes
````
````diff
@@ -66,6 +66,13 @@ Search for similar content in one or more knowledge bases using vector similarit
 | `knowledgeBaseIds` | string | Yes | ID of the knowledge base to search in, or comma-separated IDs for multiple knowledge bases |
 | `query` | string | Yes | Search query text |
 | `topK` | number | No | Number of most similar results to return \(1-100\) |
+| `tag1` | string | No | Filter by tag 1 value |
+| `tag2` | string | No | Filter by tag 2 value |
+| `tag3` | string | No | Filter by tag 3 value |
+| `tag4` | string | No | Filter by tag 4 value |
+| `tag5` | string | No | Filter by tag 5 value |
+| `tag6` | string | No | Filter by tag 6 value |
+| `tag7` | string | No | Filter by tag 7 value |
 
 #### Output
@@ -74,6 +81,7 @@ Search for similar content in one or more knowledge bases using vector similarit
 | `results` | string |
 | `query` | string |
 | `totalResults` | string |
+| `cost` | string |
 
 ### `knowledge_upload_chunk`
@@ -111,6 +119,13 @@ Create a new document in a knowledge base
 | `knowledgeBaseId` | string | Yes | ID of the knowledge base containing the document |
 | `name` | string | Yes | Name of the document |
 | `content` | string | Yes | Content of the document |
+| `tag1` | string | No | Tag 1 value for the document |
+| `tag2` | string | No | Tag 2 value for the document |
+| `tag3` | string | No | Tag 3 value for the document |
+| `tag4` | string | No | Tag 4 value for the document |
+| `tag5` | string | No | Tag 5 value for the document |
+| `tag6` | string | No | Tag 6 value for the document |
+| `tag7` | string | No | Tag 7 value for the document |
 
 #### Output
@@ -135,10 +150,9 @@ Create a new document in a knowledge base
 
 | Output | Type | Description |
 | ------ | ---- | ----------- |
-| `response` | object | Output from response |
-| ↳ `results` | json | results of the response |
-| ↳ `query` | string | query of the response |
-| ↳ `totalResults` | number | totalResults of the response |
+| `results` | json | results output from the block |
+| `query` | string | query output from the block |
+| `totalResults` | number | totalResults output from the block |
 
 ## Notes
````
@@ -105,9 +105,8 @@ Create a new issue in Linear
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `response` | object | Output from response |
|
||||
| ↳ `issues` | json | issues of the response |
|
||||
| ↳ `issue` | json | issue of the response |
|
||||
| `issues` | json | issues output from the block |
|
||||
| `issue` | json | issue output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
@@ -65,9 +65,9 @@ Search the web for information using Linkup
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `q` | string | Yes | The search query |
|
||||
| `apiKey` | string | Yes | Enter your Linkup API key |
|
||||
| `depth` | string | Yes | Search depth \(has to either be |
|
||||
| `outputType` | string | Yes | Type of output to return \(has to either be |
|
||||
| `apiKey` | string | Yes | Enter your Linkup API key |
|
||||
|
||||
#### Output
|
||||
|
||||
@@ -92,9 +92,8 @@ Search the web for information using Linkup
|
||||
|
||||
| Output | Type | Description |
|
||||
| ------ | ---- | ----------- |
|
||||
| `response` | object | Output from response |
|
||||
| ↳ `answer` | string | answer of the response |
|
||||
| ↳ `sources` | json | sources of the response |
|
||||
| `answer` | string | answer output from the block |
|
||||
| `sources` | json | sources output from the block |
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
@@ -58,9 +58,9 @@ Add memories to Mem0 for persistent storage and retrieval
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `apiKey` | string | Yes | Your Mem0 API key |
|
||||
| `userId` | string | Yes | User ID associated with the memory |
|
||||
| `messages` | json | Yes | Array of message objects with role and content |
|
||||
| `apiKey` | string | Yes | Your Mem0 API key |
|
||||
|
||||
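The `messages` parameter carries standard role/content pairs, as in the sketch below; the values are placeholders, the shape follows the table above.

```ts
// Illustrative mem0_add_memories arguments.
const addMemoryParams = {
  apiKey: process.env.MEM0_API_KEY!, // assumed to be set in the environment
  userId: 'user-123',
  messages: [
    { role: 'user', content: 'I prefer window seats on long flights.' },
    { role: 'assistant', content: 'Noted - window seats for long flights.' },
  ],
}
```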
#### Output

@@ -76,10 +76,10 @@ Search for memories in Mem0 using semantic search

| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | Your Mem0 API key |
| `userId` | string | Yes | User ID to search memories for |
| `query` | string | Yes | Search query to find relevant memories |
| `limit` | number | No | Maximum number of results to return |
| `apiKey` | string | Yes | Your Mem0 API key |

#### Output

@@ -96,12 +96,12 @@ Retrieve memories from Mem0 by ID or filter criteria

| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | Your Mem0 API key |
| `userId` | string | Yes | User ID to retrieve memories for |
| `memoryId` | string | No | Specific memory ID to retrieve |
| `startDate` | string | No | Start date for filtering by created_at \(format: YYYY-MM-DD\) |
| `endDate` | string | No | End date for filtering by created_at \(format: YYYY-MM-DD\) |
| `limit` | number | No | Maximum number of results to return |
| `apiKey` | string | Yes | Your Mem0 API key |

#### Output

@@ -126,10 +126,9 @@ Retrieve memories from Mem0 by ID or filter criteria

| Output | Type | Description |
| ------ | ---- | ----------- |
| `response` | object | Output from response |
| ↳ `ids` | any | ids of the response |
| ↳ `memories` | any | memories of the response |
| ↳ `searchResults` | any | searchResults of the response |
| `ids` | any | ids output from the block |
| `memories` | any | memories output from the block |
| `searchResults` | any | searchResults output from the block |

## Notes

@@ -124,9 +124,8 @@ Delete a specific memory by its ID

| Output | Type | Description |
| ------ | ---- | ----------- |
| `response` | object | Output from response |
| ↳ `memories` | any | memories of the response |
| ↳ `id` | string | id of the response |
| `memories` | any | memories output from the block |
| `id` | string | id output from the block |

## Notes

@@ -2,7 +2,6 @@

  "items": [
    "index",
    "airtable",
    "autoblocks",
    "browser_use",
    "clay",
    "confluence",

@@ -18,7 +17,6 @@

    "google_drive",
    "google_search",
    "google_sheets",
    "guesty",
    "huggingface",
    "image_generator",
    "jina",

@@ -50,6 +48,7 @@

    "twilio_sms",
    "typeform",
    "vision",
    "wealthbox",
    "whatsapp",
    "x",
    "youtube"

@@ -180,15 +180,14 @@ Add new rows to a Microsoft Excel table

| Output | Type | Description |
| ------ | ---- | ----------- |
| `response` | object | Output from response |
| ↳ `data` | json | data of the response |
| ↳ `metadata` | json | metadata of the response |
| ↳ `updatedRange` | string | updatedRange of the response |
| ↳ `updatedRows` | number | updatedRows of the response |
| ↳ `updatedColumns` | number | updatedColumns of the response |
| ↳ `updatedCells` | number | updatedCells of the response |
| ↳ `index` | number | index of the response |
| ↳ `values` | json | values of the response |
| `data` | json | data output from the block |
| `metadata` | json | metadata output from the block |
| `updatedRange` | string | updatedRange output from the block |
| `updatedRows` | number | updatedRows output from the block |
| `updatedColumns` | number | updatedColumns output from the block |
| `updatedCells` | number | updatedCells output from the block |
| `index` | number | index output from the block |
| `values` | json | values output from the block |

## Notes

@@ -205,10 +205,9 @@ Write or send a message to a Microsoft Teams channel

| Output | Type | Description |
| ------ | ---- | ----------- |
| `response` | object | Output from response |
| ↳ `content` | string | content of the response |
| ↳ `metadata` | json | metadata of the response |
| ↳ `updatedContent` | boolean | updatedContent of the response |
| `content` | string | content output from the block |
| `metadata` | json | metadata output from the block |
| `updatedContent` | boolean | updatedContent output from the block |

## Notes

@@ -96,11 +96,11 @@ Parse PDF documents using Mistral OCR API

| `filePath` | string | Yes | URL to a PDF document to be processed |
| `fileUpload` | object | No | File upload data from file-upload component |
| `resultType` | string | No | Type of parsed result \(markdown, text, or json\). Defaults to markdown. |
| `apiKey` | string | Yes | Mistral API key \(MISTRAL_API_KEY\) |
| `includeImageBase64` | boolean | No | Include base64-encoded images in the response |
| `pages` | array | No | Specific pages to process \(array of page numbers, starting from 0\) |
| `imageLimit` | number | No | Maximum number of images to extract from the PDF |
| `imageMinSize` | number | No | Minimum height and width of images to extract from the PDF |
| `apiKey` | string | Yes | Mistral API key \(MISTRAL_API_KEY\) |

#### Output

@@ -122,9 +122,8 @@ This tool does not produce any outputs.

| Output | Type | Description |
| ------ | ---- | ----------- |
| `response` | object | Output from response |
| ↳ `content` | string | content of the response |
| ↳ `metadata` | json | metadata of the response |
| `content` | string | content output from the block |
| `metadata` | json | metadata output from the block |

## Notes

@@ -49,8 +49,8 @@ Read content from a Notion page

| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `pageId` | string | Yes | The ID of the Notion page to read |
| `accessToken` | string | Yes | Notion OAuth access token |
| `pageId` | string | Yes | The ID of the Notion page to read |

#### Output

@@ -70,9 +70,9 @@ Append content to a Notion page

| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `accessToken` | string | Yes | Notion OAuth access token |
| `pageId` | string | Yes | The ID of the Notion page to append content to |
| `content` | string | Yes | The content to append to the page |
| `accessToken` | string | Yes | Notion OAuth access token |

#### Output

@@ -88,12 +88,12 @@ Create a new page in Notion

| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `accessToken` | string | Yes | Notion OAuth access token |
| `parentType` | string | Yes | Type of parent: |
| `parentId` | string | Yes | ID of the parent page or database |
| `title` | string | No | Title of the page \(required for parent pages, not for databases\) |
| `properties` | json | No | JSON object of properties for database pages |
| `content` | string | No | Optional content to add to the page upon creation |
| `accessToken` | string | Yes | Notion OAuth access token |

#### Output

@@ -117,9 +117,8 @@ Create a new page in Notion

| Output | Type | Description |
| ------ | ---- | ----------- |
| `response` | object | Output from response |
| ↳ `content` | string | content of the response |
| ↳ `metadata` | any | metadata of the response |
| `content` | string | content output from the block |
| `metadata` | any | metadata output from the block |

## Notes

@@ -57,11 +57,10 @@ Generate embeddings from text using OpenAI

| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | OpenAI API key |
| `input` | string | Yes | Text to generate embeddings for |
| `model` | string | No | Model to use for embeddings |
| `encoding_format` | string | No | The format to return the embeddings in |
| `user` | string | No | A unique identifier for the end-user |
| `encodingFormat` | string | No | The format to return the embeddings in |
| `apiKey` | string | Yes | OpenAI API key |

#### Output

@@ -88,10 +87,9 @@ Generate embeddings from text using OpenAI

| Output | Type | Description |
| ------ | ---- | ----------- |
| `response` | object | Output from response |
| ↳ `embeddings` | json | embeddings of the response |
| ↳ `model` | string | model of the response |
| ↳ `usage` | json | usage of the response |
| `embeddings` | json | embeddings output from the block |
| `model` | string | model output from the block |
| `usage` | json | usage output from the block |

## Notes

@@ -225,9 +225,8 @@ Read emails from Outlook

| Output | Type | Description |
| ------ | ---- | ----------- |
| `response` | object | Output from response |
| ↳ `message` | string | message of the response |
| ↳ `results` | json | results of the response |
| `message` | string | message output from the block |
| `results` | json | results output from the block |

## Notes

@@ -51,11 +51,12 @@ Generate completions using Perplexity AI chat models

| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | Perplexity API key |
| `systemPrompt` | string | No | System prompt to guide the model behavior |
| `content` | string | Yes | The user message content to send to the model |
| `model` | string | Yes | Model to use for chat completions \(e.g., sonar, mistral\) |
| `messages` | array | Yes | Array of message objects with role and content |
| `max_tokens` | number | No | Maximum number of tokens to generate |
| `temperature` | number | No | Sampling temperature between 0 and 1 |
| `apiKey` | string | Yes | Perplexity API key |
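A sketch of a chat request assembled from the parameters above; the model name and message text are placeholders.

```ts
// Illustrative perplexity_chat arguments.
const chatParams = {
  apiKey: process.env.PERPLEXITY_API_KEY!,
  model: 'sonar',
  content: 'Summarize the latest changes to the EU AI Act.',
  messages: [
    { role: 'system', content: 'Answer concisely and cite sources.' },
    { role: 'user', content: 'Summarize the latest changes to the EU AI Act.' },
  ],
  max_tokens: 512, // optional cap on generated tokens
  temperature: 0.2, // optional, between 0 and 1
}
```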
#### Output

@@ -75,7 +76,7 @@ Generate completions using Perplexity AI chat models

| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `prompt` | string | Yes | User Prompt - Enter your prompt here... |
| `content` | string | Yes | User Prompt - Enter your prompt here... |

@@ -83,10 +84,9 @@ Generate completions using Perplexity AI chat models

| Output | Type | Description |
| ------ | ---- | ----------- |
| `response` | object | Output from response |
| ↳ `content` | string | content of the response |
| ↳ `model` | string | model of the response |
| ↳ `usage` | json | usage of the response |
| `content` | string | content output from the block |
| `model` | string | model output from the block |
| `usage` | json | usage output from the block |

## Notes

@@ -59,9 +59,9 @@ Generate embeddings from text using Pinecone

| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | Pinecone API key |
| `model` | string | Yes | Model to use for generating embeddings |
| `inputs` | array | Yes | Array of text inputs to generate embeddings for |
| `apiKey` | string | Yes | Pinecone API key |

#### Output

@@ -80,10 +80,10 @@ Insert or update text records in a Pinecone index

| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | Pinecone API key |
| `indexHost` | string | Yes | Full Pinecone index host URL |
| `namespace` | string | Yes | Namespace to upsert records into |
| `records` | array | Yes | Record or array of records to upsert, each containing _id, text, and optional metadata |
| `apiKey` | string | Yes | Pinecone API key |
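The `records` array follows the shape described above (`_id`, `text`, optional `metadata`); the sketch below is illustrative, with a placeholder index host.

```ts
// Illustrative arguments for the upsert-text tool.
const upsertParams = {
  apiKey: process.env.PINECONE_API_KEY!,
  indexHost: 'https://my-index-abc123.svc.example.pinecone.io', // placeholder host URL
  namespace: 'docs',
  records: [
    { _id: 'doc-1#chunk-0', text: 'Sim Studio is a workflow builder.', metadata: { source: 'readme' } },
    { _id: 'doc-1#chunk-1', text: 'Blocks connect tools into workflows.', metadata: { source: 'readme' } },
  ],
}
```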
#### Output

@@ -99,7 +99,6 @@ Search for similar text in a Pinecone index

| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | Pinecone API key |
| `indexHost` | string | Yes | Full Pinecone index host URL |
| `namespace` | string | No | Namespace to search in |
| `searchQuery` | string | Yes | Text to search for |

@@ -107,6 +106,7 @@ Search for similar text in a Pinecone index

| `fields` | array | No | Fields to return in the results |
| `filter` | object | No | Filter to apply to the search |
| `rerank` | object | No | Reranking parameters |
| `apiKey` | string | Yes | Pinecone API key |

#### Output

@@ -124,7 +124,6 @@ Search for similar vectors in a Pinecone index

| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | Pinecone API key |
| `indexHost` | string | Yes | Full Pinecone index host URL |
| `namespace` | string | No | Namespace to search in |
| `vector` | array | Yes | Vector to search for |

@@ -132,6 +131,7 @@ Search for similar vectors in a Pinecone index

| `filter` | object | No | Filter to apply to the search |
| `includeValues` | boolean | No | Include vector values in response |
| `includeMetadata` | boolean | No | Include metadata in response |
| `apiKey` | string | Yes | Pinecone API key |

#### Output

@@ -150,10 +150,10 @@ Fetch vectors by ID from a Pinecone index

| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | Pinecone API key |
| `indexHost` | string | Yes | Full Pinecone index host URL |
| `ids` | array | Yes | Array of vector IDs to fetch |
| `namespace` | string | No | Namespace to fetch vectors from |
| `apiKey` | string | Yes | Pinecone API key |

#### Output

@@ -181,13 +181,12 @@ Fetch vectors by ID from a Pinecone index

| Output | Type | Description |
| ------ | ---- | ----------- |
| `response` | object | Output from response |
| ↳ `matches` | any | matches of the response |
| ↳ `upsertedCount` | any | upsertedCount of the response |
| ↳ `data` | any | data of the response |
| ↳ `model` | any | model of the response |
| ↳ `vector_type` | any | vector_type of the response |
| ↳ `usage` | any | usage of the response |
| `matches` | any | matches output from the block |
| `upsertedCount` | any | upsertedCount output from the block |
| `data` | any | data output from the block |
| `model` | any | model output from the block |
| `vector_type` | any | vector_type output from the block |
| `usage` | any | usage output from the block |

## Notes

@@ -50,24 +50,6 @@ Access Reddit data to retrieve posts and comments from any subreddit. Get post t

## Tools

### `reddit_hot_posts`

Fetch the most popular (hot) posts from a specified subreddit.

#### Input

| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `subreddit` | string | Yes | The name of the subreddit to fetch posts from \(without the r/ prefix\) |
| `limit` | number | No | Maximum number of posts to return \(default: 10, max: 100\) |

#### Output

| Parameter | Type |
| --------- | ---- |
| `subreddit` | string |
| `posts` | string |

### `reddit_get_posts`

Fetch posts from a subreddit with different sorting options

@@ -76,6 +58,7 @@ Fetch posts from a subreddit with different sorting options

| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `accessToken` | string | Yes | Access token for Reddit API |
| `subreddit` | string | Yes | The name of the subreddit to fetch posts from \(without the r/ prefix\) |
| `sort` | string | No | Sort method for posts: |
| `limit` | number | No | Maximum number of posts to return \(default: 10, max: 100\) |

@@ -96,6 +79,7 @@ Fetch comments from a specific Reddit post

| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `accessToken` | string | Yes | Access token for Reddit API |
| `postId` | string | Yes | The ID of the Reddit post to fetch comments from |
| `subreddit` | string | Yes | The subreddit where the post is located \(without the r/ prefix\) |
| `sort` | string | No | Sort method for comments: |

@@ -121,7 +105,7 @@ Fetch comments from a specific Reddit post

| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `action` | string | Yes | Action |
| `operation` | string | Yes | Operation |

@@ -129,11 +113,10 @@ Fetch comments from a specific Reddit post

| Output | Type | Description |
| ------ | ---- | ----------- |
| `response` | object | Output from response |
| ↳ `subreddit` | string | subreddit of the response |
| ↳ `posts` | json | posts of the response |
| ↳ `post` | json | post of the response |
| ↳ `comments` | json | comments of the response |
| `subreddit` | string | subreddit output from the block |
| `posts` | json | posts output from the block |
| `post` | json | post output from the block |
| `comments` | json | comments output from the block |

## Notes

@@ -89,9 +89,8 @@ Retrieve an object from an AWS S3 bucket

| Output | Type | Description |
| ------ | ---- | ----------- |
| `response` | object | Output from response |
| ↳ `url` | string | url of the response |
| ↳ `metadata` | json | metadata of the response |
| `url` | string | url output from the block |
| `metadata` | json | metadata output from the block |

## Notes

@@ -93,11 +93,11 @@ A powerful web search tool that provides access to Google search results through

| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `query` | string | Yes | The search query |
| `apiKey` | string | Yes | Serper API Key |
| `num` | number | No | Number of results to return |
| `gl` | string | No | Country code for search results |
| `hl` | string | No | Language code for search results |
| `type` | string | No | Type of search to perform |
| `apiKey` | string | Yes | Serper API Key |

#### Output

@@ -121,8 +121,7 @@ A powerful web search tool that provides access to Google search results through

| Output | Type | Description |
| ------ | ---- | ----------- |
| `response` | object | Output from response |
| ↳ `searchResults` | json | searchResults of the response |
| `searchResults` | json | searchResults output from the block |

## Notes

@@ -70,6 +70,7 @@ Send messages to Slack channels or users through the Slack API. Supports Slack m

| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `authMethod` | string | No | Authentication method: oauth or bot_token |
| `botToken` | string | No | Bot token for Custom Bot |
| `accessToken` | string | No | OAuth access token or bot token for Slack API |
| `channel` | string | Yes | Target Slack channel \(e.g., #general\) |

@@ -98,9 +99,8 @@ Send messages to Slack channels or users through the Slack API. Supports Slack m

| Output | Type | Description |
| ------ | ---- | ----------- |
| `response` | object | Output from response |
| ↳ `ts` | string | ts of the response |
| ↳ `channel` | string | channel of the response |
| `ts` | string | ts output from the block |
| `channel` | string | channel output from the block |

## Notes

@@ -205,10 +205,10 @@ Extract structured data from a webpage using Stagehand

| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `instruction` | string | Yes | Instructions for extraction |
| `schema` | json | Yes | JSON schema defining the structure of the data to extract |
| `apiKey` | string | Yes | OpenAI API key for extraction \(required by Stagehand\) |
| `url` | string | Yes | URL of the webpage to extract data from |
| `instruction` | string | Yes | Instructions for extraction |
| `apiKey` | string | Yes | OpenAI API key for extraction \(required by Stagehand\) |
| `schema` | json | Yes | JSON schema defining the structure of the data to extract |
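The `schema` parameter is a standard JSON Schema object describing the data to pull from the page. A minimal sketch, with a placeholder URL:

```ts
// Illustrative stagehand_extract arguments.
const extractParams = {
  url: 'https://example.com/pricing', // placeholder URL
  instruction: 'Extract each pricing tier with its monthly price.',
  schema: {
    type: 'object',
    properties: {
      tiers: {
        type: 'array',
        items: {
          type: 'object',
          properties: {
            name: { type: 'string' },
            monthlyPrice: { type: 'number' },
          },
          required: ['name', 'monthlyPrice'],
        },
      },
    },
    required: ['tiers'],
  },
  apiKey: process.env.OPENAI_API_KEY!,
}
```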
#### Output

@@ -232,8 +232,7 @@ Extract structured data from a webpage using Stagehand

| Output | Type | Description |
| ------ | ---- | ----------- |
| `response` | object | Output from response |
| ↳ `data` | json | data of the response |
| `data` | json | data output from the block |

## Notes

@@ -209,11 +209,11 @@ Run an autonomous web agent to complete tasks and extract structured data

| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `task` | string | Yes | The task to complete or goal to achieve on the website |
| `startUrl` | string | Yes | URL of the webpage to start the agent on |
| `outputSchema` | json | No | Optional JSON schema defining the structure of data the agent should return |
| `task` | string | Yes | The task to complete or goal to achieve on the website |
| `variables` | json | No | Optional variables to substitute in the task \(format: \{key: value\}\). Reference in task using %key% |
| `apiKey` | string | Yes | OpenAI API key for agent execution \(required by Stagehand\) |
| `outputSchema` | json | No | Optional JSON schema defining the structure of data the agent should return |
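A sketch showing `%key%` substitution in the task string; the URL and variable values are placeholders.

```ts
// Illustrative stagehand_agent arguments.
const agentParams = {
  startUrl: 'https://example.com/login', // placeholder URL
  task: 'Log in as %username% and download the latest invoice.',
  variables: { username: 'demo@example.com' }, // referenced in the task via %username%
  outputSchema: {
    type: 'object',
    properties: { invoiceUrl: { type: 'string' } },
  },
  apiKey: process.env.OPENAI_API_KEY!,
}
```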
#### Output

@@ -240,9 +240,8 @@ Run an autonomous web agent to complete tasks and extract structured data

| Output | Type | Description |
| ------ | ---- | ----------- |
| `response` | object | Output from response |
| ↳ `agentResult` | json | agentResult of the response |
| ↳ `structuredOutput` | any | structuredOutput of the response |
| `agentResult` | json | agentResult output from the block |
| `structuredOutput` | any | structuredOutput output from the block |

## Notes

@@ -83,8 +83,10 @@ Query data from a Supabase table

| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | Your Supabase client anon key |
| `projectId` | string | Yes | Your Supabase project ID \(e.g., jdrkgepadsdopsntdlom\) |
| `table` | string | Yes | The name of the Supabase table to query |
| `filter` | object | No | Filter to apply to the query |
| `apiKey` | string | Yes | Your Supabase client anon key |
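For orientation, a hedged sketch of the arguments. The `filter` shape shown here (column/operator/value) is an assumption for illustration; consult the block's filter format for the exact contract.

```ts
// Illustrative supabase_query arguments; filter shape is assumed.
const queryParams = {
  projectId: 'jdrkgepadsdopsntdlom', // example project ID from the table above
  table: 'orders',
  filter: { column: 'status', operator: 'eq', value: 'pending' }, // assumed shape
  apiKey: process.env.SUPABASE_ANON_KEY!,
}
```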
#### Output

@@ -101,8 +103,10 @@ Insert data into a Supabase table

| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `apiKey` | string | Yes | Your Supabase client anon key |
| `projectId` | string | Yes | Your Supabase project ID \(e.g., jdrkgepadsdopsntdlom\) |
| `table` | string | Yes | The name of the Supabase table to insert data into |
| `data` | any | Yes | The data to insert |
| `apiKey` | string | Yes | Your Supabase client anon key |

#### Output

@@ -127,9 +131,8 @@ Insert data into a Supabase table

| Output | Type | Description |
| ------ | ---- | ----------- |
| `response` | object | Output from response |
| ↳ `message` | string | message of the response |
| ↳ `results` | json | results of the response |
| `message` | string | message output from the block |
| `results` | json | results output from the block |

## Notes

@@ -95,8 +95,8 @@ Extract raw content from multiple web pages simultaneously using Tavily

| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `urls` | string | Yes | URL or array of URLs to extract content from |
| `apiKey` | string | Yes | Tavily API Key |
| `extract_depth` | string | No | The depth of extraction \(basic=1 credit/5 URLs, advanced=2 credits/5 URLs\) |
| `apiKey` | string | Yes | Tavily API Key |

#### Output

@@ -121,13 +121,12 @@ Extract raw content from multiple web pages simultaneously using Tavily

| Output | Type | Description |
| ------ | ---- | ----------- |
| `response` | object | Output from response |
| ↳ `results` | json | results of the response |
| ↳ `answer` | any | answer of the response |
| ↳ `query` | string | query of the response |
| ↳ `content` | string | content of the response |
| ↳ `title` | string | title of the response |
| ↳ `url` | string | url of the response |
| `results` | json | results output from the block |
| `answer` | any | answer output from the block |
| `query` | string | query output from the block |
| `content` | string | content output from the block |
| `title` | string | title output from the block |
| `url` | string | url output from the block |

## Notes

@@ -121,9 +121,8 @@ Send messages to Telegram channels or users through the Telegram Bot API. Enable

| Output | Type | Description |
| ------ | ---- | ----------- |
| `response` | object | Output from response |
| ↳ `ok` | boolean | ok of the response |
| ↳ `result` | json | result of the response |
| `ok` | boolean | ok output from the block |
| `result` | json | result output from the block |

## Notes

@@ -87,8 +87,7 @@ Processes a provided thought/instruction, making it available for subsequent ste

| Output | Type | Description |
| ------ | ---- | ----------- |
| `response` | object | Output from response |
| ↳ `acknowledgedThought` | string | acknowledgedThought of the response |
| `acknowledgedThought` | string | acknowledgedThought output from the block |

## Notes

@@ -95,10 +95,9 @@ This tool does not produce any outputs.

| Output | Type | Description |
| ------ | ---- | ----------- |
| `response` | object | Output from response |
| ↳ `content` | string | content of the response |
| ↳ `model` | string | model of the response |
| ↳ `tokens` | any | tokens of the response |
| `content` | string | content output from the block |
| `model` | string | model output from the block |
| `tokens` | any | tokens output from the block |

## Notes

@@ -78,11 +78,10 @@ Send text messages to single or multiple recipients using the Twilio API.

| Output | Type | Description |
| ------ | ---- | ----------- |
| `response` | object | Output from response |
| ↳ `success` | boolean | success of the response |
| ↳ `messageId` | any | messageId of the response |
| ↳ `status` | any | status of the response |
| ↳ `error` | any | error of the response |
| `success` | boolean | success output from the block |
| `messageId` | any | messageId output from the block |
| `status` | any | status output from the block |
| `error` | any | error output from the block |

## Notes

@@ -126,10 +126,9 @@ This tool does not produce any outputs.

| Output | Type | Description |
| ------ | ---- | ----------- |
| `response` | object | Output from response |
| ↳ `total_items` | number | total_items of the response |
| ↳ `page_count` | number | page_count of the response |
| ↳ `items` | json | items of the response |
| `total_items` | number | total_items output from the block |
| `page_count` | number | page_count output from the block |
| `items` | json | items output from the block |

## Notes

@@ -90,10 +90,9 @@ Process and analyze images using advanced vision models. Capable of understandin

| Output | Type | Description |
| ------ | ---- | ----------- |
| `response` | object | Output from response |
| ↳ `content` | string | content of the response |
| ↳ `model` | any | model of the response |
| ↳ `tokens` | any | tokens of the response |
| `content` | string | content output from the block |
| `model` | any | model output from the block |
| `tokens` | any | tokens output from the block |

## Notes

apps/docs/content/docs/tools/wealthbox.mdx (new file, 190 lines)

@@ -0,0 +1,190 @@

---
title: Wealthbox
description: Interact with Wealthbox
---

import { BlockInfoCard } from "@/components/ui/block-info-card"

<BlockInfoCard
  type="wealthbox"
  color="#E0E0E0"
  icon={true}
  iconSvg={`<svg className="block-icon"
    xmlns='http://www.w3.org/2000/svg'
    version='1.0'
    viewBox='50 -50 200 200'
  >
    <g fill='#106ED4' stroke='none' transform='translate(0, 200) scale(0.15, -0.15)'>
      <path d='M764 1542 c-110 -64 -230 -134 -266 -156 -42 -24 -71 -49 -78 -65 -7 -19 -10 -126 -8 -334 3 -291 4 -307 23 -326 11 -11 103 -67 205 -126 102 -59 219 -127 261 -151 42 -24 85 -44 96 -44 23 0 527 288 561 320 22 22 22 23 22 340 0 288 -2 320 -17 338 -32 37 -537 322 -569 321 -18 0 -107 -46 -230 -117z m445 -144 c108 -62 206 -123 219 -135 22 -22 22 -26 22 -261 0 -214 -2 -242 -17 -260 -23 -26 -414 -252 -437 -252 -9 0 -70 31 -134 69 -64 37 -161 94 -215 125 l-97 57 2 261 3 261 210 123 c116 67 219 123 229 123 10 1 107 -50 215 -111z' />
      <path d='M700 1246 l-55 -32 -3 -211 -2 -211 37 -23 c21 -12 52 -30 69 -40 l30 -18 103 59 c56 33 109 60 117 60 8 0 62 -27 119 -60 l104 -60 63 37 c35 21 66 42 70 48 4 5 8 101 8 212 l0 202 -62 35 -63 35 -3 -197 c-1 -108 -6 -200 -11 -205 -5 -5 -54 17 -114 52 -58 34 -108 61 -111 61 -2 0 -51 -27 -107 -60 -56 -32 -106 -57 -111 -54 -4 3 -8 95 -8 205 0 109 -3 199 -7 199 -5 -1 -33 -16 -63 -34z' />
    </g>
  </svg>`}
/>

## Usage Instructions

Integrate Wealthbox functionality to manage notes, contacts, and tasks. Read content from existing notes, contacts, and tasks and write to them using OAuth authentication. Supports text content manipulation for note creation and editing.

## Tools

### `wealthbox_read_note`

Read content from a Wealthbox note

#### Input

| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `accessToken` | string | Yes | The access token for the Wealthbox API |
| `noteId` | string | No | The ID of the note to read \(optional\) |

#### Output

| Parameter | Type |
| --------- | ---- |
| `note` | string |
| `metadata` | string |
| `noteId` | string |
| `itemType` | string |

### `wealthbox_write_note`

Create or update a Wealthbox note

#### Input

| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `accessToken` | string | Yes | The access token for the Wealthbox API |
| `content` | string | Yes | The main body of the note |
| `contactId` | string | No | ID of contact to link to this note |

#### Output

| Parameter | Type |
| --------- | ---- |
| `note` | string |
| `metadata` | string |
| `itemType` | string |

### `wealthbox_read_contact`

Read content from a Wealthbox contact

#### Input

| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `accessToken` | string | Yes | The access token for the Wealthbox API |
| `contactId` | string | Yes | The ID of the contact to read |

#### Output

| Parameter | Type |
| --------- | ---- |
| `contact` | string |
| `metadata` | string |
| `contactId` | string |
| `itemType` | string |

### `wealthbox_write_contact`

Create a new Wealthbox contact

#### Input

| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `accessToken` | string | Yes | The access token for the Wealthbox API |
| `firstName` | string | Yes | The first name of the contact |
| `lastName` | string | Yes | The last name of the contact |
| `emailAddress` | string | No | The email address of the contact |
| `backgroundInformation` | string | No | Background information about the contact |

#### Output

| Parameter | Type |
| --------- | ---- |
| `contact` | string |
| `metadata` | string |
| `itemType` | string |

### `wealthbox_read_task`

Read content from a Wealthbox task

#### Input

| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `accessToken` | string | Yes | The access token for the Wealthbox API |
| `taskId` | string | No | The ID of the task to read \(optional\) |

#### Output

| Parameter | Type |
| --------- | ---- |
| `task` | string |
| `metadata` | string |
| `taskId` | string |
| `itemType` | string |

### `wealthbox_write_task`

Create or update a Wealthbox task

#### Input

| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `accessToken` | string | Yes | The access token for the Wealthbox API |
| `title` | string | Yes | The name/title of the task |
| `dueDate` | string | Yes | The due date and time of the task |
| `complete` | boolean | No | Whether the task is complete |
| `category` | number | No | The category ID the task belongs to |
| `contactId` | string | No | ID of contact to link to this task |

#### Output

| Parameter | Type |
| --------- | ---- |
| `task` | string |
| `metadata` | string |
| `taskId` | string |
| `itemType` | string |
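Under the hood these tools call the Wealthbox CRM API at api.crmworkspace.com (the same base URL used by the OAuth routes later in this diff). A hedged sketch of the kind of request `wealthbox_write_task` would make; the endpoint path and payload field names are assumptions, not confirmed by this diff:

```ts
// Illustrative REST call behind wealthbox_write_task; treat the payload
// shape and '/v1/tasks' path as assumptions.
const response = await fetch('https://api.crmworkspace.com/v1/tasks', {
  method: 'POST',
  headers: {
    Authorization: `Bearer ${accessToken}`, // Wealthbox OAuth access token
    'Content-Type': 'application/json',
  },
  body: JSON.stringify({
    name: 'Follow up on onboarding call',
    due_date: '2025-07-01 10:00 AM -0400', // assumed date format
    complete: false,
    linked_to: [{ id: 12345, type: 'Contact' }], // assumed contact-link shape
  }),
})
```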
## Block Configuration

### Input

| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `operation` | string | Yes | Operation |

### Outputs

| Output | Type | Description |
| ------ | ---- | ----------- |
| `note` | any | note output from the block |
| `notes` | any | notes output from the block |
| `contact` | any | contact output from the block |
| `contacts` | any | contacts output from the block |
| `task` | any | task output from the block |
| `tasks` | any | tasks output from the block |
| `metadata` | json | metadata output from the block |
| `success` | any | success output from the block |

## Notes

- Category: `tools`
- Type: `wealthbox`

@@ -79,10 +79,9 @@ Send WhatsApp messages

| Output | Type | Description |
| ------ | ---- | ----------- |
| `response` | object | Output from response |
| ↳ `success` | boolean | success of the response |
| ↳ `messageId` | any | messageId of the response |
| ↳ `error` | any | error of the response |
| `success` | boolean | success output from the block |
| `messageId` | any | messageId output from the block |
| `error` | any | error output from the block |

## Notes

@@ -145,15 +145,14 @@ Get user profile information

| Output | Type | Description |
| ------ | ---- | ----------- |
| `response` | object | Output from response |
| ↳ `tweet` | json | tweet of the response |
| ↳ `replies` | any | replies of the response |
| ↳ `context` | any | context of the response |
| ↳ `tweets` | json | tweets of the response |
| ↳ `includes` | any | includes of the response |
| ↳ `meta` | json | meta of the response |
| ↳ `user` | json | user of the response |
| ↳ `recentTweets` | any | recentTweets of the response |
| `tweet` | json | tweet output from the block |
| `replies` | any | replies output from the block |
| `context` | any | context output from the block |
| `tweets` | json | tweets output from the block |
| `includes` | any | includes output from the block |
| `meta` | json | meta output from the block |
| `user` | json | user output from the block |
| `recentTweets` | any | recentTweets output from the block |

## Notes

@@ -55,8 +55,8 @@ Search for videos on YouTube using the YouTube Data API.

| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `query` | string | Yes | Search query for YouTube videos |
| `apiKey` | string | Yes | YouTube API Key |
| `maxResults` | number | No | Maximum number of videos to return |
| `apiKey` | string | Yes | YouTube API Key |

#### Output

@@ -82,9 +82,8 @@ Search for videos on YouTube using the YouTube Data API.

| Output | Type | Description |
| ------ | ---- | ----------- |
| `response` | object | Output from response |
| ↳ `items` | json | items of the response |
| ↳ `totalResults` | number | totalResults of the response |
| `items` | json | items output from the block |
| `totalResults` | number | totalResults output from the block |

## Notes

@@ -19,7 +19,7 @@

    "fumadocs-mdx": "^11.5.6",
    "fumadocs-ui": "^15.0.16",
    "lucide-react": "^0.511.0",
    "next": "^15.2.3",
    "next": "^15.3.2",
    "next-themes": "^0.4.6",
    "react": "19.1.0",
    "react-dom": "19.1.0",

@@ -15,5 +15,3 @@ ENCRYPTION_KEY=your_encryption_key # Use `openssl rand -hex 32` to generate

# RESEND_API_KEY= # Uncomment and add your key from https://resend.com to send actual emails
# If left commented out, emails will be logged to console instead

# Freestyle API Key (Required for sandboxed code execution for functions/custom-tools)
# FREESTYLE_API_KEY= # Uncomment and add your key from https://docs.freestyle.sh/Getting-Started/run

@@ -1,116 +0,0 @@

'use client'

import { useState } from 'react'
import { z } from 'zod'
import { Button } from '@/components/ui/button'
import { Input } from '@/components/ui/input'

const emailSchema = z.string().email('Please enter a valid email')

export default function WaitlistForm() {
  const [email, setEmail] = useState('')
  const [isSubmitting, setIsSubmitting] = useState(false)
  const [status, setStatus] = useState<'idle' | 'success' | 'error' | 'exists' | 'ratelimited'>(
    'idle'
  )
  const [_errorMessage, setErrorMessage] = useState('')
  const [_retryAfter, setRetryAfter] = useState<number | null>(null)

  const handleSubmit = async (e: React.FormEvent) => {
    e.preventDefault()
    setStatus('idle')
    setErrorMessage('')
    setRetryAfter(null)

    try {
      // Validate email
      emailSchema.parse(email)

      setIsSubmitting(true)
      const response = await fetch('/api/waitlist', {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
        },
        body: JSON.stringify({ email }),
      })

      const data = await response.json()

      if (!response.ok) {
        // Check for rate limiting (429 status)
        if (response.status === 429) {
          setStatus('ratelimited')
          setErrorMessage(data.message || 'Too many attempts. Please try again later.')
          setRetryAfter(data.retryAfter || 60)
        }
        // Check if the error is because the email already exists
        else if (response.status === 400 && data.message?.includes('already exists')) {
          setStatus('exists')
          setErrorMessage('Already on the waitlist')
        } else {
          setStatus('error')
          setErrorMessage(data.message || 'Failed to join waitlist')
        }
        return
      }

      setStatus('success')
      setEmail('')
    } catch (_error) {
      setStatus('error')
      setErrorMessage('Please try again')
    } finally {
      setIsSubmitting(false)
    }
  }

  const getButtonText = () => {
    if (isSubmitting) return 'Joining...'
    if (status === 'success') return 'Joined!'
    if (status === 'error') return 'Try again'
    if (status === 'exists') return 'Already joined'
    if (status === 'ratelimited') return 'Try again later'
    return 'Join waitlist'
  }

  const getButtonStyle = () => {
    switch (status) {
      case 'success':
        return 'bg-green-500 hover:bg-green-600'
      case 'error':
        return 'bg-red-500 hover:bg-red-600'
      case 'exists':
        return 'bg-amber-500 hover:bg-amber-600'
      case 'ratelimited':
        return 'bg-gray-500 hover:bg-gray-600'
      default:
        return 'bg-white text-black hover:bg-gray-100'
    }
  }

  return (
    <form
      onSubmit={handleSubmit}
      className='mx-auto mt-8 flex max-w-lg flex-col items-center gap-3'
    >
      <div className='flex w-full gap-3'>
        <Input
          type='email'
          placeholder='you@example.com'
          className='h-[49px] flex-1 rounded-md border-white/20 bg-[#020817] text-sm focus:border-white/30 focus:ring-white/30 md:text-md lg:text-[16px]'
          value={email}
          onChange={(e) => setEmail(e.target.value)}
          disabled={isSubmitting || status === 'ratelimited'}
        />
        <Button
          type='submit'
          className={`h-[48px] rounded-md px-8 text-sm md:text-md ${getButtonStyle()}`}
          disabled={isSubmitting || status === 'ratelimited'}
        >
          {getButtonText()}
        </Button>
      </div>
    </form>
  )
}

@@ -93,7 +93,7 @@ export const sampleWorkflowState = {

        webhookPath: { id: 'webhookPath', type: 'short-input', value: '' },
      },
      outputs: {
        response: { type: { input: 'any' } },
        input: 'any',
      },
      enabled: true,
      horizontalHandles: true,

@@ -111,7 +111,7 @@

          type: 'long-input',
          value: 'You are a helpful assistant',
        },
        context: { id: 'context', type: 'short-input', value: '<start.response.input>' },
        context: { id: 'context', type: 'short-input', value: '<start.input>' },
        model: { id: 'model', type: 'dropdown', value: 'gpt-4o' },
        apiKey: { id: 'apiKey', type: 'short-input', value: '{{OPENAI_API_KEY}}' },
      },

@@ -138,6 +138,7 @@ export const sampleWorkflowState = {

    },
  ],
  loops: {},
  parallels: {},
  lastSaved: Date.now(),
  isDeployed: false,
}

@@ -618,6 +619,13 @@ export function mockKnowledgeSchemas() {

    processingCompletedAt: 'processing_completed_at',
    processingError: 'processing_error',
    enabled: 'enabled',
    tag1: 'tag1',
    tag2: 'tag2',
    tag3: 'tag3',
    tag4: 'tag4',
    tag5: 'tag5',
    tag6: 'tag6',
    tag7: 'tag7',
    uploadedAt: 'uploaded_at',
    deletedAt: 'deleted_at',
  },

@@ -630,6 +638,13 @@ export function mockKnowledgeSchemas() {

    embedding: 'embedding',
    tokenCount: 'token_count',
    characterCount: 'character_count',
    tag1: 'tag1',
    tag2: 'tag2',
    tag3: 'tag3',
    tag4: 'tag4',
    tag5: 'tag5',
    tag6: 'tag6',
    tag7: 'tag7',
    createdAt: 'created_at',
  },
}))

@@ -764,6 +779,20 @@ export function createStorageProviderMocks(options: StorageProviderMockOptions =

      bucket: 'test-s3-bucket',
      region: 'us-east-1',
    },
    S3_KB_CONFIG: {
      bucket: 'test-s3-kb-bucket',
      region: 'us-east-1',
    },
    BLOB_CONFIG: {
      accountName: 'testaccount',
      accountKey: 'testkey',
      containerName: 'test-container',
    },
    BLOB_KB_CONFIG: {
      accountName: 'testaccount',
      accountKey: 'testkey',
      containerName: 'test-kb-container',
    },
  }))

  vi.doMock('@aws-sdk/client-s3', () => ({

@@ -806,6 +835,11 @@ export function createStorageProviderMocks(options: StorageProviderMockOptions =

      accountKey: 'testkey',
      containerName: 'test-container',
    },
    BLOB_KB_CONFIG: {
      accountName: 'testaccount',
      accountKey: 'testkey',
      containerName: 'test-kb-container',
    },
  }))

  vi.doMock('@azure/storage-blob', () => ({

@@ -14,6 +14,8 @@ const logger = createLogger('OAuthTokenAPI')

export async function POST(request: NextRequest) {
  const requestId = crypto.randomUUID().slice(0, 8)

  logger.info(`[${requestId}] OAuth token API POST request received`)

  try {
    // Parse request body
    const body = await request.json()

@@ -38,6 +40,7 @@ export async function POST(request: NextRequest) {

    const credential = await getCredential(requestId, credentialId, userId)

    if (!credential) {
      logger.error(`[${requestId}] Credential not found: ${credentialId}`)
      return NextResponse.json({ error: 'Credential not found' }, { status: 404 })
    }

@@ -45,7 +48,8 @@ export async function POST(request: NextRequest) {

      // Refresh the token if needed
      const { accessToken } = await refreshTokenIfNeeded(requestId, credential, credentialId)
      return NextResponse.json({ accessToken }, { status: 200 })
    } catch (_error) {
    } catch (error) {
      logger.error(`[${requestId}] Failed to refresh access token:`, error)
      return NextResponse.json({ error: 'Failed to refresh access token' }, { status: 401 })
    }
  } catch (error) {
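For callers, the visible change is that a failed refresh now surfaces as a logged 401 rather than a silent one. A minimal client sketch; the route path is inferred from the handler's location and may differ in the app:

```ts
// Illustrative client call to the token refresh endpoint; path and ID assumed.
const res = await fetch('/api/auth/oauth/token', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({ credentialId: 'cred_abc123' }), // placeholder ID
})
if (res.ok) {
  const { accessToken } = await res.json()
  // use accessToken for the downstream provider call
} else if (res.status === 401) {
  // refresh failed; the server logged the underlying error with a request ID
}
```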
@@ -89,6 +89,7 @@ export async function getOAuthToken(userId: string, providerId: string): Promise

    // Check if the token is expired and needs refreshing
    const now = new Date()
    const tokenExpiry = credential.accessTokenExpiresAt
    // Only refresh if we have an expiration time AND it's expired AND we have a refresh token
    const needsRefresh = tokenExpiry && tokenExpiry < now && !!credential.refreshToken

    if (needsRefresh) {

@@ -166,7 +167,9 @@ export async function refreshAccessTokenIfNeeded(

    // Check if we need to refresh the token
    const expiresAt = credential.accessTokenExpiresAt
    const now = new Date()
    const needsRefresh = !expiresAt || expiresAt <= now
    // Only refresh if we have an expiration time AND it's expired
    // If no expiration time is set (newly created credentials), assume token is valid
    const needsRefresh = expiresAt && expiresAt <= now

    const accessToken = credential.accessToken

@@ -233,7 +236,9 @@ export async function refreshTokenIfNeeded(

    // Check if we need to refresh the token
    const expiresAt = credential.accessTokenExpiresAt
    const now = new Date()
    const needsRefresh = !expiresAt || expiresAt <= now
    // Only refresh if we have an expiration time AND it's expired
    // If no expiration time is set (newly created credentials), assume token is valid
    const needsRefresh = expiresAt && expiresAt <= now

    // If token is still valid, return it directly
    if (!needsRefresh || !credential.refreshToken) {
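Distilled, these hunks flip the default for credentials without an expiry: instead of refreshing whenever no expiry is set, the token is now assumed valid until an expiry both exists and has passed. A standalone sketch of the decision (the refresh-token guard shown here is applied separately in the handlers above):

```ts
// Sketch of the corrected refresh decision; not the exact helper from the repo.
function shouldRefresh(expiresAt: Date | null, hasRefreshToken: boolean): boolean {
  const now = new Date()
  // Old logic (!expiresAt || expiresAt <= now) also refreshed newly created
  // credentials with no expiry set; the new check treats those as valid.
  return !!expiresAt && expiresAt <= now && hasRefreshToken
}
```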
153
apps/sim/app/api/auth/oauth/wealthbox/item/route.ts
Normal file
153
apps/sim/app/api/auth/oauth/wealthbox/item/route.ts
Normal file
@@ -0,0 +1,153 @@
|
||||
import { eq } from 'drizzle-orm'
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { getSession } from '@/lib/auth'
|
||||
import { createLogger } from '@/lib/logs/console-logger'
|
||||
import { db } from '@/db'
|
||||
import { account } from '@/db/schema'
|
||||
import { refreshAccessTokenIfNeeded } from '../../utils'
|
||||
|
||||
export const dynamic = 'force-dynamic'

const logger = createLogger('WealthboxItemAPI')

/**
 * Get a single item from Wealthbox (currently only contacts are supported)
 */
export async function GET(request: NextRequest) {
  const requestId = crypto.randomUUID().slice(0, 8)

  try {
    // Get the session
    const session = await getSession()

    // Check if the user is authenticated
    if (!session?.user?.id) {
      logger.warn(`[${requestId}] Unauthenticated request rejected`)
      return NextResponse.json({ error: 'User not authenticated' }, { status: 401 })
    }

    // Get parameters from query
    const { searchParams } = new URL(request.url)
    const credentialId = searchParams.get('credentialId')
    const itemId = searchParams.get('itemId')
    const type = searchParams.get('type') || 'contact'

    if (!credentialId || !itemId) {
      logger.warn(`[${requestId}] Missing required parameters`, { credentialId, itemId })
      return NextResponse.json({ error: 'Credential ID and Item ID are required' }, { status: 400 })
    }

    // Validate item type - only contacts are handled for now
    if (type !== 'contact') {
      logger.warn(`[${requestId}] Invalid item type: ${type}`)
      return NextResponse.json(
        { error: 'Invalid item type. Only contact is supported.' },
        { status: 400 }
      )
    }

    // Get the credential from the database
    const credentials = await db.select().from(account).where(eq(account.id, credentialId)).limit(1)

    if (!credentials.length) {
      logger.warn(`[${requestId}] Credential not found`, { credentialId })
      return NextResponse.json({ error: 'Credential not found' }, { status: 404 })
    }

    const credential = credentials[0]

    // Check if the credential belongs to the user
    if (credential.userId !== session.user.id) {
      logger.warn(`[${requestId}] Unauthorized credential access attempt`, {
        credentialUserId: credential.userId,
        requestUserId: session.user.id,
      })
      return NextResponse.json({ error: 'Unauthorized' }, { status: 403 })
    }

    // Refresh access token if needed
    const accessToken = await refreshAccessTokenIfNeeded(credentialId, session.user.id, requestId)

    if (!accessToken) {
      logger.error(`[${requestId}] Failed to obtain valid access token`)
      return NextResponse.json({ error: 'Failed to obtain valid access token' }, { status: 401 })
    }

    // Determine the endpoint based on item type - only contacts
    const endpoints = {
      contact: 'contacts',
    }
    const endpoint = endpoints[type as keyof typeof endpoints]

    logger.info(`[${requestId}] Fetching ${type} ${itemId} from Wealthbox`)

    // Make request to Wealthbox API
    const response = await fetch(`https://api.crmworkspace.com/v1/${endpoint}/${itemId}`, {
      headers: {
        Authorization: `Bearer ${accessToken}`,
        'Content-Type': 'application/json',
      },
    })

    if (!response.ok) {
      const errorText = await response.text()
      logger.error(
        `[${requestId}] Wealthbox API error: ${response.status} ${response.statusText}`,
        {
          error: errorText,
          endpoint,
          itemId,
        }
      )

      if (response.status === 404) {
        return NextResponse.json({ error: 'Item not found' }, { status: 404 })
      }

      return NextResponse.json(
        { error: `Failed to fetch ${type} from Wealthbox` },
        { status: response.status }
      )
    }

    const data = await response.json()

    logger.info(`[${requestId}] Wealthbox API response structure`, {
      type,
      dataKeys: Object.keys(data || {}),
      hasContacts: !!data.contacts,
      totalCount: data.meta?.total_count,
    })

    // Transform the response to match our expected format
    let items: any[] = []

    if (type === 'contact') {
      // Handle single contact response - the API returns the contact object directly when fetching by ID
      if (data?.id) {
        // Single contact response
        const item = {
          id: data.id?.toString() || '',
          name: `${data.first_name || ''} ${data.last_name || ''}`.trim() || `Contact ${data.id}`,
          type: 'contact',
          content: data.background_information || '',
          createdAt: data.created_at,
          updatedAt: data.updated_at,
        }
        items = [item]
      } else {
        logger.warn(`[${requestId}] Unexpected contact response format`, { data })
        items = []
      }
    }

    logger.info(
      `[${requestId}] Successfully fetched ${items.length} ${type}s from Wealthbox (total: ${data.meta?.total_count || 'unknown'})`
    )

    return NextResponse.json({ item: items[0] }, { status: 200 })
  } catch (error) {
    logger.error(`[${requestId}] Error fetching Wealthbox item`, error)
    return NextResponse.json({ error: 'Internal server error' }, { status: 500 })
  }
}
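For reference, a minimal client-side sketch of calling this route. The singular item path is an assumption inferred from the sibling `items` route below, and the function name is illustrative; the response shape mirrors the JSON built above.

// A minimal sketch, assuming the route is mounted at /api/auth/oauth/wealthbox/item.
async function fetchWealthboxContact(credentialId: string, itemId: string) {
  const params = new URLSearchParams({ credentialId, itemId, type: 'contact' })
  // Session cookies are sent automatically on same-origin requests
  const res = await fetch(`/api/auth/oauth/wealthbox/item?${params}`)
  if (!res.ok) {
    const { error } = await res.json()
    throw new Error(`Wealthbox item fetch failed (${res.status}): ${error}`)
  }
  const { item } = await res.json()
  return item as {
    id: string
    name: string
    type: 'contact'
    content: string
    createdAt: string
    updatedAt: string
  }
}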
apps/sim/app/api/auth/oauth/wealthbox/items/route.ts (new file, 168 lines)
@@ -0,0 +1,168 @@
import { eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { getSession } from '@/lib/auth'
import { createLogger } from '@/lib/logs/console-logger'
import { db } from '@/db'
import { account } from '@/db/schema'
import { refreshAccessTokenIfNeeded } from '../../utils'

export const dynamic = 'force-dynamic'

const logger = createLogger('WealthboxItemsAPI')

/**
 * Get items from Wealthbox (currently only contacts are supported)
 */
export async function GET(request: NextRequest) {
  const requestId = crypto.randomUUID().slice(0, 8)

  try {
    // Get the session
    const session = await getSession()

    // Check if the user is authenticated
    if (!session?.user?.id) {
      logger.warn(`[${requestId}] Unauthenticated request rejected`)
      return NextResponse.json({ error: 'User not authenticated' }, { status: 401 })
    }

    // Get parameters from query
    const { searchParams } = new URL(request.url)
    const credentialId = searchParams.get('credentialId')
    const type = searchParams.get('type') || 'contact'
    const query = searchParams.get('query') || ''

    if (!credentialId) {
      logger.warn(`[${requestId}] Missing credential ID`)
      return NextResponse.json({ error: 'Credential ID is required' }, { status: 400 })
    }

    // Validate item type - only contacts are handled for now
    if (type !== 'contact') {
      logger.warn(`[${requestId}] Invalid item type: ${type}`)
      return NextResponse.json(
        { error: 'Invalid item type. Only contact is supported.' },
        { status: 400 }
      )
    }

    // Get the credential from the database
    const credentials = await db.select().from(account).where(eq(account.id, credentialId)).limit(1)

    if (!credentials.length) {
      logger.warn(`[${requestId}] Credential not found`, { credentialId })
      return NextResponse.json({ error: 'Credential not found' }, { status: 404 })
    }

    const credential = credentials[0]

    // Check if the credential belongs to the user
    if (credential.userId !== session.user.id) {
      logger.warn(`[${requestId}] Unauthorized credential access attempt`, {
        credentialUserId: credential.userId,
        requestUserId: session.user.id,
      })
      return NextResponse.json({ error: 'Unauthorized' }, { status: 403 })
    }

    // Refresh access token if needed
    const accessToken = await refreshAccessTokenIfNeeded(credentialId, session.user.id, requestId)

    if (!accessToken) {
      logger.error(`[${requestId}] Failed to obtain valid access token`)
      return NextResponse.json({ error: 'Failed to obtain valid access token' }, { status: 401 })
    }

    // Use the documented endpoints - only for contacts
    const endpoints = {
      contact: 'contacts',
    }
    const endpoint = endpoints[type as keyof typeof endpoints]

    // Build URL against the documented API base URL
    const url = new URL(`https://api.crmworkspace.com/v1/${endpoint}`)

    logger.info(`[${requestId}] Fetching ${type}s from Wealthbox`, {
      endpoint,
      url: url.toString(),
      hasQuery: !!query.trim(),
    })

    // Make request to Wealthbox API
    const response = await fetch(url.toString(), {
      headers: {
        Authorization: `Bearer ${accessToken}`,
        'Content-Type': 'application/json',
      },
    })

    if (!response.ok) {
      const errorText = await response.text()
      logger.error(
        `[${requestId}] Wealthbox API error: ${response.status} ${response.statusText}`,
        {
          error: errorText,
          endpoint,
          url: url.toString(),
        }
      )
      return NextResponse.json(
        { error: `Failed to fetch ${type}s from Wealthbox` },
        { status: response.status }
      )
    }

    const data = await response.json()

    logger.info(`[${requestId}] Wealthbox API response structure`, {
      type,
      status: response.status,
      dataKeys: Object.keys(data || {}),
      hasContacts: !!data.contacts,
      dataStructure: typeof data === 'object' ? Object.keys(data) : 'not an object',
    })

    // Transform the response based on type and the documented response format
    let items: any[] = []

    if (type === 'contact') {
      const contacts = data.contacts || []
      if (!Array.isArray(contacts)) {
        logger.warn(`[${requestId}] Contacts is not an array`, {
          contacts,
          dataType: typeof contacts,
        })
        return NextResponse.json({ items: [] }, { status: 200 })
      }

      items = contacts.map((item: any) => ({
        id: item.id?.toString() || '',
        name: `${item.first_name || ''} ${item.last_name || ''}`.trim() || `Contact ${item.id}`,
        type: 'contact',
        content: item.background_information || '',
        createdAt: item.created_at,
        updatedAt: item.updated_at,
      }))
    }

    // Apply client-side filtering if a query is provided
    if (query.trim()) {
      const searchTerm = query.trim().toLowerCase()
      items = items.filter(
        (item) =>
          item.name.toLowerCase().includes(searchTerm) ||
          item.content.toLowerCase().includes(searchTerm)
      )
    }

    logger.info(`[${requestId}] Successfully fetched ${items.length} ${type}s from Wealthbox`, {
      totalItems: items.length,
      hasSearchQuery: !!query.trim(),
    })

    return NextResponse.json({ items }, { status: 200 })
  } catch (error) {
    logger.error(`[${requestId}] Error fetching Wealthbox items`, error)
    return NextResponse.json({ error: 'Internal server error' }, { status: 500 })
  }
}
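The query filter above can be read as a standalone pure function. This sketch factors it out for illustration; the `WealthboxItem` alias is hypothetical and only names the mapped shape the route returns. Filtering client-side keeps the route simple at the cost of fetching the full page from Wealthbox first, which is reasonable for typical contact-book sizes.

interface WealthboxItem {
  id: string
  name: string
  type: string
  content: string
}

// Case-insensitive substring match over name and content, as in the route above
function filterItems(items: WealthboxItem[], query: string): WealthboxItem[] {
  const searchTerm = query.trim().toLowerCase()
  if (!searchTerm) return items
  return items.filter(
    (item) =>
      item.name.toLowerCase().includes(searchTerm) ||
      item.content.toLowerCase().includes(searchTerm)
  )
}

// filterItems([{ id: '1', name: 'Ada Lovelace', type: 'contact', content: '' }], 'ada')
// -> returns the single matching contact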
apps/sim/app/api/billing/daily/route.ts (new file, 109 lines)
@@ -0,0 +1,109 @@
import { type NextRequest, NextResponse } from 'next/server'
import { verifyCronAuth } from '@/lib/auth/internal'
import { processDailyBillingCheck } from '@/lib/billing/core/billing'
import { createLogger } from '@/lib/logs/console-logger'

const logger = createLogger('DailyBillingCron')

/**
 * Daily billing CRON job endpoint that checks individual billing periods
 */
export async function POST(request: NextRequest) {
  try {
    const authError = verifyCronAuth(request, 'daily billing check')
    if (authError) {
      return authError
    }

    logger.info('Starting daily billing check cron job')

    const startTime = Date.now()

    // Process overage billing for users and organizations with periods ending today
    const result = await processDailyBillingCheck()

    const duration = Date.now() - startTime

    if (result.success) {
      logger.info('Daily billing check completed successfully', {
        processedUsers: result.processedUsers,
        processedOrganizations: result.processedOrganizations,
        totalChargedAmount: result.totalChargedAmount,
        duration: `${duration}ms`,
      })

      return NextResponse.json({
        success: true,
        summary: {
          processedUsers: result.processedUsers,
          processedOrganizations: result.processedOrganizations,
          totalChargedAmount: result.totalChargedAmount,
          duration: `${duration}ms`,
        },
      })
    }

    logger.error('Daily billing check completed with errors', {
      processedUsers: result.processedUsers,
      processedOrganizations: result.processedOrganizations,
      totalChargedAmount: result.totalChargedAmount,
      errorCount: result.errors.length,
      errors: result.errors,
      duration: `${duration}ms`,
    })

    return NextResponse.json(
      {
        success: false,
        summary: {
          processedUsers: result.processedUsers,
          processedOrganizations: result.processedOrganizations,
          totalChargedAmount: result.totalChargedAmount,
          errorCount: result.errors.length,
          duration: `${duration}ms`,
        },
        errors: result.errors,
      },
      { status: 500 }
    )
  } catch (error) {
    logger.error('Fatal error in daily billing cron job', { error })

    return NextResponse.json(
      {
        success: false,
        error: 'Internal server error during daily billing check',
        details: error instanceof Error ? error.message : 'Unknown error',
      },
      { status: 500 }
    )
  }
}

/**
 * GET endpoint for manual testing and health checks
 */
export async function GET(request: NextRequest) {
  try {
    const authError = verifyCronAuth(request, 'daily billing check health check')
    if (authError) {
      return authError
    }

    return NextResponse.json({
      status: 'ready',
      message:
        'Daily billing check cron job is ready to process users and organizations with periods ending today',
      currentDate: new Date().toISOString().split('T')[0],
    })
  } catch (error) {
    logger.error('Error in billing health check', { error })
    return NextResponse.json(
      {
        status: 'error',
        error: error instanceof Error ? error.message : 'Unknown error',
      },
      { status: 500 }
    )
  }
}
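A minimal sketch of how a scheduler might invoke this endpoint. The authorization scheme is an assumption: verifyCronAuth's expected header is not shown in this diff, so treat `cronSecret` and the Bearer format as placeholders.

async function triggerDailyBilling(baseUrl: string, cronSecret: string) {
  const res = await fetch(`${baseUrl}/api/billing/daily`, {
    method: 'POST',
    // Assumed header format; match whatever verifyCronAuth actually checks
    headers: { Authorization: `Bearer ${cronSecret}` },
  })
  const body = await res.json()
  if (!res.ok) {
    // A 500 with success: false carries the summary plus the per-account errors
    throw new Error(`Daily billing failed: ${JSON.stringify(body.errors ?? body)}`)
  }
  return body.summary // { processedUsers, processedOrganizations, totalChargedAmount, duration }
}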
apps/sim/app/api/billing/route.ts (new file, 116 lines)
@@ -0,0 +1,116 @@
import { and, eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { getSession } from '@/lib/auth'
import { getSimplifiedBillingSummary } from '@/lib/billing/core/billing'
import { getOrganizationBillingData } from '@/lib/billing/core/organization-billing'
import { createLogger } from '@/lib/logs/console-logger'
import { db } from '@/db'
import { member } from '@/db/schema'

const logger = createLogger('UnifiedBillingAPI')

/**
 * Unified Billing Endpoint
 */
export async function GET(request: NextRequest) {
  const session = await getSession()

  try {
    if (!session?.user?.id) {
      return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
    }

    const { searchParams } = new URL(request.url)
    const context = searchParams.get('context') || 'user'
    const contextId = searchParams.get('id')

    // Validate context parameter
    if (!['user', 'organization'].includes(context)) {
      return NextResponse.json(
        { error: 'Invalid context. Must be "user" or "organization"' },
        { status: 400 }
      )
    }

    // For organization context, require contextId
    if (context === 'organization' && !contextId) {
      return NextResponse.json(
        { error: 'Organization ID is required when context=organization' },
        { status: 400 }
      )
    }

    let billingData

    if (context === 'user') {
      // Get user billing (may include organization if they're part of one)
      billingData = await getSimplifiedBillingSummary(session.user.id, contextId || undefined)
    } else {
      // Get user role in organization for permission checks first
      const memberRecord = await db
        .select({ role: member.role })
        .from(member)
        .where(and(eq(member.organizationId, contextId!), eq(member.userId, session.user.id)))
        .limit(1)

      if (memberRecord.length === 0) {
        return NextResponse.json(
          { error: 'Access denied - not a member of this organization' },
          { status: 403 }
        )
      }

      // Get organization-specific billing
      const rawBillingData = await getOrganizationBillingData(contextId!)

      if (!rawBillingData) {
        return NextResponse.json(
          { error: 'Organization not found or access denied' },
          { status: 404 }
        )
      }

      // Transform data to match component expectations
      billingData = {
        organizationId: rawBillingData.organizationId,
        organizationName: rawBillingData.organizationName,
        subscriptionPlan: rawBillingData.subscriptionPlan,
        subscriptionStatus: rawBillingData.subscriptionStatus,
        totalSeats: rawBillingData.totalSeats,
        usedSeats: rawBillingData.usedSeats,
        totalCurrentUsage: rawBillingData.totalCurrentUsage,
        totalUsageLimit: rawBillingData.totalUsageLimit,
        averageUsagePerMember: rawBillingData.averageUsagePerMember,
        billingPeriodStart: rawBillingData.billingPeriodStart?.toISOString() || null,
        billingPeriodEnd: rawBillingData.billingPeriodEnd?.toISOString() || null,
        members: rawBillingData.members.map((member) => ({
          ...member,
          joinedAt: member.joinedAt.toISOString(),
          lastActive: member.lastActive?.toISOString() || null,
        })),
      }

      const userRole = memberRecord[0].role

      return NextResponse.json({
        success: true,
        context,
        data: billingData,
        userRole,
      })
    }

    return NextResponse.json({
      success: true,
      context,
      data: billingData,
    })
  } catch (error) {
    logger.error('Failed to get billing data', {
      userId: session?.user?.id,
      error,
    })

    return NextResponse.json({ error: 'Internal server error' }, { status: 500 })
  }
}
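A minimal sketch of consuming the unified endpoint from the client. The result type mirrors the JSON built above; the function name and `BillingResult` alias are illustrative, not part of the codebase.

type BillingResult = {
  success: boolean
  context: string
  data: unknown
  userRole?: string // present only for organization context
}

async function fetchBilling(orgId?: string): Promise<BillingResult> {
  const params = new URLSearchParams(
    orgId ? { context: 'organization', id: orgId } : { context: 'user' }
  )
  const res = await fetch(`/api/billing?${params}`)
  if (!res.ok) {
    const { error } = await res.json()
    throw new Error(`Billing fetch failed (${res.status}): ${error}`)
  }
  return res.json()
}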
apps/sim/app/api/billing/webhooks/stripe/route.ts (new file, 116 lines)
@@ -0,0 +1,116 @@
import { headers } from 'next/headers'
import { type NextRequest, NextResponse } from 'next/server'
import type Stripe from 'stripe'
import { requireStripeClient } from '@/lib/billing/stripe-client'
import { handleInvoiceWebhook } from '@/lib/billing/webhooks/stripe-invoice-webhooks'
import { env } from '@/lib/env'
import { createLogger } from '@/lib/logs/console-logger'

const logger = createLogger('StripeInvoiceWebhook')

/**
 * Stripe billing webhook endpoint for invoice-related events
 * Endpoint: /api/billing/webhooks/stripe
 * Handles: invoice.payment_succeeded, invoice.payment_failed, invoice.finalized
 */
export async function POST(request: NextRequest) {
  try {
    const body = await request.text()
    const headersList = await headers()
    const signature = headersList.get('stripe-signature')

    if (!signature) {
      logger.error('Missing Stripe signature header')
      return NextResponse.json({ error: 'Missing Stripe signature' }, { status: 400 })
    }

    if (!env.STRIPE_BILLING_WEBHOOK_SECRET) {
      logger.error('Missing Stripe webhook secret configuration')
      return NextResponse.json({ error: 'Webhook secret not configured' }, { status: 500 })
    }

    // Check if Stripe client is available
    let stripe
    try {
      stripe = requireStripeClient()
    } catch (stripeError) {
      logger.error('Stripe client not available for webhook processing', {
        error: stripeError,
      })
      return NextResponse.json({ error: 'Stripe client not configured' }, { status: 500 })
    }

    // Verify webhook signature
    let event: Stripe.Event
    try {
      event = stripe.webhooks.constructEvent(body, signature, env.STRIPE_BILLING_WEBHOOK_SECRET)
    } catch (signatureError) {
      logger.error('Invalid Stripe webhook signature', {
        error: signatureError,
        signature,
      })
      return NextResponse.json({ error: 'Invalid signature' }, { status: 400 })
    }

    logger.info('Received Stripe invoice webhook', {
      eventId: event.id,
      eventType: event.type,
    })

    // Handle specific invoice events
    const supportedEvents = [
      'invoice.payment_succeeded',
      'invoice.payment_failed',
      'invoice.finalized',
    ]

    if (supportedEvents.includes(event.type)) {
      try {
        await handleInvoiceWebhook(event)

        logger.info('Successfully processed invoice webhook', {
          eventId: event.id,
          eventType: event.type,
        })

        return NextResponse.json({ received: true })
      } catch (processingError) {
        logger.error('Failed to process invoice webhook', {
          eventId: event.id,
          eventType: event.type,
          error: processingError,
        })

        // Return 500 to tell Stripe to retry the webhook
        return NextResponse.json({ error: 'Failed to process webhook' }, { status: 500 })
      }
    } else {
      // Not a supported invoice event, ignore
      logger.info('Ignoring unsupported webhook event', {
        eventId: event.id,
        eventType: event.type,
        supportedEvents,
      })

      return NextResponse.json({ received: true })
    }
  } catch (error) {
    logger.error('Fatal error in invoice webhook handler', {
      error,
      url: request.url,
    })

    return NextResponse.json({ error: 'Internal server error' }, { status: 500 })
  }
}

/**
 * GET endpoint for webhook health checks
 */
export async function GET() {
  return NextResponse.json({
    status: 'healthy',
    webhook: 'stripe-invoices',
    events: ['invoice.payment_succeeded', 'invoice.payment_failed', 'invoice.finalized'],
  })
}
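A hedged local-testing sketch: construct a signed payload the way Stripe would, then POST it to the route. `generateTestHeaderString` is part of stripe-node's webhook helpers; the secret passed here must match STRIPE_BILLING_WEBHOOK_SECRET in the app's environment, and the event body below is a deliberately minimal stand-in, not a full Stripe invoice object.

import Stripe from 'stripe'

async function postTestInvoiceEvent(baseUrl: string, webhookSecret: string) {
  const stripe = new Stripe('sk_test_placeholder')
  const payload = JSON.stringify({
    id: 'evt_test',
    object: 'event',
    type: 'invoice.payment_succeeded',
    data: { object: { id: 'in_test', object: 'invoice' } },
  })
  // Produce a valid stripe-signature header for this payload and secret
  const signature = stripe.webhooks.generateTestHeaderString({
    payload,
    secret: webhookSecret,
  })
  const res = await fetch(`${baseUrl}/api/billing/webhooks/stripe`, {
    method: 'POST',
    headers: { 'stripe-signature': signature, 'Content-Type': 'application/json' },
    body: payload,
  })
  return res.json() // expect { received: true } on success
}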
@@ -1,8 +1,7 @@
import { render } from '@react-email/render'
import { eq } from 'drizzle-orm'
import type { NextRequest } from 'next/server'
import { z } from 'zod'
import OTPVerificationEmail from '@/components/emails/otp-verification-email'
import { renderOTPEmail } from '@/components/emails/render-email'
import { sendEmail } from '@/lib/email/mailer'
import { createLogger } from '@/lib/logs/console-logger'
import { getRedisClient, markMessageAsProcessed, releaseLock } from '@/lib/redis'
@@ -158,7 +157,6 @@ export async function POST(
        ? deployment.allowedEmails
        : []

    // Check if the email is allowed
    const isEmailAllowed =
      allowedEmails.includes(email) ||
      allowedEmails.some((allowed: string) => {
@@ -176,24 +174,17 @@ export async function POST(
      )
    }

    // Generate OTP
    const otp = generateOTP()

    // Store OTP in Redis - AWAIT THIS BEFORE RETURNING RESPONSE
    await storeOTP(email, deployment.id, otp)

    // Create the email
    const emailContent = OTPVerificationEmail({
    const emailHtml = await renderOTPEmail(
      otp,
      email,
      type: 'chat-access',
      chatTitle: deployment.title || 'Chat',
    })
      'email-verification',
      deployment.title || 'Chat'
    )

    // await the render function
    const emailHtml = await render(emailContent)

    // MAKE SURE TO AWAIT THE EMAIL SENDING
    const emailResult = await sendEmail({
      to: email,
      subject: `Verification code for ${deployment.title || 'Chat'}`,
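A small sketch of the new email-rendering path introduced above. The `renderOTPEmail(otp, email, type, title)` signature is read off the diff; the `html` field on the sendEmail options is an assumption, since the full options object is not shown in this hunk.

import { renderOTPEmail } from '@/components/emails/render-email'
import { sendEmail } from '@/lib/email/mailer'

async function sendChatOtp(email: string, otp: string, chatTitle: string) {
  // Render the OTP email HTML with the new helper
  const emailHtml = await renderOTPEmail(otp, email, 'email-verification', chatTitle)
  return sendEmail({
    to: email,
    subject: `Verification code for ${chatTitle}`,
    html: emailHtml, // assumed field name; check sendEmail's actual options
  })
}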
@@ -241,7 +241,7 @@ describe('Chat Subdomain API Route', () => {
  })

  describe('POST endpoint', () => {
    it('should handle authentication requests without messages', async () => {
    it('should handle authentication requests without input', async () => {
      const req = createMockRequest('POST', { password: 'test-password' })
      const params = Promise.resolve({ subdomain: 'password-protected-chat' })

@@ -257,7 +257,7 @@ describe('Chat Subdomain API Route', () => {
      expect(mockSetChatAuthCookie).toHaveBeenCalled()
    })

    it('should return 400 for requests without message', async () => {
    it('should return 400 for requests without input', async () => {
      const req = createMockRequest('POST', {})
      const params = Promise.resolve({ subdomain: 'test-chat' })

@@ -269,7 +269,7 @@ describe('Chat Subdomain API Route', () => {

      const data = await response.json()
      expect(data).toHaveProperty('error')
      expect(data).toHaveProperty('message', 'No message provided')
      expect(data).toHaveProperty('message', 'No input provided')
    })

    it('should return 401 for unauthorized access', async () => {
@@ -279,7 +279,7 @@ describe('Chat Subdomain API Route', () => {
        error: 'Authentication required',
      }))

      const req = createMockRequest('POST', { message: 'Hello' })
      const req = createMockRequest('POST', { input: 'Hello' })
      const params = Promise.resolve({ subdomain: 'protected-chat' })

      const { POST } = await import('./route')
@@ -342,7 +342,7 @@ describe('Chat Subdomain API Route', () => {
        }
      })

      const req = createMockRequest('POST', { message: 'Hello' })
      const req = createMockRequest('POST', { input: 'Hello' })
      const params = Promise.resolve({ subdomain: 'test-chat' })

      const { POST } = await import('./route')
@@ -357,7 +357,7 @@ describe('Chat Subdomain API Route', () => {
    })

    it('should return streaming response for valid chat messages', async () => {
      const req = createMockRequest('POST', { message: 'Hello world', conversationId: 'conv-123' })
      const req = createMockRequest('POST', { input: 'Hello world', conversationId: 'conv-123' })
      const params = Promise.resolve({ subdomain: 'test-chat' })

      const { POST } = await import('./route')
@@ -374,7 +374,7 @@ describe('Chat Subdomain API Route', () => {
    })

    it('should handle streaming response body correctly', async () => {
      const req = createMockRequest('POST', { message: 'Hello world' })
      const req = createMockRequest('POST', { input: 'Hello world' })
      const params = Promise.resolve({ subdomain: 'test-chat' })

      const { POST } = await import('./route')
@@ -404,7 +404,7 @@ describe('Chat Subdomain API Route', () => {
        throw new Error('Execution failed')
      })

      const req = createMockRequest('POST', { message: 'Trigger error' })
      const req = createMockRequest('POST', { input: 'Trigger error' })
      const params = Promise.resolve({ subdomain: 'test-chat' })

      const { POST } = await import('./route')
@@ -444,7 +444,7 @@ describe('Chat Subdomain API Route', () => {

    it('should pass conversationId to executeWorkflowForChat when provided', async () => {
      const req = createMockRequest('POST', {
        message: 'Hello world',
        input: 'Hello world',
        conversationId: 'test-conversation-123',
      })
      const params = Promise.resolve({ subdomain: 'test-chat' })
@@ -461,7 +461,7 @@ describe('Chat Subdomain API Route', () => {
    })

    it('should handle missing conversationId gracefully', async () => {
      const req = createMockRequest('POST', { message: 'Hello world' })
      const req = createMockRequest('POST', { input: 'Hello world' })
      const params = Promise.resolve({ subdomain: 'test-chat' })

      const { POST } = await import('./route')
@@ -72,11 +72,11 @@ export async function POST(
  }

  // Use the already parsed body
  const { message, password, email, conversationId } = parsedBody
  const { input, password, email, conversationId } = parsedBody

  // If this is an authentication request (has password or email but no message),
  // If this is an authentication request (has password or email but no input),
  // set auth cookie and return success
  if ((password || email) && !message) {
  if ((password || email) && !input) {
    const response = addCorsHeaders(createSuccessResponse({ authenticated: true }), request)

    // Set authentication cookie
@@ -86,8 +86,8 @@ export async function POST(
  }

  // For chat messages, create regular response
  if (!message) {
    return addCorsHeaders(createErrorResponse('No message provided', 400), request)
  if (!input) {
    return addCorsHeaders(createErrorResponse('No input provided', 400), request)
  }

  // Get the workflow for this chat
@@ -105,8 +105,8 @@ export async function POST(
  }

  try {
    // Execute workflow with structured input (message + conversationId for context)
    const result = await executeWorkflowForChat(deployment.id, message, conversationId)
    // Execute workflow with structured input (input + conversationId for context)
    const result = await executeWorkflowForChat(deployment.id, input, conversationId)

    // The result is always a ReadableStream that we can pipe to the client
    const streamResponse = new NextResponse(result, {
@@ -194,6 +194,7 @@ export async function GET(
        description: deployment.description,
        customizations: deployment.customizations,
        authType: deployment.authType,
        outputConfigs: deployment.outputConfigs,
      }),
      request
    )
@@ -219,6 +220,7 @@ export async function GET(
      description: deployment.description,
      customizations: deployment.customizations,
      authType: deployment.authType,
      outputConfigs: deployment.outputConfigs,
    }),
    request
  )
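A hedged sketch of the request contract after the `message` to `input` rename above. `chatBaseUrl` is a placeholder; the concrete URL depends on how the subdomain route is mounted, which this diff does not show.

async function chatRequests(chatBaseUrl: string) {
  // Authentication request: password (or email), but no `input`
  await fetch(chatBaseUrl, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ password: 'secret' }),
  })

  // Chat turn: `input` is required, `conversationId` is optional context
  const res = await fetch(chatBaseUrl, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ input: 'Hello world', conversationId: 'conv-123' }),
  })
  // On success the body is a stream piped from the workflow executor
  return res.body
}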
@@ -2,7 +2,7 @@ import { and, eq } from 'drizzle-orm'
import type { NextRequest } from 'next/server'
import { z } from 'zod'
import { getSession } from '@/lib/auth'
import { env } from '@/lib/env'
import { isDev } from '@/lib/environment'
import { createLogger } from '@/lib/logs/console-logger'
import { getBaseDomain } from '@/lib/urls/utils'
import { encryptSecret } from '@/lib/utils'
@@ -71,9 +71,7 @@ export async function GET(_request: NextRequest, { params }: { params: Promise<{
  // Create a new result object without the password
  const { password, ...safeData } = chatInstance[0]

  const isDevelopment = env.NODE_ENV === 'development'

  const chatUrl = isDevelopment
  const chatUrl = isDev
    ? `http://${chatInstance[0].subdomain}.${getBaseDomain()}`
    : `https://${chatInstance[0].subdomain}.simstudio.ai`

@@ -221,9 +219,7 @@ export async function PATCH(request: NextRequest, { params }: { params: Promise<

  const updatedSubdomain = subdomain || existingChat[0].subdomain

  const isDevelopment = env.NODE_ENV === 'development'

  const chatUrl = isDevelopment
  const chatUrl = isDev
    ? `http://${updatedSubdomain}.${getBaseDomain()}`
    : `https://${updatedSubdomain}.simstudio.ai`
@@ -4,6 +4,7 @@ import { v4 as uuidv4 } from 'uuid'
import { z } from 'zod'
import { getSession } from '@/lib/auth'
import { env } from '@/lib/env'
import { isDev } from '@/lib/environment'
import { createLogger } from '@/lib/logs/console-logger'
import { encryptSecret } from '@/lib/utils'
import { createErrorResponse, createSuccessResponse } from '@/app/api/workflows/utils'
@@ -169,11 +170,10 @@ export async function POST(request: NextRequest) {

  // Return successful response with chat URL
  // Check if we're in development or production
  const isDevelopment = env.NODE_ENV === 'development'
  const baseUrl = env.NEXT_PUBLIC_APP_URL || 'http://localhost:3000'

  let chatUrl: string
  if (isDevelopment) {
  if (isDev) {
    try {
      const url = new URL(baseUrl)
      chatUrl = `${url.protocol}//${subdomain}.${url.host}`
@@ -1,10 +1,11 @@
import { eq, sql } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { v4 as uuidv4 } from 'uuid'
import { env } from '@/lib/env'
import { isDev } from '@/lib/environment'
import { createLogger } from '@/lib/logs/console-logger'
import { persistExecutionLogs } from '@/lib/logs/execution-logger'
import { EnhancedLoggingSession } from '@/lib/logs/enhanced-logging-session'
import { buildTraceSpans } from '@/lib/logs/trace-spans'
import { processStreamingBlockLogs } from '@/lib/tokenization'
import { decryptSecret } from '@/lib/utils'
import { db } from '@/db'
import { chat, environment as envTable, userStats, workflow } from '@/db/schema'
@@ -19,7 +20,6 @@ declare global {
}

const logger = createLogger('ChatAuthUtils')
const isDevelopment = env.NODE_ENV === 'development'

export const encryptAuthToken = (subdomainId: string, type: string): string => {
  return Buffer.from(`${subdomainId}:${type}:${Date.now()}`).toString('base64')
@@ -62,11 +62,11 @@ export const setChatAuthCookie = (
    name: `chat_auth_${subdomainId}`,
    value: token,
    httpOnly: true,
    secure: !isDevelopment,
    secure: !isDev,
    sameSite: 'lax',
    path: '/',
    // Using subdomain for the domain in production
    domain: isDevelopment ? undefined : '.simstudio.ai',
    domain: isDev ? undefined : '.simstudio.ai',
    maxAge: 60 * 60 * 24, // 24 hours
  })
}
@@ -77,7 +77,7 @@ export function addCorsHeaders(response: NextResponse, request: NextRequest) {
  const origin = request.headers.get('origin') || ''

  // In development, allow any localhost subdomain
  if (isDevelopment && origin.includes('localhost')) {
  if (isDev && origin.includes('localhost')) {
    response.headers.set('Access-Control-Allow-Origin', origin)
    response.headers.set('Access-Control-Allow-Credentials', 'true')
    response.headers.set('Access-Control-Allow-Methods', 'GET, POST, OPTIONS')
@@ -128,10 +128,10 @@ export async function validateChatAuth(
    return { authorized: false, error: 'Password is required' }
  }

  const { password, message } = parsedBody
  const { password, input } = parsedBody

  // If this is a chat message, not an auth attempt
  if (message && !password) {
  if (input && !password) {
    return { authorized: false, error: 'auth_required_password' }
  }

@@ -170,10 +170,10 @@ export async function validateChatAuth(
    return { authorized: false, error: 'Email is required' }
  }

  const { email, message } = parsedBody
  const { email, input } = parsedBody

  // If this is a chat message, not an auth attempt
  if (message && !email) {
  if (input && !email) {
    return { authorized: false, error: 'auth_required_email' }
  }

@@ -211,17 +211,17 @@ export async function validateChatAuth(
/**
 * Executes a workflow for a chat request and returns the formatted output.
 *
 * When workflows reference <start.response.input>, they receive a structured JSON
 * containing both the message and conversationId for maintaining chat context.
 * When workflows reference <start.input>, they receive the input directly.
 * The conversationId is available at <start.conversationId> for maintaining chat context.
 *
 * @param chatId - Chat deployment identifier
 * @param message - User's chat message
 * @param input - User's chat input
 * @param conversationId - Optional ID for maintaining conversation context
 * @returns Workflow execution result formatted for the chat interface
 */
export async function executeWorkflowForChat(
  chatId: string,
  message: string,
  input: string,
  conversationId?: string
): Promise<any> {
  const requestId = crypto.randomUUID().slice(0, 8)
@@ -252,32 +252,42 @@ export async function executeWorkflowForChat(

  const deployment = deploymentResult[0]
  const workflowId = deployment.workflowId
  const executionId = uuidv4()

  // Set up enhanced logging for chat execution
  const loggingSession = new EnhancedLoggingSession(workflowId, executionId, 'chat', requestId)

  // Check for multi-output configuration in customizations
  const customizations = (deployment.customizations || {}) as Record<string, any>
  let outputBlockIds: string[] = []
  let outputPaths: string[] = []

  // Extract output configs from the new schema format
  let selectedOutputIds: string[] = []
  if (deployment.outputConfigs && Array.isArray(deployment.outputConfigs)) {
    // Extract block IDs and paths from the new outputConfigs array format
    // Extract output IDs in the format expected by the streaming processor
    logger.debug(
      `[${requestId}] Found ${deployment.outputConfigs.length} output configs in deployment`
    )
    deployment.outputConfigs.forEach((config) => {

    selectedOutputIds = deployment.outputConfigs.map((config) => {
      const outputId = config.path
        ? `${config.blockId}_${config.path}`
        : `${config.blockId}.content`

      logger.debug(
        `[${requestId}] Processing output config: blockId=${config.blockId}, path=${config.path || 'none'}`
        `[${requestId}] Processing output config: blockId=${config.blockId}, path=${config.path || 'content'} -> outputId=${outputId}`
      )

      return outputId
    })

    // Also extract block IDs for legacy compatibility
    outputBlockIds = deployment.outputConfigs.map((config) => config.blockId)
    outputPaths = deployment.outputConfigs.map((config) => config.path || '')
  } else {
    // Use customizations as fallback
    outputBlockIds = Array.isArray(customizations.outputBlockIds)
      ? customizations.outputBlockIds
      : []
    outputPaths = Array.isArray(customizations.outputPaths) ? customizations.outputPaths : []
  }

  // Fall back to customizations if we still have no outputs
@@ -287,10 +297,11 @@ export async function executeWorkflowForChat(
    customizations.outputBlockIds.length > 0
  ) {
    outputBlockIds = customizations.outputBlockIds
    outputPaths = customizations.outputPaths || new Array(outputBlockIds.length).fill('')
  }

  logger.debug(`[${requestId}] Using ${outputBlockIds.length} output blocks for extraction`)
  logger.debug(
    `[${requestId}] Using ${outputBlockIds.length} output blocks and ${selectedOutputIds.length} selected output IDs for extraction`
  )

  // Find the workflow (deployedState is NOT deprecated - needed for chat execution)
  const workflowResult = await db
@@ -407,6 +418,13 @@ export async function executeWorkflowForChat(
    {} as Record<string, Record<string, any>>
  )

  // Start enhanced logging session
  await loggingSession.safeStart({
    userId: deployment.userId,
    workspaceId: '', // TODO: Get from workflow
    variables: workflowVariables,
  })

  const stream = new ReadableStream({
    async start(controller) {
      const encoder = new TextEncoder()
@@ -445,11 +463,11 @@ export async function executeWorkflowForChat(
        workflow: serializedWorkflow,
        currentBlockStates: processedBlockStates,
        envVarValues: decryptedEnvVars,
        workflowInput: { input: message, conversationId },
        workflowInput: { input: input, conversationId },
        workflowVariables,
        contextExtensions: {
          stream: true,
          selectedOutputIds: outputBlockIds,
          selectedOutputIds: selectedOutputIds.length > 0 ? selectedOutputIds : outputBlockIds,
          edges: edges.map((e: any) => ({
            source: e.source,
            target: e.target,
@@ -458,16 +476,41 @@ export async function executeWorkflowForChat(
        },
      })

      const result = await executor.execute(workflowId)
      // Set up enhanced logging on the executor
      loggingSession.setupExecutor(executor)

      let result
      try {
        result = await executor.execute(workflowId)
      } catch (error: any) {
        logger.error(`[${requestId}] Chat workflow execution failed:`, error)
        await loggingSession.safeCompleteWithError({
          endedAt: new Date().toISOString(),
          totalDurationMs: 0,
          error: {
            message: error.message || 'Chat workflow execution failed',
            stackTrace: error.stack,
          },
        })
        throw error
      }

      if (result && 'success' in result) {
        result.logs?.forEach((log: BlockLog) => {
          if (streamedContent.has(log.blockId)) {
            if (log.output?.response) {
              log.output.response.content = streamedContent.get(log.blockId)
        // Update streamed content and apply tokenization
        if (result.logs) {
          result.logs.forEach((log: BlockLog) => {
            if (streamedContent.has(log.blockId)) {
              const content = streamedContent.get(log.blockId)
              if (log.output) {
                log.output.content = content
              }
            }
          }
        })
          })

        // Process all logs for streaming tokenization
        const processedCount = processStreamingBlockLogs(result.logs, streamedContent)
        logger.info(`[CHAT-API] Processed ${processedCount} blocks for streaming tokenization`)
      }

      const { traceSpans, totalDuration } = buildTraceSpans(result)
      const enrichedResult = { ...result, traceSpans, totalDuration }
@@ -481,8 +524,7 @@ export async function executeWorkflowForChat(
        ;(enrichedResult.metadata as any).conversationId = conversationId
      }
      const executionId = uuidv4()
      await persistExecutionLogs(workflowId, executionId, enrichedResult, 'chat')
      logger.debug(`Persisted logs for deployed chat: ${executionId}`)
      logger.debug(`Generated execution ID for deployed chat: ${executionId}`)

      if (result.success) {
        try {
@@ -506,6 +548,17 @@ export async function executeWorkflowForChat(
        )
      }

      // Complete enhanced logging session (for both success and failure)
      if (result && 'success' in result) {
        const { traceSpans } = buildTraceSpans(result)
        await loggingSession.safeComplete({
          endedAt: new Date().toISOString(),
          totalDurationMs: result.metadata?.duration || 0,
          finalOutput: result.output,
          traceSpans,
        })
      }

      controller.close()
    },
  })
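The outputConfigs to selectedOutputIds mapping from the diff above, pulled out as a standalone helper to make the ID format explicit. The `${blockId}_${path}` and `${blockId}.content` formats are taken directly from the new code; the `OutputConfig` type name is illustrative.

interface OutputConfig {
  blockId: string
  path?: string
}

// Build the output IDs in the format expected by the streaming processor
function toSelectedOutputIds(outputConfigs: OutputConfig[]): string[] {
  return outputConfigs.map((config) =>
    config.path ? `${config.blockId}_${config.path}` : `${config.blockId}.content`
  )
}

// toSelectedOutputIds([{ blockId: 'agent1', path: 'content' }, { blockId: 'api1' }])
// -> ['agent1_content', 'api1.content']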
@@ -239,7 +239,7 @@ Example Scenario:
User Prompt: "Fetch user data from an API. Use the User ID passed in as 'userId' and an API Key stored as the 'SERVICE_API_KEY' environment variable."

Generated Code:
const userId = <block.response.content>; // Correct: Accessing input parameter without quotes
const userId = <block.content>; // Correct: Accessing input parameter without quotes
const apiKey = {{SERVICE_API_KEY}}; // Correct: Accessing environment variable without quotes
const url = \`https://api.example.com/users/\${userId}\`;

@@ -273,7 +273,7 @@ Do not include import/require statements unless absolutely necessary and they ar
Do not include markdown formatting or explanations.
Output only the raw TypeScript code. Use modern TypeScript features where appropriate. Do not use semicolons.
Example:
const userId = <block.response.content> as string
const userId = <block.content> as string
const apiKey = {{SERVICE_API_KEY}}
const response = await fetch(\`https://api.example.com/users/\${userId}\`, { headers: { Authorization: \`Bearer \${apiKey}\` } })
if (!response.ok) {
apps/sim/app/api/copilot/docs/route.ts (new file, 281 lines)
@@ -0,0 +1,281 @@
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getSession } from '@/lib/auth'
import {
  type CopilotChat,
  type CopilotMessage,
  createChat,
  generateChatTitle,
  generateDocsResponse,
  getChat,
  updateChat,
} from '@/lib/copilot/service'
import { createLogger } from '@/lib/logs/console-logger'

const logger = createLogger('CopilotDocsAPI')

// Schema for docs queries
const DocsQuerySchema = z.object({
  query: z.string().min(1, 'Query is required'),
  topK: z.number().min(1).max(20).default(5),
  provider: z.string().optional(),
  model: z.string().optional(),
  stream: z.boolean().optional().default(false),
  chatId: z.string().optional(),
  workflowId: z.string().optional(),
  createNewChat: z.boolean().optional().default(false),
})

/**
 * POST /api/copilot/docs
 * Ask questions about documentation using RAG
 */
export async function POST(req: NextRequest) {
  const requestId = crypto.randomUUID()

  try {
    const session = await getSession()
    if (!session?.user?.id) {
      return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
    }

    const body = await req.json()
    const { query, topK, provider, model, stream, chatId, workflowId, createNewChat } =
      DocsQuerySchema.parse(body)

    logger.info(`[${requestId}] Docs RAG query: "${query}"`, {
      provider,
      model,
      topK,
      chatId,
      workflowId,
      createNewChat,
      userId: session.user.id,
    })

    // Handle chat context
    let currentChat: CopilotChat | null = null
    let conversationHistory: CopilotMessage[] = []

    if (chatId) {
      // Load existing chat
      currentChat = await getChat(chatId, session.user.id)
      if (currentChat) {
        conversationHistory = currentChat.messages
      }
    } else if (createNewChat && workflowId) {
      // Create new chat
      currentChat = await createChat(session.user.id, workflowId)
    }

    // Generate docs response
    const result = await generateDocsResponse(query, conversationHistory, {
      topK,
      provider,
      model,
      stream,
      workflowId,
      requestId,
    })

    if (stream && result.response instanceof ReadableStream) {
      // Handle streaming response with docs sources
      logger.info(`[${requestId}] Returning streaming docs response`)

      const encoder = new TextEncoder()

      return new Response(
        new ReadableStream({
          async start(controller) {
            const reader = (result.response as ReadableStream).getReader()
            let accumulatedResponse = ''

            try {
              // Send initial metadata including sources
              const metadata = {
                type: 'metadata',
                chatId: currentChat?.id,
                sources: result.sources,
                citations: result.sources.map((source, index) => ({
                  id: index + 1,
                  title: source.title,
                  url: source.url,
                })),
                metadata: {
                  requestId,
                  chunksFound: result.sources.length,
                  query,
                  topSimilarity: result.sources[0]?.similarity,
                  provider,
                  model,
                },
              }
              controller.enqueue(encoder.encode(`data: ${JSON.stringify(metadata)}\n\n`))

              while (true) {
                const { done, value } = await reader.read()
                if (done) break

                const chunk = new TextDecoder().decode(value)
                // Clean up any object serialization artifacts in streaming content
                const cleanedChunk = chunk.replace(/\[object Object\],?/g, '')
                accumulatedResponse += cleanedChunk

                const contentChunk = {
                  type: 'content',
                  content: cleanedChunk,
                }
                controller.enqueue(encoder.encode(`data: ${JSON.stringify(contentChunk)}\n\n`))
              }

              // Send completion marker first to unblock the user
              controller.enqueue(encoder.encode(`data: {"type":"done"}\n\n`))

              // Save conversation to database asynchronously (non-blocking)
              if (currentChat) {
                // Fire-and-forget database save to avoid blocking stream completion
                Promise.resolve()
                  .then(async () => {
                    try {
                      const userMessage: CopilotMessage = {
                        id: crypto.randomUUID(),
                        role: 'user',
                        content: query,
                        timestamp: new Date().toISOString(),
                      }

                      const assistantMessage: CopilotMessage = {
                        id: crypto.randomUUID(),
                        role: 'assistant',
                        content: accumulatedResponse,
                        timestamp: new Date().toISOString(),
                        citations: result.sources.map((source, index) => ({
                          id: index + 1,
                          title: source.title,
                          url: source.url,
                        })),
                      }

                      const updatedMessages = [
                        ...conversationHistory,
                        userMessage,
                        assistantMessage,
                      ]

                      // Generate title if this is the first message
                      let updatedTitle = currentChat.title ?? undefined
                      if (!updatedTitle && conversationHistory.length === 0) {
                        updatedTitle = await generateChatTitle(query)
                      }

                      // Update the chat in database
                      await updateChat(currentChat.id, session.user.id, {
                        title: updatedTitle,
                        messages: updatedMessages,
                      })

                      logger.info(
                        `[${requestId}] Updated chat ${currentChat.id} with new docs messages`
                      )
                    } catch (dbError) {
                      logger.error(`[${requestId}] Failed to save chat to database:`, dbError)
                      // Database errors don't affect the user's streaming experience
                    }
                  })
                  .catch((error) => {
                    logger.error(`[${requestId}] Unexpected error in async database save:`, error)
                  })
              }
            } catch (error) {
              logger.error(`[${requestId}] Docs streaming error:`, error)
              try {
                const errorChunk = {
                  type: 'error',
                  error: 'Streaming failed',
                }
                controller.enqueue(encoder.encode(`data: ${JSON.stringify(errorChunk)}\n\n`))
              } catch (enqueueError) {
                logger.error(`[${requestId}] Failed to enqueue error response:`, enqueueError)
              }
            } finally {
              controller.close()
            }
          },
        }),
        {
          headers: {
            'Content-Type': 'text/event-stream',
            'Cache-Control': 'no-cache',
            Connection: 'keep-alive',
          },
        }
      )
    }

    // Handle non-streaming response
    logger.info(`[${requestId}] Docs RAG response generated successfully`)

    // Save conversation to database if we have a chat
    if (currentChat) {
      const userMessage: CopilotMessage = {
        id: crypto.randomUUID(),
        role: 'user',
        content: query,
        timestamp: new Date().toISOString(),
      }

      const assistantMessage: CopilotMessage = {
        id: crypto.randomUUID(),
        role: 'assistant',
        content: typeof result.response === 'string' ? result.response : '[Streaming Response]',
        timestamp: new Date().toISOString(),
        citations: result.sources.map((source, index) => ({
          id: index + 1,
          title: source.title,
          url: source.url,
        })),
      }

      const updatedMessages = [...conversationHistory, userMessage, assistantMessage]

      // Generate title if this is the first message
      let updatedTitle = currentChat.title ?? undefined
      if (!updatedTitle && conversationHistory.length === 0) {
        updatedTitle = await generateChatTitle(query)
      }

      // Update the chat in database
      await updateChat(currentChat.id, session.user.id, {
        title: updatedTitle,
        messages: updatedMessages,
      })

      logger.info(`[${requestId}] Updated chat ${currentChat.id} with new docs messages`)
    }

    return NextResponse.json({
      success: true,
      response: result.response,
      sources: result.sources,
      chatId: currentChat?.id,
      metadata: {
        requestId,
        chunksFound: result.sources.length,
        query,
        topSimilarity: result.sources[0]?.similarity,
        provider,
        model,
      },
    })
  } catch (error) {
    if (error instanceof z.ZodError) {
      return NextResponse.json(
        { error: 'Invalid request data', details: error.errors },
        { status: 400 }
      )
    }

    logger.error(`[${requestId}] Copilot docs error:`, error)
    return NextResponse.json({ error: 'Internal server error' }, { status: 500 })
  }
}
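A hedged sketch of consuming the SSE stream emitted above. It reads the fetch body directly rather than using EventSource (which cannot POST), and parses the `data: {...}` frames the route writes; the session cookie is assumed to be present since the route requires an authenticated user.

async function streamDocsQuery(query: string, onContent: (chunk: string) => void) {
  const res = await fetch('/api/copilot/docs', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ query, stream: true }),
  })
  if (!res.ok || !res.body) throw new Error(`Docs query failed: ${res.status}`)

  const reader = res.body.getReader()
  const decoder = new TextDecoder()
  let buffer = ''

  while (true) {
    const { done, value } = await reader.read()
    if (done) break
    buffer += decoder.decode(value, { stream: true })

    // Frames are separated by blank lines; keep any trailing partial frame
    const frames = buffer.split('\n\n')
    buffer = frames.pop() ?? ''
    for (const frame of frames) {
      if (!frame.startsWith('data: ')) continue
      const payload = JSON.parse(frame.slice(6))
      if (payload.type === 'content') onContent(payload.content)
      if (payload.type === 'error') throw new Error(payload.error)
      // 'metadata' carries sources/citations; 'done' marks completion
    }
  }
}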
@@ -1,214 +1,425 @@
|
||||
import { NextResponse } from 'next/server'
|
||||
import { OpenAI } from 'openai'
|
||||
import type { ChatCompletionMessageParam } from 'openai/resources/chat/completions'
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { z } from 'zod'
|
||||
import { getSession } from '@/lib/auth'
|
||||
import {
|
||||
createChat,
|
||||
deleteChat,
|
||||
generateChatTitle,
|
||||
getChat,
|
||||
listChats,
|
||||
sendMessage,
|
||||
updateChat,
|
||||
} from '@/lib/copilot/service'
|
||||
import { createLogger } from '@/lib/logs/console-logger'
|
||||
|
||||
const logger = createLogger('CopilotAPI')
|
||||
|
||||
const MessageSchema = z.object({
|
||||
role: z.enum(['user', 'assistant', 'system']),
|
||||
content: z.string(),
|
||||
// Interface for StreamingExecution response
|
||||
interface StreamingExecution {
|
||||
stream: ReadableStream
|
||||
execution: Promise<any>
|
||||
}
|
||||
|
||||
// Schema for sending messages
|
||||
const SendMessageSchema = z.object({
|
||||
message: z.string().min(1, 'Message is required'),
|
||||
chatId: z.string().optional(),
|
||||
workflowId: z.string().optional(),
|
||||
createNewChat: z.boolean().optional().default(false),
|
||||
stream: z.boolean().optional().default(false),
|
||||
})
|
||||
|
||||
const RequestSchema = z.object({
|
||||
messages: z.array(MessageSchema),
|
||||
workflowState: z.object({
|
||||
blocks: z.record(z.any()),
|
||||
edges: z.array(z.any()),
|
||||
}),
|
||||
// Schema for docs queries
|
||||
const DocsQuerySchema = z.object({
|
||||
query: z.string().min(1, 'Query is required'),
|
||||
topK: z.number().min(1).max(20).default(5),
|
||||
provider: z.string().optional(),
|
||||
model: z.string().optional(),
|
||||
stream: z.boolean().optional().default(false),
|
||||
chatId: z.string().optional(),
|
||||
workflowId: z.string().optional(),
|
||||
createNewChat: z.boolean().optional().default(false),
|
||||
})
|
||||
|
||||
const workflowActions = {
|
||||
addBlock: {
|
||||
description: 'Add one new block to the workflow',
|
||||
parameters: {
|
||||
type: 'object',
|
||||
required: ['type'],
|
||||
properties: {
|
||||
type: {
|
||||
type: 'string',
|
||||
enum: ['agent', 'api', 'condition', 'function', 'router'],
|
||||
description: 'The type of block to add',
|
||||
},
|
||||
name: {
|
||||
type: 'string',
|
||||
description:
|
||||
'Optional custom name for the block. Do not provide a name unless the user has specified it.',
|
||||
},
|
||||
position: {
|
||||
type: 'object',
|
||||
description:
|
||||
'Optional position for the block. Do not provide a position unless the user has specified it.',
|
||||
properties: {
|
||||
x: { type: 'number' },
|
||||
y: { type: 'number' },
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
addEdge: {
|
||||
description: 'Create a connection (edge) between two blocks',
|
||||
parameters: {
|
||||
type: 'object',
|
||||
required: ['sourceId', 'targetId'],
|
||||
properties: {
|
||||
sourceId: {
|
||||
type: 'string',
|
||||
description: 'ID of the source block',
|
||||
},
|
||||
targetId: {
|
||||
type: 'string',
|
||||
description: 'ID of the target block',
|
||||
},
|
||||
sourceHandle: {
|
||||
type: 'string',
|
||||
description: 'Optional handle identifier for the source connection point',
|
||||
},
|
||||
targetHandle: {
|
||||
type: 'string',
|
||||
description: 'Optional handle identifier for the target connection point',
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
removeBlock: {
|
||||
description: 'Remove a block from the workflow',
|
||||
parameters: {
|
||||
type: 'object',
|
||||
required: ['id'],
|
||||
properties: {
|
||||
id: { type: 'string', description: 'ID of the block to remove' },
|
||||
},
|
||||
},
|
||||
},
|
||||
removeEdge: {
|
||||
description: 'Remove a connection (edge) between blocks',
|
||||
parameters: {
|
||||
type: 'object',
|
||||
required: ['id'],
|
||||
properties: {
|
||||
id: { type: 'string', description: 'ID of the edge to remove' },
|
||||
},
|
||||
},
|
||||
},
|
||||
}
// Schema for creating chats
const CreateChatSchema = z.object({
  workflowId: z.string().min(1, 'Workflow ID is required'),
  title: z.string().optional(),
  initialMessage: z.string().optional(),
})

// System prompt that references workflow state
const getSystemPrompt = (workflowState: any) => {
  const blockCount = Object.keys(workflowState.blocks).length
  const edgeCount = workflowState.edges.length

// Schema for updating chats
const UpdateChatSchema = z.object({
  chatId: z.string().min(1, 'Chat ID is required'),
  messages: z
    .array(
      z.object({
        id: z.string(),
        role: z.enum(['user', 'assistant', 'system']),
        content: z.string(),
        timestamp: z.string(),
        citations: z
          .array(
            z.object({
              id: z.number(),
              title: z.string(),
              url: z.string(),
              similarity: z.number().optional(),
            })
          )
          .optional(),
      })
    )
    .optional(),
  title: z.string().optional(),
})

  // Create a summary of existing blocks
  const blockSummary = Object.values(workflowState.blocks)
    .map((block: any) => `- ${block.type} block named "${block.name}" with id ${block.id}`)
    .join('\n')

// Schema for listing chats
const ListChatsSchema = z.object({
  workflowId: z.string().min(1, 'Workflow ID is required'),
  limit: z.number().min(1).max(100).optional().default(50),
  offset: z.number().min(0).optional().default(0),
})

  // Create a summary of existing edges
  const edgeSummary = workflowState.edges
    .map((edge: any) => `- ${edge.source} -> ${edge.target} with id ${edge.id}`)
    .join('\n')

  return `You are a workflow assistant that helps users modify their workflow by adding/removing blocks and connections.

Current Workflow State:
${
  blockCount === 0
    ? 'The workflow is empty.'
    : `${blockSummary}

Connections:
${edgeCount === 0 ? 'No connections between blocks.' : edgeSummary}`
}

When users request changes:
- Consider existing blocks when suggesting connections
- Provide clear feedback about what actions you've taken

Use the following functions to modify the workflow:
1. Use the addBlock function to create a new block
2. Use the addEdge function to connect one block to another
3. Use the removeBlock function to remove a block
4. Use the removeEdge function to remove a connection

Only use the provided functions and respond naturally to the user's requests.`
}
export async function POST(request: Request) {
  const requestId = crypto.randomUUID().slice(0, 8)

/**
 * POST /api/copilot
 * Send a message to the copilot
 */
export async function POST(req: NextRequest) {
  const requestId = crypto.randomUUID()

  try {
    // Validate API key
    const apiKey = request.headers.get('X-OpenAI-Key')
    if (!apiKey) {
      return NextResponse.json({ error: 'OpenAI API key is required' }, { status: 401 })
    const body = await req.json()
    const { message, chatId, workflowId, createNewChat, stream } = SendMessageSchema.parse(body)

    const session = await getSession()
    if (!session?.user?.id) {
      return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
    }

    // Parse and validate request body
    const body = await request.json()
    const validatedData = RequestSchema.parse(body)
    const { messages, workflowState } = validatedData

    // Initialize OpenAI client
    const openai = new OpenAI({ apiKey })

    // Create message history with workflow context
    const messageHistory = [
      { role: 'system', content: getSystemPrompt(workflowState) },
      ...messages,
    ]

    // Make OpenAI API call with workflow context
    const completion = await openai.chat.completions.create({
      model: 'gpt-4o',
      messages: messageHistory as ChatCompletionMessageParam[],
      tools: Object.entries(workflowActions).map(([name, config]) => ({
        type: 'function',
        function: {
          name,
          description: config.description,
          parameters: config.parameters,
        },
      })),
      tool_choice: 'auto',
    logger.info(`[${requestId}] Copilot message: "${message}"`, {
      chatId,
      workflowId,
      createNewChat,
      stream,
      userId: session.user.id,
    })

    const message = completion.choices[0].message
    // Send message using the service
    const result = await sendMessage({
      message,
      chatId,
      workflowId,
      createNewChat,
      stream,
      userId: session.user.id,
    })

    // Process tool calls if present
    if (message.tool_calls) {
      logger.debug(`[${requestId}] Tool calls:`, {
        toolCalls: message.tool_calls,
      })
      const actions = message.tool_calls.map((call) => ({
        name: call.function.name,
        parameters: JSON.parse(call.function.arguments),
      }))
    // Handle streaming response (ReadableStream or StreamingExecution)
    let streamToRead: ReadableStream | null = null

      return NextResponse.json({
        message: message.content || "I've updated the workflow based on your request.",
        actions,
      })
    // Debug logging to see what we actually got
    logger.info(`[${requestId}] Response type analysis:`, {
      responseType: typeof result.response,
      isReadableStream: result.response instanceof ReadableStream,
      hasStreamProperty:
        typeof result.response === 'object' && result.response && 'stream' in result.response,
      hasExecutionProperty:
        typeof result.response === 'object' && result.response && 'execution' in result.response,
      responseKeys:
        typeof result.response === 'object' && result.response ? Object.keys(result.response) : [],
    })

    if (result.response instanceof ReadableStream) {
      logger.info(`[${requestId}] Direct ReadableStream detected`)
      streamToRead = result.response
    } else if (
      typeof result.response === 'object' &&
      result.response &&
      'stream' in result.response &&
      'execution' in result.response
    ) {
      // Handle StreamingExecution (from providers with tool calls)
      logger.info(`[${requestId}] StreamingExecution detected`)
      const streamingExecution = result.response as StreamingExecution
      streamToRead = streamingExecution.stream

      // No need to extract citations - LLM generates direct markdown links
    }

    // Return response with no actions
    if (streamToRead) {
      logger.info(`[${requestId}] Returning streaming response`)

      const encoder = new TextEncoder()

      return new Response(
        new ReadableStream({
          async start(controller) {
            const reader = streamToRead!.getReader()
            let accumulatedResponse = ''

            // Send initial metadata
            const metadata = {
              type: 'metadata',
              chatId: result.chatId,
              metadata: {
                requestId,
                message,
              },
            }
            controller.enqueue(encoder.encode(`data: ${JSON.stringify(metadata)}\n\n`))

            try {
              while (true) {
                const { done, value } = await reader.read()
                if (done) break

                const chunkText = new TextDecoder().decode(value)
                accumulatedResponse += chunkText

                const contentChunk = {
                  type: 'content',
                  content: chunkText,
                }
                controller.enqueue(encoder.encode(`data: ${JSON.stringify(contentChunk)}\n\n`))
              }

              // Send completion signal
              const completion = {
                type: 'complete',
                finalContent: accumulatedResponse,
              }
              controller.enqueue(encoder.encode(`data: ${JSON.stringify(completion)}\n\n`))
              controller.close()
            } catch (error) {
              logger.error(`[${requestId}] Streaming error:`, error)
              const errorChunk = {
                type: 'error',
                error: 'Streaming failed',
              }
              controller.enqueue(encoder.encode(`data: ${JSON.stringify(errorChunk)}\n\n`))
              controller.close()
            }
          },
        }),
        {
          headers: {
            'Content-Type': 'text/event-stream',
            'Cache-Control': 'no-cache',
            Connection: 'keep-alive',
          },
        }
      )
    }

    // Handle non-streaming response
    logger.info(`[${requestId}] Chat response generated successfully`)

    return NextResponse.json({
      message:
        message.content ||
        "I'm not sure what changes to make to the workflow. Can you please provide more specific instructions?",
      success: true,
      response: result.response,
      chatId: result.chatId,
      metadata: {
        requestId,
        message,
      },
    })
  } catch (error) {
    logger.error(`[${requestId}] Copilot API error:`, { error })

    // Handle specific error types
    if (error instanceof z.ZodError) {
      return NextResponse.json(
        { error: 'Invalid request format', details: error.errors },
        { error: 'Invalid request data', details: error.errors },
        { status: 400 }
      )
    }

    return NextResponse.json({ error: 'Failed to process copilot message' }, { status: 500 })
    logger.error(`[${requestId}] Copilot error:`, error)
    return NextResponse.json({ error: 'Internal server error' }, { status: 500 })
  }
}
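The streaming branch above frames its output as server-sent events whose JSON payloads carry a type of metadata, content, complete, or error. A sketch of a client consuming that stream (the request fields and event shapes come from the handler above; the helper itself is hypothetical):

// Hypothetical client for the SSE stream emitted by the POST handler above.
async function streamCopilotMessage(message: string, workflowId: string): Promise<string> {
  const res = await fetch('/api/copilot', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ message, workflowId, stream: true }),
  })

  const reader = res.body!.getReader()
  const decoder = new TextDecoder()
  let buffer = ''
  let text = ''

  while (true) {
    const { done, value } = await reader.read()
    if (done) break
    buffer += decoder.decode(value, { stream: true })

    // Events arrive as "data: {json}\n\n" frames
    let boundary = buffer.indexOf('\n\n')
    while (boundary !== -1) {
      const frame = buffer.slice(0, boundary).trim()
      buffer = buffer.slice(boundary + 2)
      boundary = buffer.indexOf('\n\n')
      if (!frame.startsWith('data: ')) continue
      const event = JSON.parse(frame.slice(6))
      if (event.type === 'content') text += event.content
      if (event.type === 'complete') return event.finalContent
      if (event.type === 'error') throw new Error(event.error)
    }
  }
  return text
}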
/**
 * GET /api/copilot
 * List chats or get a specific chat
 */
export async function GET(req: NextRequest) {
  try {
    const session = await getSession()
    if (!session?.user?.id) {
      return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
    }

    const { searchParams } = new URL(req.url)
    const chatId = searchParams.get('chatId')

    // If chatId is provided, get specific chat
    if (chatId) {
      const chat = await getChat(chatId, session.user.id)
      if (!chat) {
        return NextResponse.json({ error: 'Chat not found' }, { status: 404 })
      }

      return NextResponse.json({
        success: true,
        chat,
      })
    }

    // Otherwise, list chats
    const workflowId = searchParams.get('workflowId')
    const limit = Number.parseInt(searchParams.get('limit') || '50')
    const offset = Number.parseInt(searchParams.get('offset') || '0')

    if (!workflowId) {
      return NextResponse.json(
        { error: 'workflowId is required for listing chats' },
        { status: 400 }
      )
    }

    const chats = await listChats(session.user.id, workflowId, { limit, offset })

    return NextResponse.json({
      success: true,
      chats,
    })
  } catch (error) {
    logger.error('Failed to handle GET request:', error)
    return NextResponse.json({ error: 'Internal server error' }, { status: 500 })
  }
}
/**
 * PUT /api/copilot
 * Create a new chat
 */
export async function PUT(req: NextRequest) {
  try {
    const session = await getSession()
    if (!session?.user?.id) {
      return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
    }

    const body = await req.json()
    const { workflowId, title, initialMessage } = CreateChatSchema.parse(body)

    logger.info(`Creating new chat for user ${session.user.id}, workflow ${workflowId}`)

    const chat = await createChat(session.user.id, workflowId, {
      title,
      initialMessage,
    })

    logger.info(`Created chat ${chat.id} for user ${session.user.id}`)

    return NextResponse.json({
      success: true,
      chat,
    })
  } catch (error) {
    if (error instanceof z.ZodError) {
      return NextResponse.json(
        { error: 'Invalid request data', details: error.errors },
        { status: 400 }
      )
    }

    logger.error('Failed to create chat:', error)
    return NextResponse.json({ error: 'Internal server error' }, { status: 500 })
  }
}

/**
 * PATCH /api/copilot
 * Update a chat with new messages
 */
export async function PATCH(req: NextRequest) {
  try {
    const session = await getSession()
    if (!session?.user?.id) {
      return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
    }

    const body = await req.json()
    const { chatId, messages, title } = UpdateChatSchema.parse(body)

    logger.info(`Updating chat ${chatId} for user ${session.user.id}`)

    // Get the current chat to check if it has a title
    const existingChat = await getChat(chatId, session.user.id)

    let titleToUse = title

    // Generate title if chat doesn't have one and we have messages
    if (!titleToUse && existingChat && !existingChat.title && messages && messages.length > 0) {
      const firstUserMessage = messages.find((msg) => msg.role === 'user')
      if (firstUserMessage) {
        logger.info('Generating LLM-based title for chat without title')
        try {
          titleToUse = await generateChatTitle(firstUserMessage.content)
          logger.info(`Generated title: ${titleToUse}`)
        } catch (error) {
          logger.error('Failed to generate chat title:', error)
          titleToUse = 'New Chat'
        }
      }
    }

    const chat = await updateChat(chatId, session.user.id, {
      messages,
      title: titleToUse,
    })

    if (!chat) {
      return NextResponse.json({ error: 'Chat not found or access denied' }, { status: 404 })
    }

    return NextResponse.json({
      success: true,
      chat,
    })
  } catch (error) {
    if (error instanceof z.ZodError) {
      return NextResponse.json(
        { error: 'Invalid request data', details: error.errors },
        { status: 400 }
      )
    }

    logger.error('Failed to update chat:', error)
    return NextResponse.json({ error: 'Internal server error' }, { status: 500 })
  }
}

/**
 * DELETE /api/copilot
 * Delete a chat
 */
export async function DELETE(req: NextRequest) {
  try {
    const session = await getSession()
    if (!session?.user?.id) {
      return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
    }

    const { searchParams } = new URL(req.url)
    const chatId = searchParams.get('chatId')

    if (!chatId) {
      return NextResponse.json({ error: 'chatId is required' }, { status: 400 })
    }

    const success = await deleteChat(chatId, session.user.id)

    if (!success) {
      return NextResponse.json({ error: 'Chat not found or access denied' }, { status: 404 })
    }

    return NextResponse.json({
      success: true,
      message: 'Chat deleted successfully',
    })
  } catch (error) {
    logger.error('Failed to delete chat:', error)
    return NextResponse.json({ error: 'Internal server error' }, { status: 500 })
  }
}
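Between them, the handlers map chat management onto HTTP verbs: PUT creates, GET fetches or lists, PATCH updates, DELETE removes. A quick illustrative client (paths, query parameters, and payload fields are taken from the handlers above; the wrapper function is hypothetical):

// Hypothetical helper exercising the copilot chat CRUD endpoints above.
async function copilotChatLifecycle(workflowId: string) {
  // PUT creates a chat
  const created = await fetch('/api/copilot', {
    method: 'PUT',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ workflowId, title: 'Scratch chat' }),
  })
  const { chat } = await created.json()

  // GET with workflowId lists chats; GET with chatId would fetch one
  const listed = await fetch(`/api/copilot?workflowId=${workflowId}&limit=10`)
  const { chats } = await listed.json()

  // DELETE removes it again
  await fetch(`/api/copilot?chatId=${chat.id}`, { method: 'DELETE' })
  return chats
}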
apps/sim/app/api/docs/search/route.ts (new file, 76 lines)
@@ -0,0 +1,76 @@
import { type NextRequest, NextResponse } from 'next/server'
import { searchDocumentation } from '@/lib/copilot/service'
import { createLogger } from '@/lib/logs/console-logger'

const logger = createLogger('DocsSearchAPI')

// Request and response type definitions
interface DocsSearchRequest {
  query: string
  topK?: number
}

interface DocsSearchResult {
  id: number
  title: string
  url: string
  content: string
  similarity: number
}

interface DocsSearchSuccessResponse {
  success: true
  results: DocsSearchResult[]
  query: string
  totalResults: number
  searchTime?: number
}

interface DocsSearchErrorResponse {
  success: false
  error: string
}

export async function POST(
  request: NextRequest
): Promise<NextResponse<DocsSearchSuccessResponse | DocsSearchErrorResponse>> {
  try {
    const requestBody: DocsSearchRequest = await request.json()
    const { query, topK = 5 } = requestBody

    if (!query) {
      const errorResponse: DocsSearchErrorResponse = {
        success: false,
        error: 'Query is required',
      }
      return NextResponse.json(errorResponse, { status: 400 })
    }

    logger.info('Executing documentation search', { query, topK })

    const startTime = Date.now()
    const results = await searchDocumentation(query, { topK })
    const searchTime = Date.now() - startTime

    logger.info(`Found ${results.length} documentation results`, { query })

    const successResponse: DocsSearchSuccessResponse = {
      success: true,
      results,
      query,
      totalResults: results.length,
      searchTime,
    }

    return NextResponse.json(successResponse)
  } catch (error) {
    logger.error('Documentation search API failed', error)

    const errorResponse: DocsSearchErrorResponse = {
      success: false,
      error: `Documentation search failed: ${error instanceof Error ? error.message : 'Unknown error'}`,
    }

    return NextResponse.json(errorResponse, { status: 500 })
  }
}
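For reference, a call against this endpoint is a single POST carrying a query and an optional topK; the response shape is the success/error union declared above. A minimal sketch (the query text is illustrative):

// Hypothetical caller for the docs search endpoint above.
async function searchDocs(query: string, topK = 3) {
  const res = await fetch('/api/docs/search', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ query, topK }),
  })
  const payload = await res.json()
  if (!payload.success) throw new Error(payload.error)

  // Each result carries a similarity score for ranking plus a docs URL for citation
  for (const hit of payload.results) {
    console.log(`${hit.similarity.toFixed(2)}  ${hit.title}  ${hit.url}`)
  }
  return payload.results
}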
@@ -18,7 +18,7 @@ import {
  isBlobPath,
  isCloudPath,
  isS3Path,
} from '../utils'
} from '@/app/api/files/utils'

export const dynamic = 'force-dynamic'

@@ -447,7 +447,7 @@ async function handleCsvBuffer(
  logger.info(`Parsing CSV in memory: ${filename}`)

  // Use the parseBuffer function from our library
  const { parseBuffer } = await import('../../../../lib/file-parsers')
  const { parseBuffer } = await import('@/lib/file-parsers')
  const result = await parseBuffer(fileBuffer, 'csv')

  return {
@@ -492,7 +492,7 @@ async function handleGenericTextBuffer(

  // Try to use a specialized parser if available
  try {
    const { parseBuffer, isSupportedFileType } = await import('../../../../lib/file-parsers')
    const { parseBuffer, isSupportedFileType } = await import('@/lib/file-parsers')

    if (isSupportedFileType(extension)) {
      const result = await parseBuffer(fileBuffer, extension)
@@ -578,7 +578,7 @@ async function parseBufferAsPdf(buffer: Buffer) {
  // Import parsers dynamically to avoid initialization issues in tests
  // First try to use the main PDF parser
  try {
    const { PdfParser } = await import('../../../../lib/file-parsers/pdf-parser')
    const { PdfParser } = await import('@/lib/file-parsers/pdf-parser')
    const parser = new PdfParser()
    logger.info('Using main PDF parser for buffer')

@@ -589,7 +589,7 @@ async function parseBufferAsPdf(buffer: Buffer) {
  } catch (error) {
    // Fallback to raw PDF parser
    logger.warn('Main PDF parser failed, using raw parser for buffer:', error)
    const { RawPdfParser } = await import('../../../../lib/file-parsers/raw-pdf-parser')
    const { RawPdfParser } = await import('@/lib/file-parsers/raw-pdf-parser')
    const rawParser = new RawPdfParser()

    return await rawParser.parseBuffer(buffer)
@@ -39,8 +39,9 @@ describe('/api/files/presigned', () => {
    const response = await POST(request)
    const data = await response.json()

    expect(response.status).toBe(400)
    expect(response.status).toBe(500) // Changed from 400 to 500 (StorageConfigError)
    expect(data.error).toBe('Direct uploads are only available when cloud storage is enabled')
    expect(data.code).toBe('STORAGE_CONFIG_ERROR')
    expect(data.directUploadSupported).toBe(false)
  })

@@ -64,7 +65,8 @@ describe('/api/files/presigned', () => {
    const data = await response.json()

    expect(response.status).toBe(400)
    expect(data.error).toBe('Missing fileName or contentType')
    expect(data.error).toBe('fileName is required and cannot be empty')
    expect(data.code).toBe('VALIDATION_ERROR')
  })

  it('should return error when contentType is missing', async () => {
@@ -87,7 +89,59 @@ describe('/api/files/presigned', () => {
    const data = await response.json()

    expect(response.status).toBe(400)
    expect(data.error).toBe('Missing fileName or contentType')
    expect(data.error).toBe('contentType is required and cannot be empty')
    expect(data.code).toBe('VALIDATION_ERROR')
  })

  it('should return error when fileSize is invalid', async () => {
    setupFileApiMocks({
      cloudEnabled: true,
      storageProvider: 's3',
    })

    const { POST } = await import('./route')

    const request = new NextRequest('http://localhost:3000/api/files/presigned', {
      method: 'POST',
      body: JSON.stringify({
        fileName: 'test.txt',
        contentType: 'text/plain',
        fileSize: 0,
      }),
    })

    const response = await POST(request)
    const data = await response.json()

    expect(response.status).toBe(400)
    expect(data.error).toBe('fileSize must be a positive number')
    expect(data.code).toBe('VALIDATION_ERROR')
  })

  it('should return error when file size exceeds limit', async () => {
    setupFileApiMocks({
      cloudEnabled: true,
      storageProvider: 's3',
    })

    const { POST } = await import('./route')

    const largeFileSize = 150 * 1024 * 1024 // 150MB (exceeds 100MB limit)
    const request = new NextRequest('http://localhost:3000/api/files/presigned', {
      method: 'POST',
      body: JSON.stringify({
        fileName: 'large-file.txt',
        contentType: 'text/plain',
        fileSize: largeFileSize,
      }),
    })

    const response = await POST(request)
    const data = await response.json()

    expect(response.status).toBe(400)
    expect(data.error).toContain('exceeds maximum allowed size')
    expect(data.code).toBe('VALIDATION_ERROR')
  })

  it('should generate S3 presigned URL successfully', async () => {
@@ -122,6 +176,34 @@ describe('/api/files/presigned', () => {
    expect(data.directUploadSupported).toBe(true)
  })

  it('should generate knowledge-base S3 presigned URL with kb prefix', async () => {
    setupFileApiMocks({
      cloudEnabled: true,
      storageProvider: 's3',
    })

    const { POST } = await import('./route')

    const request = new NextRequest(
      'http://localhost:3000/api/files/presigned?type=knowledge-base',
      {
        method: 'POST',
        body: JSON.stringify({
          fileName: 'knowledge-doc.pdf',
          contentType: 'application/pdf',
          fileSize: 2048,
        }),
      }
    )

    const response = await POST(request)
    const data = await response.json()

    expect(response.status).toBe(200)
    expect(data.fileInfo.key).toMatch(/^kb\/.*knowledge-doc\.pdf$/)
    expect(data.directUploadSupported).toBe(true)
  })

  it('should generate Azure Blob presigned URL successfully', async () => {
    setupFileApiMocks({
      cloudEnabled: true,
@@ -182,8 +264,9 @@ describe('/api/files/presigned', () => {
    const response = await POST(request)
    const data = await response.json()

    expect(response.status).toBe(400)
    expect(data.error).toBe('Unknown storage provider')
    expect(response.status).toBe(500) // Changed from 400 to 500 (StorageConfigError)
    expect(data.error).toBe('Unknown storage provider: unknown') // Updated error message
    expect(data.code).toBe('STORAGE_CONFIG_ERROR')
    expect(data.directUploadSupported).toBe(false)
  })

@@ -225,8 +308,10 @@ describe('/api/files/presigned', () => {
    const data = await response.json()

    expect(response.status).toBe(500)
    expect(data.error).toBe('Error')
    expect(data.message).toBe('S3 service unavailable')
    expect(data.error).toBe(
      'Failed to generate S3 presigned URL - check AWS credentials and permissions'
    ) // Updated error message
    expect(data.code).toBe('STORAGE_CONFIG_ERROR')
  })

  it('should handle Azure Blob errors gracefully', async () => {
@@ -269,8 +354,8 @@ describe('/api/files/presigned', () => {
    const data = await response.json()

    expect(response.status).toBe(500)
    expect(data.error).toBe('Error')
    expect(data.message).toBe('Azure service unavailable')
    expect(data.error).toBe('Failed to generate Azure Blob presigned URL') // Updated error message
    expect(data.code).toBe('STORAGE_CONFIG_ERROR')
  })

  it('should handle malformed JSON gracefully', async () => {
@@ -289,9 +374,9 @@ describe('/api/files/presigned', () => {
    const response = await POST(request)
    const data = await response.json()

    expect(response.status).toBe(500)
    expect(data.error).toBe('SyntaxError')
    expect(data.message).toContain('Unexpected token')
    expect(response.status).toBe(400) // Changed from 500 to 400 (ValidationError)
    expect(data.error).toBe('Invalid JSON in request body') // Updated error message
    expect(data.code).toBe('VALIDATION_ERROR')
  })
})
@@ -6,8 +6,8 @@ import { createLogger } from '@/lib/logs/console-logger'
import { getStorageProvider, isUsingCloudStorage } from '@/lib/uploads'
import { getBlobServiceClient } from '@/lib/uploads/blob/blob-client'
import { getS3Client, sanitizeFilenameForMetadata } from '@/lib/uploads/s3/s3-client'
import { BLOB_CONFIG, S3_CONFIG } from '@/lib/uploads/setup'
import { createErrorResponse, createOptionsResponse } from '../utils'
import { BLOB_CONFIG, BLOB_KB_CONFIG, S3_CONFIG, S3_KB_CONFIG } from '@/lib/uploads/setup'
import { createErrorResponse, createOptionsResponse } from '@/app/api/files/utils'

const logger = createLogger('PresignedUploadAPI')

@@ -17,124 +17,148 @@ interface PresignedUrlRequest {
  fileSize: number
}

type UploadType = 'general' | 'knowledge-base'

class PresignedUrlError extends Error {
  constructor(
    message: string,
    public code: string,
    public statusCode = 400
  ) {
    super(message)
    this.name = 'PresignedUrlError'
  }
}

class StorageConfigError extends PresignedUrlError {
  constructor(message: string) {
    super(message, 'STORAGE_CONFIG_ERROR', 500)
  }
}

class ValidationError extends PresignedUrlError {
  constructor(message: string) {
    super(message, 'VALIDATION_ERROR', 400)
  }
}

export async function POST(request: NextRequest) {
  try {
    // Parse the request body
    const data: PresignedUrlRequest = await request.json()
    const { fileName, contentType, fileSize } = data

    if (!fileName || !contentType) {
      return NextResponse.json({ error: 'Missing fileName or contentType' }, { status: 400 })
    let data: PresignedUrlRequest
    try {
      data = await request.json()
    } catch {
      throw new ValidationError('Invalid JSON in request body')
    }

    // Only proceed if cloud storage is enabled
    const { fileName, contentType, fileSize } = data

    if (!fileName?.trim()) {
      throw new ValidationError('fileName is required and cannot be empty')
    }
    if (!contentType?.trim()) {
      throw new ValidationError('contentType is required and cannot be empty')
    }
    if (!fileSize || fileSize <= 0) {
      throw new ValidationError('fileSize must be a positive number')
    }

    const MAX_FILE_SIZE = 100 * 1024 * 1024
    if (fileSize > MAX_FILE_SIZE) {
      throw new ValidationError(
        `File size (${fileSize} bytes) exceeds maximum allowed size (${MAX_FILE_SIZE} bytes)`
      )
    }

    const uploadTypeParam = request.nextUrl.searchParams.get('type')
    const uploadType: UploadType =
      uploadTypeParam === 'knowledge-base' ? 'knowledge-base' : 'general'

    if (!isUsingCloudStorage()) {
      return NextResponse.json(
        {
          error: 'Direct uploads are only available when cloud storage is enabled',
          directUploadSupported: false,
        },
        { status: 400 }
      throw new StorageConfigError(
        'Direct uploads are only available when cloud storage is enabled'
      )
    }

    const storageProvider = getStorageProvider()
    logger.info(`Generating ${uploadType} presigned URL for ${fileName} using ${storageProvider}`)

    switch (storageProvider) {
      case 's3':
        return await handleS3PresignedUrl(fileName, contentType, fileSize)
        return await handleS3PresignedUrl(fileName, contentType, fileSize, uploadType)
      case 'blob':
        return await handleBlobPresignedUrl(fileName, contentType, fileSize)
        return await handleBlobPresignedUrl(fileName, contentType, fileSize, uploadType)
      default:
        return NextResponse.json(
          {
            error: 'Unknown storage provider',
            directUploadSupported: false,
          },
          { status: 400 }
        )
        throw new StorageConfigError(`Unknown storage provider: ${storageProvider}`)
    }
  } catch (error) {
    logger.error('Error generating presigned URL:', error)

    if (error instanceof PresignedUrlError) {
      return NextResponse.json(
        {
          error: error.message,
          code: error.code,
          directUploadSupported: false,
        },
        { status: error.statusCode }
      )
    }

    return createErrorResponse(
      error instanceof Error ? error : new Error('Failed to generate presigned URL')
    )
  }
}

async function handleS3PresignedUrl(fileName: string, contentType: string, fileSize: number) {
  // Create a unique key for the file
  const safeFileName = fileName.replace(/\s+/g, '-')
  const uniqueKey = `${Date.now()}-${uuidv4()}-${safeFileName}`

  // Sanitize the original filename for S3 metadata to prevent header errors
  const sanitizedOriginalName = sanitizeFilenameForMetadata(fileName)

  // Create the S3 command
  const command = new PutObjectCommand({
    Bucket: S3_CONFIG.bucket,
    Key: uniqueKey,
    ContentType: contentType,
    Metadata: {
      originalName: sanitizedOriginalName,
      uploadedAt: new Date().toISOString(),
    },
  })

  // Generate the presigned URL
  const presignedUrl = await getSignedUrl(getS3Client(), command, { expiresIn: 3600 })

  // Create a path for API to serve the file
  const servePath = `/api/files/serve/s3/${encodeURIComponent(uniqueKey)}`

  logger.info(`Generated presigned URL for ${fileName} (${uniqueKey})`)

  return NextResponse.json({
    presignedUrl,
    fileInfo: {
      path: servePath,
      key: uniqueKey,
      name: fileName,
      size: fileSize,
      type: contentType,
    },
    directUploadSupported: true,
  })
}

async function handleBlobPresignedUrl(fileName: string, contentType: string, fileSize: number) {
  // Create a unique key for the file
  const safeFileName = fileName.replace(/\s+/g, '-')
  const uniqueKey = `${Date.now()}-${uuidv4()}-${safeFileName}`

async function handleS3PresignedUrl(
  fileName: string,
  contentType: string,
  fileSize: number,
  uploadType: UploadType
) {
  try {
    const blobServiceClient = getBlobServiceClient()
    const containerClient = blobServiceClient.getContainerClient(BLOB_CONFIG.containerName)
    const blockBlobClient = containerClient.getBlockBlobClient(uniqueKey)
    const config = uploadType === 'knowledge-base' ? S3_KB_CONFIG : S3_CONFIG

    // Generate SAS token for upload (write permission)
    const { BlobSASPermissions, generateBlobSASQueryParameters, StorageSharedKeyCredential } =
      await import('@azure/storage-blob')

    const sasOptions = {
      containerName: BLOB_CONFIG.containerName,
      blobName: uniqueKey,
      permissions: BlobSASPermissions.parse('w'), // Write permission for upload
      startsOn: new Date(),
      expiresOn: new Date(Date.now() + 3600 * 1000), // 1 hour expiration
    if (!config.bucket || !config.region) {
      throw new StorageConfigError(`S3 configuration missing for ${uploadType} uploads`)
    }

    const sasToken = generateBlobSASQueryParameters(
      sasOptions,
      new StorageSharedKeyCredential(BLOB_CONFIG.accountName, BLOB_CONFIG.accountKey || '')
    ).toString()
    const safeFileName = fileName.replace(/\s+/g, '-').replace(/[^a-zA-Z0-9.-]/g, '_')
    const prefix = uploadType === 'knowledge-base' ? 'kb/' : ''
    const uniqueKey = `${prefix}${Date.now()}-${uuidv4()}-${safeFileName}`

    const presignedUrl = `${blockBlobClient.url}?${sasToken}`
    const sanitizedOriginalName = sanitizeFilenameForMetadata(fileName)

    // Create a path for API to serve the file
    const servePath = `/api/files/serve/blob/${encodeURIComponent(uniqueKey)}`
    const metadata: Record<string, string> = {
      originalName: sanitizedOriginalName,
      uploadedAt: new Date().toISOString(),
    }

    logger.info(`Generated presigned URL for ${fileName} (${uniqueKey})`)
    if (uploadType === 'knowledge-base') {
      metadata.purpose = 'knowledge-base'
    }

    const command = new PutObjectCommand({
      Bucket: config.bucket,
      Key: uniqueKey,
      ContentType: contentType,
      Metadata: metadata,
    })

    let presignedUrl: string
    try {
      presignedUrl = await getSignedUrl(getS3Client(), command, { expiresIn: 3600 })
    } catch (s3Error) {
      logger.error('Failed to generate S3 presigned URL:', s3Error)
      throw new StorageConfigError(
        'Failed to generate S3 presigned URL - check AWS credentials and permissions'
      )
    }

    const servePath = `/api/files/serve/s3/${encodeURIComponent(uniqueKey)}`

    logger.info(`Generated ${uploadType} S3 presigned URL for ${fileName} (${uniqueKey})`)

    return NextResponse.json({
      presignedUrl,
@@ -146,22 +170,103 @@ async function handleBlobPresignedUrl(fileName: string, contentType: string, fil
      type: contentType,
    },
    directUploadSupported: true,
    uploadHeaders: {
      'x-ms-blob-type': 'BlockBlob',
      'x-ms-blob-content-type': contentType,
      'x-ms-meta-originalname': encodeURIComponent(fileName),
      'x-ms-meta-uploadedat': new Date().toISOString(),
    },
  })
} catch (error) {
  logger.error('Error generating Blob presigned URL:', error)
  return createErrorResponse(
    error instanceof Error ? error : new Error('Failed to generate Blob presigned URL')
  )
    if (error instanceof PresignedUrlError) {
      throw error
    }
    logger.error('Error in S3 presigned URL generation:', error)
    throw new StorageConfigError('Failed to generate S3 presigned URL')
  }
}

async function handleBlobPresignedUrl(
  fileName: string,
  contentType: string,
  fileSize: number,
  uploadType: UploadType
) {
  try {
    const config = uploadType === 'knowledge-base' ? BLOB_KB_CONFIG : BLOB_CONFIG

    if (
      !config.accountName ||
      !config.containerName ||
      (!config.accountKey && !config.connectionString)
    ) {
      throw new StorageConfigError(`Azure Blob configuration missing for ${uploadType} uploads`)
    }

    const safeFileName = fileName.replace(/\s+/g, '-').replace(/[^a-zA-Z0-9.-]/g, '_')
    const prefix = uploadType === 'knowledge-base' ? 'kb/' : ''
    const uniqueKey = `${prefix}${Date.now()}-${uuidv4()}-${safeFileName}`

    const blobServiceClient = getBlobServiceClient()
    const containerClient = blobServiceClient.getContainerClient(config.containerName)
    const blockBlobClient = containerClient.getBlockBlobClient(uniqueKey)

    const { BlobSASPermissions, generateBlobSASQueryParameters, StorageSharedKeyCredential } =
      await import('@azure/storage-blob')

    const sasOptions = {
      containerName: config.containerName,
      blobName: uniqueKey,
      permissions: BlobSASPermissions.parse('w'), // Write permission for upload
      startsOn: new Date(),
      expiresOn: new Date(Date.now() + 3600 * 1000), // 1 hour expiration
    }

    let sasToken: string
    try {
      sasToken = generateBlobSASQueryParameters(
        sasOptions,
        new StorageSharedKeyCredential(config.accountName, config.accountKey || '')
      ).toString()
    } catch (blobError) {
      logger.error('Failed to generate Azure Blob SAS token:', blobError)
      throw new StorageConfigError(
        'Failed to generate Azure Blob SAS token - check Azure credentials and permissions'
      )
    }

    const presignedUrl = `${blockBlobClient.url}?${sasToken}`

    const servePath = `/api/files/serve/blob/${encodeURIComponent(uniqueKey)}`

    logger.info(`Generated ${uploadType} Azure Blob presigned URL for ${fileName} (${uniqueKey})`)

    const uploadHeaders: Record<string, string> = {
      'x-ms-blob-type': 'BlockBlob',
      'x-ms-blob-content-type': contentType,
      'x-ms-meta-originalname': encodeURIComponent(fileName),
      'x-ms-meta-uploadedat': new Date().toISOString(),
    }

    if (uploadType === 'knowledge-base') {
      uploadHeaders['x-ms-meta-purpose'] = 'knowledge-base'
    }

    return NextResponse.json({
      presignedUrl,
      fileInfo: {
        path: servePath,
        key: uniqueKey,
        name: fileName,
        size: fileSize,
        type: contentType,
      },
      directUploadSupported: true,
      uploadHeaders,
    })
  } catch (error) {
    if (error instanceof PresignedUrlError) {
      throw error
    }
    logger.error('Error in Azure Blob presigned URL generation:', error)
    throw new StorageConfigError('Failed to generate Azure Blob presigned URL')
  }
}

// Handle preflight requests
export async function OPTIONS() {
  return createOptionsResponse()
}
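End to end, a client is expected to POST file metadata here, then PUT the bytes directly to the returned presignedUrl, attaching uploadHeaders when the provider is Azure Blob (S3 accepts a bare PUT). A rough sketch under those assumptions; the wrapper function is hypothetical:

// Hypothetical direct-upload client for the presigned route above.
async function directUpload(file: File, kind: 'general' | 'knowledge-base' = 'general') {
  const query = kind === 'knowledge-base' ? '?type=knowledge-base' : ''
  const res = await fetch(`/api/files/presigned${query}`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      fileName: file.name,
      contentType: file.type,
      fileSize: file.size,
    }),
  })
  if (!res.ok) throw new Error((await res.json()).error)
  const { presignedUrl, fileInfo, uploadHeaders } = await res.json()

  // S3 wants a bare PUT; Azure Blob needs the x-ms-* headers returned above
  await fetch(presignedUrl, {
    method: 'PUT',
    headers: { 'Content-Type': file.type, ...(uploadHeaders ?? {}) },
    body: file,
  })

  return fileInfo // fileInfo.path is the serve route for reading the file back
}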
@@ -1,7 +1,8 @@
import { readFile } from 'fs/promises'
import type { NextRequest, NextResponse } from 'next/server'
import { createLogger } from '@/lib/logs/console-logger'
import { downloadFile, isUsingCloudStorage } from '@/lib/uploads'
import { downloadFile, getStorageProvider, isUsingCloudStorage } from '@/lib/uploads'
import { BLOB_KB_CONFIG, S3_KB_CONFIG } from '@/lib/uploads/setup'
import '@/lib/uploads/setup.server'

import {
@@ -10,12 +11,25 @@ import {
  FileNotFoundError,
  findLocalFile,
  getContentType,
} from '../../utils'
} from '@/app/api/files/utils'

export const dynamic = 'force-dynamic'

const logger = createLogger('FilesServeAPI')

async function streamToBuffer(readableStream: NodeJS.ReadableStream): Promise<Buffer> {
  return new Promise((resolve, reject) => {
    const chunks: Buffer[] = []
    readableStream.on('data', (data) => {
      chunks.push(data instanceof Buffer ? data : Buffer.from(data))
    })
    readableStream.on('end', () => {
      resolve(Buffer.concat(chunks))
    })
    readableStream.on('error', reject)
  })
}

/**
 * Main API route handler for serving files
 */
@@ -85,12 +99,65 @@ async function handleLocalFile(filename: string): Promise<NextResponse> {
  }
}

async function downloadKBFile(cloudKey: string): Promise<Buffer> {
  const storageProvider = getStorageProvider()

  if (storageProvider === 'blob') {
    logger.info(`Downloading KB file from Azure Blob Storage: ${cloudKey}`)
    // Use KB-specific blob configuration
    const { getBlobServiceClient } = await import('@/lib/uploads/blob/blob-client')
    const blobServiceClient = getBlobServiceClient()
    const containerClient = blobServiceClient.getContainerClient(BLOB_KB_CONFIG.containerName)
    const blockBlobClient = containerClient.getBlockBlobClient(cloudKey)

    const downloadBlockBlobResponse = await blockBlobClient.download()
    if (!downloadBlockBlobResponse.readableStreamBody) {
      throw new Error('Failed to get readable stream from blob download')
    }

    // Convert stream to buffer
    return await streamToBuffer(downloadBlockBlobResponse.readableStreamBody)
  }

  if (storageProvider === 's3') {
    logger.info(`Downloading KB file from S3: ${cloudKey}`)
    // Use KB-specific S3 configuration
    const { getS3Client } = await import('@/lib/uploads/s3/s3-client')
    const { GetObjectCommand } = await import('@aws-sdk/client-s3')

    const s3Client = getS3Client()
    const command = new GetObjectCommand({
      Bucket: S3_KB_CONFIG.bucket,
      Key: cloudKey,
    })

    const response = await s3Client.send(command)
    if (!response.Body) {
      throw new Error('No body in S3 response')
    }

    // Convert stream to buffer using the same method as the regular S3 client
    const stream = response.Body as any
    return new Promise<Buffer>((resolve, reject) => {
      const chunks: Buffer[] = []
      stream.on('data', (chunk: Buffer) => chunks.push(chunk))
      stream.on('end', () => resolve(Buffer.concat(chunks)))
      stream.on('error', reject)
    })
  }

  throw new Error(`Unsupported storage provider for KB files: ${storageProvider}`)
}

/**
 * Proxy cloud file through our server
 */
async function handleCloudProxy(cloudKey: string): Promise<NextResponse> {
  try {
    const fileBuffer = await downloadFile(cloudKey)
    // Check if this is a KB file (starts with 'kb/')
    const isKBFile = cloudKey.startsWith('kb/')

    const fileBuffer = isKBFile ? await downloadKBFile(cloudKey) : await downloadFile(cloudKey)

    // Extract the original filename from the key (last part after last /)
    const originalFilename = cloudKey.split('/').pop() || 'download'
@@ -40,6 +40,7 @@ describe('Individual Folder API Route', () => {
  }

  const { mockAuthenticatedUser, mockUnauthenticated } = mockAuth(TEST_USER)
  const mockGetUserEntityPermissions = vi.fn()

  function createFolderDbMock(options: FolderDbMockOptions = {}) {
    const {
@@ -109,6 +110,12 @@ describe('Individual Folder API Route', () => {
    vi.resetModules()
    vi.clearAllMocks()
    setupCommonApiMocks()

    mockGetUserEntityPermissions.mockResolvedValue('admin')

    vi.doMock('@/lib/permissions/utils', () => ({
      getUserEntityPermissions: mockGetUserEntityPermissions,
    }))
  })

  afterEach(() => {
@@ -181,6 +188,72 @@ describe('Individual Folder API Route', () => {
    expect(data).toHaveProperty('error', 'Unauthorized')
  })

  it('should return 403 when user has only read permissions', async () => {
    mockAuthenticatedUser()
    mockGetUserEntityPermissions.mockResolvedValue('read') // Read-only permissions

    const dbMock = createFolderDbMock()
    vi.doMock('@/db', () => dbMock)

    const req = createMockRequest('PUT', {
      name: 'Updated Folder',
    })
    const params = Promise.resolve({ id: 'folder-1' })

    const { PUT } = await import('./route')

    const response = await PUT(req, { params })

    expect(response.status).toBe(403)

    const data = await response.json()
    expect(data).toHaveProperty('error', 'Write access required to update folders')
  })

  it('should allow folder update for write permissions', async () => {
    mockAuthenticatedUser()
    mockGetUserEntityPermissions.mockResolvedValue('write') // Write permissions

    const dbMock = createFolderDbMock()
    vi.doMock('@/db', () => dbMock)

    const req = createMockRequest('PUT', {
      name: 'Updated Folder',
    })
    const params = Promise.resolve({ id: 'folder-1' })

    const { PUT } = await import('./route')

    const response = await PUT(req, { params })

    expect(response.status).toBe(200)

    const data = await response.json()
    expect(data).toHaveProperty('folder')
  })

  it('should allow folder update for admin permissions', async () => {
    mockAuthenticatedUser()
    mockGetUserEntityPermissions.mockResolvedValue('admin') // Admin permissions

    const dbMock = createFolderDbMock()
    vi.doMock('@/db', () => dbMock)

    const req = createMockRequest('PUT', {
      name: 'Updated Folder',
    })
    const params = Promise.resolve({ id: 'folder-1' })

    const { PUT } = await import('./route')

    const response = await PUT(req, { params })

    expect(response.status).toBe(200)

    const data = await response.json()
    expect(data).toHaveProperty('folder')
  })

  it('should return 400 when trying to set folder as its own parent', async () => {
    mockAuthenticatedUser()

@@ -387,6 +460,68 @@ describe('Individual Folder API Route', () => {
    expect(data).toHaveProperty('error', 'Unauthorized')
  })

  it('should return 403 when user has only read permissions for delete', async () => {
    mockAuthenticatedUser()
    mockGetUserEntityPermissions.mockResolvedValue('read') // Read-only permissions

    const dbMock = createFolderDbMock()
    vi.doMock('@/db', () => dbMock)

    const req = createMockRequest('DELETE')
    const params = Promise.resolve({ id: 'folder-1' })

    const { DELETE } = await import('./route')

    const response = await DELETE(req, { params })

    expect(response.status).toBe(403)

    const data = await response.json()
    expect(data).toHaveProperty('error', 'Admin access required to delete folders')
  })

  it('should return 403 when user has only write permissions for delete', async () => {
    mockAuthenticatedUser()
    mockGetUserEntityPermissions.mockResolvedValue('write') // Write permissions (not enough for delete)

    const dbMock = createFolderDbMock()
    vi.doMock('@/db', () => dbMock)

    const req = createMockRequest('DELETE')
    const params = Promise.resolve({ id: 'folder-1' })

    const { DELETE } = await import('./route')

    const response = await DELETE(req, { params })

    expect(response.status).toBe(403)

    const data = await response.json()
    expect(data).toHaveProperty('error', 'Admin access required to delete folders')
  })

  it('should allow folder deletion for admin permissions', async () => {
    mockAuthenticatedUser()
    mockGetUserEntityPermissions.mockResolvedValue('admin') // Admin permissions

    const dbMock = createFolderDbMock({
      folderLookupResult: mockFolder,
    })
    vi.doMock('@/db', () => dbMock)

    const req = createMockRequest('DELETE')
    const params = Promise.resolve({ id: 'folder-1' })

    const { DELETE } = await import('./route')

    const response = await DELETE(req, { params })

    expect(response.status).toBe(200)

    const data = await response.json()
    expect(data).toHaveProperty('success', true)
  })

  it('should handle database errors during deletion', async () => {
    mockAuthenticatedUser()
@@ -2,6 +2,7 @@ import { and, eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { getSession } from '@/lib/auth'
import { createLogger } from '@/lib/logs/console-logger'
import { getUserEntityPermissions } from '@/lib/permissions/utils'
import { db } from '@/db'
import { workflow, workflowFolder } from '@/db/schema'

@@ -19,17 +20,31 @@ export async function PUT(request: NextRequest, { params }: { params: Promise<{
    const body = await request.json()
    const { name, color, isExpanded, parentId } = body

    // Verify the folder exists and belongs to the user
    // Verify the folder exists
    const existingFolder = await db
      .select()
      .from(workflowFolder)
      .where(and(eq(workflowFolder.id, id), eq(workflowFolder.userId, session.user.id)))
      .where(eq(workflowFolder.id, id))
      .then((rows) => rows[0])

    if (!existingFolder) {
      return NextResponse.json({ error: 'Folder not found' }, { status: 404 })
    }

    // Check if user has write permissions for the workspace
    const workspacePermission = await getUserEntityPermissions(
      session.user.id,
      'workspace',
      existingFolder.workspaceId
    )

    if (!workspacePermission || workspacePermission === 'read') {
      return NextResponse.json(
        { error: 'Write access required to update folders' },
        { status: 403 }
      )
    }

    // Prevent setting a folder as its own parent or creating circular references
    if (parentId && parentId === id) {
      return NextResponse.json({ error: 'Folder cannot be its own parent' }, { status: 400 })
@@ -81,19 +96,33 @@ export async function DELETE(

    const { id } = await params

    // Verify the folder exists and belongs to the user
    // Verify the folder exists
    const existingFolder = await db
      .select()
      .from(workflowFolder)
      .where(and(eq(workflowFolder.id, id), eq(workflowFolder.userId, session.user.id)))
      .where(eq(workflowFolder.id, id))
      .then((rows) => rows[0])

    if (!existingFolder) {
      return NextResponse.json({ error: 'Folder not found' }, { status: 404 })
    }

    // Check if user has admin permissions for the workspace (admin-only for deletions)
    const workspacePermission = await getUserEntityPermissions(
      session.user.id,
      'workspace',
      existingFolder.workspaceId
    )

    if (workspacePermission !== 'admin') {
      return NextResponse.json(
        { error: 'Admin access required to delete folders' },
        { status: 403 }
      )
    }

    // Recursively delete folder and all its contents
    const deletionStats = await deleteFolderRecursively(id, session.user.id)
    const deletionStats = await deleteFolderRecursively(id, existingFolder.workspaceId)

    logger.info('Deleted folder and all contents:', {
      id,
@@ -113,41 +142,40 @@ export async function DELETE(
// Helper function to recursively delete a folder and all its contents
async function deleteFolderRecursively(
  folderId: string,
  userId: string
  workspaceId: string
): Promise<{ folders: number; workflows: number }> {
  const stats = { folders: 0, workflows: 0 }

  // Get all child folders first
  // Get all child folders first (workspace-scoped, not user-scoped)
  const childFolders = await db
    .select({ id: workflowFolder.id })
    .from(workflowFolder)
    .where(and(eq(workflowFolder.parentId, folderId), eq(workflowFolder.userId, userId)))
    .where(and(eq(workflowFolder.parentId, folderId), eq(workflowFolder.workspaceId, workspaceId)))

  // Recursively delete child folders
  for (const childFolder of childFolders) {
    const childStats = await deleteFolderRecursively(childFolder.id, userId)
    const childStats = await deleteFolderRecursively(childFolder.id, workspaceId)
    stats.folders += childStats.folders
    stats.workflows += childStats.workflows
  }

  // Delete all workflows in this folder
  // Delete all workflows in this folder (workspace-scoped, not user-scoped)
  // The database cascade will handle deleting related workflow_blocks, workflow_edges, workflow_subflows
  const workflowsInFolder = await db
    .select({ id: workflow.id })
    .from(workflow)
    .where(and(eq(workflow.folderId, folderId), eq(workflow.userId, userId)))
    .where(and(eq(workflow.folderId, folderId), eq(workflow.workspaceId, workspaceId)))

  if (workflowsInFolder.length > 0) {
    await db
      .delete(workflow)
      .where(and(eq(workflow.folderId, folderId), eq(workflow.userId, userId)))
      .where(and(eq(workflow.folderId, folderId), eq(workflow.workspaceId, workspaceId)))

    stats.workflows += workflowsInFolder.length
  }

  // Delete this folder
  await db
    .delete(workflowFolder)
    .where(and(eq(workflowFolder.id, folderId), eq(workflowFolder.userId, userId)))
  await db.delete(workflowFolder).where(eq(workflowFolder.id, folderId))

  stats.folders += 1
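Both handlers apply the same two-step gate: resolve the folder by id alone, then authorize against the folder's workspace rather than its creator, requiring write-or-better for updates and admin for deletes. A condensed, hypothetical distillation of that gate (the permission values and error strings come from the route above):

// Hypothetical guard distilled from the PUT/DELETE permission checks above.
type WorkspacePermission = 'read' | 'write' | 'admin' | null

function folderAccessError(
  permission: WorkspacePermission,
  action: 'update' | 'delete'
): string | null {
  if (action === 'delete') {
    // Deletion is admin-only
    return permission === 'admin' ? null : 'Admin access required to delete folders'
  }
  // Updates need write or admin
  return !permission || permission === 'read'
    ? 'Write access required to update folders'
    : null
}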
@@ -52,6 +52,7 @@ describe('Folders API Route', () => {
|
||||
const mockValues = vi.fn()
|
||||
const mockReturning = vi.fn()
|
||||
const mockTransaction = vi.fn()
|
||||
const mockGetUserEntityPermissions = vi.fn()
|
||||
|
||||
beforeEach(() => {
|
||||
vi.resetModules()
|
||||
@@ -72,6 +73,8 @@ describe('Folders API Route', () => {
|
||||
mockValues.mockReturnValue({ returning: mockReturning })
|
||||
mockReturning.mockReturnValue([mockFolders[0]])
|
||||
|
||||
mockGetUserEntityPermissions.mockResolvedValue('admin')
|
||||
|
||||
vi.doMock('@/db', () => ({
|
||||
db: {
|
||||
select: mockSelect,
|
||||
@@ -79,6 +82,10 @@ describe('Folders API Route', () => {
|
||||
transaction: mockTransaction,
|
||||
},
|
||||
}))
|
||||
|
||||
vi.doMock('@/lib/permissions/utils', () => ({
|
||||
getUserEntityPermissions: mockGetUserEntityPermissions,
|
||||
}))
|
||||
})
|
||||
|
||||
afterEach(() => {
|
||||
@@ -143,6 +150,42 @@ describe('Folders API Route', () => {
|
||||
expect(data).toHaveProperty('error', 'Workspace ID is required')
|
||||
})
|
||||
|
||||
it('should return 403 when user has no workspace permissions', async () => {
|
||||
mockAuthenticatedUser()
|
||||
mockGetUserEntityPermissions.mockResolvedValue(null) // No permissions
|
||||
|
||||
const mockRequest = createMockRequest('GET')
|
||||
Object.defineProperty(mockRequest, 'url', {
|
||||
value: 'http://localhost:3000/api/folders?workspaceId=workspace-123',
|
||||
})
|
||||
|
||||
const { GET } = await import('./route')
|
||||
const response = await GET(mockRequest)
|
||||
|
||||
expect(response.status).toBe(403)
|
||||
|
||||
const data = await response.json()
|
||||
expect(data).toHaveProperty('error', 'Access denied to this workspace')
|
||||
})
|
||||
|
||||
it('should return 403 when user has only read permissions', async () => {
      mockAuthenticatedUser()
      mockGetUserEntityPermissions.mockResolvedValue('read') // Read-only permissions

      const mockRequest = createMockRequest('GET')
      Object.defineProperty(mockRequest, 'url', {
        value: 'http://localhost:3000/api/folders?workspaceId=workspace-123',
      })

      const { GET } = await import('./route')
      const response = await GET(mockRequest)

      expect(response.status).toBe(200) // Should work for read permissions

      const data = await response.json()
      expect(data).toHaveProperty('folders')
    })

    it('should handle database errors gracefully', async () => {
      mockAuthenticatedUser()

@@ -295,6 +338,100 @@ describe('Folders API Route', () => {
      expect(data).toHaveProperty('error', 'Unauthorized')
    })

    it('should return 403 when user has only read permissions', async () => {
      mockAuthenticatedUser()
      mockGetUserEntityPermissions.mockResolvedValue('read') // Read-only permissions

      const req = createMockRequest('POST', {
        name: 'Test Folder',
        workspaceId: 'workspace-123',
      })

      const { POST } = await import('./route')
      const response = await POST(req)

      expect(response.status).toBe(403)

      const data = await response.json()
      expect(data).toHaveProperty('error', 'Write or Admin access required to create folders')
    })

    it('should allow folder creation for write permissions', async () => {
      mockAuthenticatedUser()
      mockGetUserEntityPermissions.mockResolvedValue('write') // Write permissions

      mockTransaction.mockImplementationOnce(async (callback: any) => {
        const tx = {
          select: vi.fn().mockReturnValue({
            from: vi.fn().mockReturnValue({
              where: vi.fn().mockReturnValue({
                orderBy: vi.fn().mockReturnValue({
                  limit: vi.fn().mockReturnValue([]), // No existing folders
                }),
              }),
            }),
          }),
          insert: vi.fn().mockReturnValue({
            values: vi.fn().mockReturnValue({
              returning: vi.fn().mockReturnValue([mockFolders[0]]),
            }),
          }),
        }
        return await callback(tx)
      })

      const req = createMockRequest('POST', {
        name: 'Test Folder',
        workspaceId: 'workspace-123',
      })

      const { POST } = await import('./route')
      const response = await POST(req)

      expect(response.status).toBe(200)

      const data = await response.json()
      expect(data).toHaveProperty('folder')
    })

    it('should allow folder creation for admin permissions', async () => {
      mockAuthenticatedUser()
      mockGetUserEntityPermissions.mockResolvedValue('admin') // Admin permissions

      mockTransaction.mockImplementationOnce(async (callback: any) => {
        const tx = {
          select: vi.fn().mockReturnValue({
            from: vi.fn().mockReturnValue({
              where: vi.fn().mockReturnValue({
                orderBy: vi.fn().mockReturnValue({
                  limit: vi.fn().mockReturnValue([]), // No existing folders
                }),
              }),
            }),
          }),
          insert: vi.fn().mockReturnValue({
            values: vi.fn().mockReturnValue({
              returning: vi.fn().mockReturnValue([mockFolders[0]]),
            }),
          }),
        }
        return await callback(tx)
      })

      const req = createMockRequest('POST', {
        name: 'Test Folder',
        workspaceId: 'workspace-123',
      })

      const { POST } = await import('./route')
      const response = await POST(req)

      expect(response.status).toBe(200)

      const data = await response.json()
      expect(data).toHaveProperty('folder')
    })

    it('should return 400 when required fields are missing', async () => {
      const testCases = [
        { name: '', workspaceId: 'workspace-123' }, // Missing name
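The permission matrix exercised by the GET tests above could also be written as one table-driven test. A minimal sketch, assuming the same mockAuthenticatedUser, mockGetUserEntityPermissions, and createMockRequest helpers from this file (a hypothetical restructuring, not part of the diff):

// Each case: the value getUserEntityPermissions resolves to, and the expected GET status.
const cases: Array<[string | null, number]> = [
  [null, 403],   // no workspace permission: access denied
  ['read', 200], // read is enough for listing folders
  ['write', 200],
  ['admin', 200],
]

it.each(cases)('GET with permission %s returns %i', async (permission, expectedStatus) => {
  mockAuthenticatedUser()
  mockGetUserEntityPermissions.mockResolvedValue(permission)

  const mockRequest = createMockRequest('GET')
  Object.defineProperty(mockRequest, 'url', {
    value: 'http://localhost:3000/api/folders?workspaceId=workspace-123',
  })

  const { GET } = await import('./route')
  const response = await GET(mockRequest)
  expect(response.status).toBe(expectedStatus)
})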
@@ -2,6 +2,7 @@ import { and, asc, desc, eq, isNull } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { getSession } from '@/lib/auth'
import { createLogger } from '@/lib/logs/console-logger'
import { getUserEntityPermissions } from '@/lib/permissions/utils'
import { db } from '@/db'
import { workflowFolder } from '@/db/schema'

@@ -22,13 +23,23 @@ export async function GET(request: NextRequest) {
      return NextResponse.json({ error: 'Workspace ID is required' }, { status: 400 })
    }

-   // Fetch all folders for the workspace, ordered by sortOrder and createdAt
+   // Check if user has workspace permissions
    const workspacePermission = await getUserEntityPermissions(
      session.user.id,
      'workspace',
      workspaceId
    )

    if (!workspacePermission) {
      return NextResponse.json({ error: 'Access denied to this workspace' }, { status: 403 })
    }

    // If user has workspace permissions, fetch ALL folders in the workspace
    // This allows shared workspace members to see folders created by other users
    const folders = await db
      .select()
      .from(workflowFolder)
-     .where(
-       and(eq(workflowFolder.workspaceId, workspaceId), eq(workflowFolder.userId, session.user.id))
-     )
+     .where(eq(workflowFolder.workspaceId, workspaceId))
      .orderBy(asc(workflowFolder.sortOrder), asc(workflowFolder.createdAt))

    return NextResponse.json({ folders })

@@ -53,19 +64,33 @@ export async function POST(request: NextRequest) {
      return NextResponse.json({ error: 'Name and workspace ID are required' }, { status: 400 })
    }

    // Check if user has workspace permissions (at least 'write' access to create folders)
    const workspacePermission = await getUserEntityPermissions(
      session.user.id,
      'workspace',
      workspaceId
    )

    if (!workspacePermission || workspacePermission === 'read') {
      return NextResponse.json(
        { error: 'Write or Admin access required to create folders' },
        { status: 403 }
      )
    }

    // Generate a new ID
    const id = crypto.randomUUID()

    // Use transaction to ensure sortOrder consistency
    const newFolder = await db.transaction(async (tx) => {
      // Get the next sort order for the parent (or root level)
      // Consider all folders in the workspace, not just those created by current user
      const existingFolders = await tx
        .select({ sortOrder: workflowFolder.sortOrder })
        .from(workflowFolder)
        .where(
          and(
            eq(workflowFolder.workspaceId, workspaceId),
            eq(workflowFolder.userId, session.user.id),
            parentId ? eq(workflowFolder.parentId, parentId) : isNull(workflowFolder.parentId)
          )
        )
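Both handlers now repeat the same lookup-then-gate sequence around getUserEntityPermissions. If this pattern spreads to more routes, it could be factored into a small guard; a minimal sketch, assuming the permission levels 'read' | 'write' | 'admin' | null that the diff checks against (the helper itself is hypothetical, not part of this commit):

import { NextResponse } from 'next/server'
import { getUserEntityPermissions } from '@/lib/permissions/utils'

// Hypothetical helper: returns null when access is allowed,
// or a ready-to-return 403 response otherwise.
async function requireWorkspacePermission(
  userId: string,
  workspaceId: string,
  options: { requireWrite?: boolean; deniedMessage: string }
): Promise<NextResponse | null> {
  const permission = await getUserEntityPermissions(userId, 'workspace', workspaceId)
  if (!permission || (options.requireWrite && permission === 'read')) {
    return NextResponse.json({ error: options.deniedMessage }, { status: 403 })
  }
  return null
}

GET would call it with requireWrite left unset; POST would pass requireWrite: true and the folder-creation message.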
@@ -513,7 +513,6 @@ export async function POST(req: NextRequest) {
    // } else {
    logger.info(`[${requestId}] Using VM for code execution`, {
      resolvedCode,
      executionParams,
      hasEnvVars: Object.keys(envVars).length > 0,
    })

@@ -0,0 +1,413 @@
/**
 * Tests for knowledge document chunks API route
 *
 * @vitest-environment node
 */
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import {
  createMockRequest,
  mockAuth,
  mockConsoleLogger,
  mockDrizzleOrm,
  mockKnowledgeSchemas,
} from '@/app/api/__test-utils__/utils'
import type { DocumentAccessCheck } from '../../../../utils'

mockKnowledgeSchemas()
mockDrizzleOrm()
mockConsoleLogger()

vi.mock('@/lib/tokenization/estimators', () => ({
  estimateTokenCount: vi.fn().mockReturnValue({ count: 452 }),
}))

vi.mock('@/providers/utils', () => ({
  calculateCost: vi.fn().mockReturnValue({
    input: 0.00000904,
    output: 0,
    total: 0.00000904,
    pricing: {
      input: 0.02,
      output: 0,
      updatedAt: '2025-07-10',
    },
  }),
}))

vi.mock('../../../../utils', () => ({
  checkDocumentAccess: vi.fn(),
  generateEmbeddings: vi.fn().mockResolvedValue([[0.1, 0.2, 0.3, 0.4, 0.5]]),
}))

describe('Knowledge Document Chunks API Route', () => {
  const mockAuth$ = mockAuth()

  const mockDbChain = {
    select: vi.fn().mockReturnThis(),
    from: vi.fn().mockReturnThis(),
    where: vi.fn().mockReturnThis(),
    orderBy: vi.fn().mockReturnThis(),
    limit: vi.fn().mockReturnThis(),
    offset: vi.fn().mockReturnThis(),
    insert: vi.fn().mockReturnThis(),
    values: vi.fn().mockResolvedValue(undefined),
    update: vi.fn().mockReturnThis(),
    set: vi.fn().mockReturnThis(),
    returning: vi.fn().mockResolvedValue([]),
    delete: vi.fn().mockReturnThis(),
    transaction: vi.fn(),
  }

  const mockGetUserId = vi.fn()

  beforeEach(async () => {
    vi.clearAllMocks()

    vi.doMock('@/db', () => ({
      db: mockDbChain,
    }))

    vi.doMock('@/app/api/auth/oauth/utils', () => ({
      getUserId: mockGetUserId,
    }))

    Object.values(mockDbChain).forEach((fn) => {
      if (typeof fn === 'function' && fn !== mockDbChain.values && fn !== mockDbChain.returning) {
        fn.mockClear().mockReturnThis()
      }
    })

    vi.stubGlobal('crypto', {
      randomUUID: vi.fn().mockReturnValue('mock-chunk-uuid-1234'),
      createHash: vi.fn().mockReturnValue({
        update: vi.fn().mockReturnThis(),
        digest: vi.fn().mockReturnValue('mock-hash-123'),
      }),
    })
  })

  afterEach(() => {
    vi.clearAllMocks()
  })

  describe('POST /api/knowledge/[id]/documents/[documentId]/chunks', () => {
    const validChunkData = {
      content: 'This is test chunk content for uploading to the knowledge base document.',
      enabled: true,
    }

    const mockDocumentAccess = {
      hasAccess: true,
      notFound: false,
      reason: '',
      document: {
        id: 'doc-123',
        processingStatus: 'completed',
        tag1: 'tag1-value',
        tag2: 'tag2-value',
        tag3: null,
        tag4: null,
        tag5: null,
        tag6: null,
        tag7: null,
      },
    }

    const mockParams = Promise.resolve({ id: 'kb-123', documentId: 'doc-123' })

    it('should create chunk successfully with cost tracking', async () => {
      const { checkDocumentAccess } = await import('../../../../utils')
      const { estimateTokenCount } = await import('@/lib/tokenization/estimators')
      const { calculateCost } = await import('@/providers/utils')

      mockGetUserId.mockResolvedValue('user-123')
      vi.mocked(checkDocumentAccess).mockResolvedValue(mockDocumentAccess as DocumentAccessCheck)

      // Mock transaction
      const mockTx = {
        select: vi.fn().mockReturnThis(),
        from: vi.fn().mockReturnThis(),
        where: vi.fn().mockReturnThis(),
        orderBy: vi.fn().mockReturnThis(),
        limit: vi.fn().mockResolvedValue([{ chunkIndex: 0 }]),
        insert: vi.fn().mockReturnThis(),
        values: vi.fn().mockResolvedValue(undefined),
        update: vi.fn().mockReturnThis(),
        set: vi.fn().mockReturnThis(),
      }

      mockDbChain.transaction.mockImplementation(async (callback) => {
        return await callback(mockTx)
      })

      const req = createMockRequest('POST', validChunkData)
      const { POST } = await import('./route')
      const response = await POST(req, { params: mockParams })
      const data = await response.json()

      expect(response.status).toBe(200)
      expect(data.success).toBe(true)

      // Verify cost tracking
      expect(data.data.cost).toBeDefined()
      expect(data.data.cost.input).toBe(0.00000904)
      expect(data.data.cost.output).toBe(0)
      expect(data.data.cost.total).toBe(0.00000904)
      expect(data.data.cost.tokens).toEqual({
        prompt: 452,
        completion: 0,
        total: 452,
      })
      expect(data.data.cost.model).toBe('text-embedding-3-small')
      expect(data.data.cost.pricing).toEqual({
        input: 0.02,
        output: 0,
        updatedAt: '2025-07-10',
      })

      // Verify function calls
      expect(estimateTokenCount).toHaveBeenCalledWith(validChunkData.content, 'openai')
      expect(calculateCost).toHaveBeenCalledWith('text-embedding-3-small', 452, 0, false)
    })

    it('should handle workflow-based authentication', async () => {
      const { checkDocumentAccess } = await import('../../../../utils')

      const workflowData = {
        ...validChunkData,
        workflowId: 'workflow-123',
      }

      mockGetUserId.mockResolvedValue('user-123')
      vi.mocked(checkDocumentAccess).mockResolvedValue(mockDocumentAccess as DocumentAccessCheck)

      const mockTx = {
        select: vi.fn().mockReturnThis(),
        from: vi.fn().mockReturnThis(),
        where: vi.fn().mockReturnThis(),
        orderBy: vi.fn().mockReturnThis(),
        limit: vi.fn().mockResolvedValue([]),
        insert: vi.fn().mockReturnThis(),
        values: vi.fn().mockResolvedValue(undefined),
        update: vi.fn().mockReturnThis(),
        set: vi.fn().mockReturnThis(),
      }

      mockDbChain.transaction.mockImplementation(async (callback) => {
        return await callback(mockTx)
      })

      const req = createMockRequest('POST', workflowData)
      const { POST } = await import('./route')
      const response = await POST(req, { params: mockParams })
      const data = await response.json()

      expect(response.status).toBe(200)
      expect(data.success).toBe(true)
      expect(mockGetUserId).toHaveBeenCalledWith(expect.any(String), 'workflow-123')
    })

    it.concurrent('should return unauthorized for unauthenticated request', async () => {
      mockGetUserId.mockResolvedValue(null)

      const req = createMockRequest('POST', validChunkData)
      const { POST } = await import('./route')
      const response = await POST(req, { params: mockParams })
      const data = await response.json()

      expect(response.status).toBe(401)
      expect(data.error).toBe('Unauthorized')
    })

    it('should return not found for workflow that does not exist', async () => {
      const workflowData = {
        ...validChunkData,
        workflowId: 'nonexistent-workflow',
      }

      mockGetUserId.mockResolvedValue(null)

      const req = createMockRequest('POST', workflowData)
      const { POST } = await import('./route')
      const response = await POST(req, { params: mockParams })
      const data = await response.json()

      expect(response.status).toBe(404)
      expect(data.error).toBe('Workflow not found')
    })

    it.concurrent('should return not found for document access denied', async () => {
      const { checkDocumentAccess } = await import('../../../../utils')

      mockGetUserId.mockResolvedValue('user-123')
      vi.mocked(checkDocumentAccess).mockResolvedValue({
        hasAccess: false,
        notFound: true,
        reason: 'Document not found',
      })

      const req = createMockRequest('POST', validChunkData)
      const { POST } = await import('./route')
      const response = await POST(req, { params: mockParams })
      const data = await response.json()

      expect(response.status).toBe(404)
      expect(data.error).toBe('Document not found')
    })

    it('should return unauthorized for unauthorized document access', async () => {
      const { checkDocumentAccess } = await import('../../../../utils')

      mockGetUserId.mockResolvedValue('user-123')
      vi.mocked(checkDocumentAccess).mockResolvedValue({
        hasAccess: false,
        notFound: false,
        reason: 'Unauthorized access',
      })

      const req = createMockRequest('POST', validChunkData)
      const { POST } = await import('./route')
      const response = await POST(req, { params: mockParams })
      const data = await response.json()

      expect(response.status).toBe(401)
      expect(data.error).toBe('Unauthorized')
    })

    it('should reject chunks for failed documents', async () => {
      const { checkDocumentAccess } = await import('../../../../utils')

      mockGetUserId.mockResolvedValue('user-123')
      vi.mocked(checkDocumentAccess).mockResolvedValue({
        ...mockDocumentAccess,
        document: {
          ...mockDocumentAccess.document!,
          processingStatus: 'failed',
        },
      } as DocumentAccessCheck)

      const req = createMockRequest('POST', validChunkData)
      const { POST } = await import('./route')
      const response = await POST(req, { params: mockParams })
      const data = await response.json()

      expect(response.status).toBe(400)
      expect(data.error).toBe('Cannot add chunks to failed document')
    })

    it.concurrent('should validate chunk data', async () => {
      const { checkDocumentAccess } = await import('../../../../utils')

      mockGetUserId.mockResolvedValue('user-123')
      vi.mocked(checkDocumentAccess).mockResolvedValue(mockDocumentAccess as DocumentAccessCheck)

      const invalidData = {
        content: '', // Empty content
        enabled: true,
      }

      const req = createMockRequest('POST', invalidData)
      const { POST } = await import('./route')
      const response = await POST(req, { params: mockParams })
      const data = await response.json()

      expect(response.status).toBe(400)
      expect(data.error).toBe('Invalid request data')
      expect(data.details).toBeDefined()
    })

    it('should inherit tags from parent document', async () => {
      const { checkDocumentAccess } = await import('../../../../utils')

      mockGetUserId.mockResolvedValue('user-123')
      vi.mocked(checkDocumentAccess).mockResolvedValue(mockDocumentAccess as DocumentAccessCheck)

      const mockTx = {
        select: vi.fn().mockReturnThis(),
        from: vi.fn().mockReturnThis(),
        where: vi.fn().mockReturnThis(),
        orderBy: vi.fn().mockReturnThis(),
        limit: vi.fn().mockResolvedValue([]),
        insert: vi.fn().mockReturnThis(),
        values: vi.fn().mockImplementation((data) => {
          // Verify that tags are inherited from document
          expect(data.tag1).toBe('tag1-value')
          expect(data.tag2).toBe('tag2-value')
          expect(data.tag3).toBe(null)
          return Promise.resolve(undefined)
        }),
        update: vi.fn().mockReturnThis(),
        set: vi.fn().mockReturnThis(),
      }

      mockDbChain.transaction.mockImplementation(async (callback) => {
        return await callback(mockTx)
      })

      const req = createMockRequest('POST', validChunkData)
      const { POST } = await import('./route')
      await POST(req, { params: mockParams })

      expect(mockTx.values).toHaveBeenCalled()
    })

    it.concurrent('should handle cost calculation with different content lengths', async () => {
      const { estimateTokenCount } = await import('@/lib/tokenization/estimators')
      const { calculateCost } = await import('@/providers/utils')
      const { checkDocumentAccess } = await import('../../../../utils')

      // Mock larger content with more tokens
      vi.mocked(estimateTokenCount).mockReturnValue({
        count: 1000,
        confidence: 'high',
        provider: 'openai',
        method: 'precise',
      })
      vi.mocked(calculateCost).mockReturnValue({
        input: 0.00002,
        output: 0,
        total: 0.00002,
        pricing: {
          input: 0.02,
          output: 0,
          updatedAt: '2025-07-10',
        },
      })

      const largeChunkData = {
        content:
          'This is a much larger chunk of content that would result in significantly more tokens when processed through the OpenAI tokenization system for embedding generation. This content is designed to test the cost calculation accuracy with larger input sizes.',
        enabled: true,
      }

      mockGetUserId.mockResolvedValue('user-123')
      vi.mocked(checkDocumentAccess).mockResolvedValue(mockDocumentAccess as DocumentAccessCheck)

      const mockTx = {
        select: vi.fn().mockReturnThis(),
        from: vi.fn().mockReturnThis(),
        where: vi.fn().mockReturnThis(),
        orderBy: vi.fn().mockReturnThis(),
        limit: vi.fn().mockResolvedValue([]),
        insert: vi.fn().mockReturnThis(),
        values: vi.fn().mockResolvedValue(undefined),
        update: vi.fn().mockReturnThis(),
        set: vi.fn().mockReturnThis(),
      }

      mockDbChain.transaction.mockImplementation(async (callback) => {
        return await callback(mockTx)
      })

      const req = createMockRequest('POST', largeChunkData)
      const { POST } = await import('./route')
      const response = await POST(req, { params: mockParams })
      const data = await response.json()

      expect(response.status).toBe(200)
      expect(data.data.cost.input).toBe(0.00002)
      expect(data.data.cost.tokens.prompt).toBe(1000)
      expect(calculateCost).toHaveBeenCalledWith('text-embedding-3-small', 1000, 0, false)
    })
  })
})
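One detail worth noting in this new suite: the route module is always imported with await import('./route') inside each test, after vi.doMock(...) has run in beforeEach. Unlike vi.mock, vi.doMock is not hoisted, so its mocks only apply to modules imported afterwards; a top-level import { POST } from './route' would capture the real @/db instead of mockDbChain.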
@@ -4,9 +4,11 @@ import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getSession } from '@/lib/auth'
import { createLogger } from '@/lib/logs/console-logger'
import { estimateTokenCount } from '@/lib/tokenization/estimators'
import { getUserId } from '@/app/api/auth/oauth/utils'
import { db } from '@/db'
import { document, embedding } from '@/db/schema'
import { calculateCost } from '@/providers/utils'
import { checkDocumentAccess, generateEmbeddings } from '../../../../utils'

const logger = createLogger('DocumentChunksAPI')

@@ -118,7 +120,13 @@ export async function GET(
        enabled: embedding.enabled,
        startOffset: embedding.startOffset,
        endOffset: embedding.endOffset,
        metadata: embedding.metadata,
        tag1: embedding.tag1,
        tag2: embedding.tag2,
        tag3: embedding.tag3,
        tag4: embedding.tag4,
        tag5: embedding.tag5,
        tag6: embedding.tag6,
        tag7: embedding.tag7,
        createdAt: embedding.createdAt,
        updatedAt: embedding.updatedAt,
      })

@@ -211,6 +219,9 @@ export async function POST(
    logger.info(`[${requestId}] Generating embedding for manual chunk`)
    const embeddings = await generateEmbeddings([validatedData.content])

    // Calculate accurate token count for both database storage and cost calculation
    const tokenCount = estimateTokenCount(validatedData.content, 'openai')

    const chunkId = crypto.randomUUID()
    const now = new Date()

@@ -234,12 +245,19 @@ export async function POST(
      chunkHash: crypto.createHash('sha256').update(validatedData.content).digest('hex'),
      content: validatedData.content,
      contentLength: validatedData.content.length,
-     tokenCount: Math.ceil(validatedData.content.length / 4), // Rough approximation
+     tokenCount: tokenCount.count, // Use accurate token count
      embedding: embeddings[0],
      embeddingModel: 'text-embedding-3-small',
      startOffset: 0, // Manual chunks don't have document offsets
      endOffset: validatedData.content.length,
      metadata: { manual: true }, // Mark as manually created
      // Inherit tags from parent document
      tag1: doc.tag1,
      tag2: doc.tag2,
      tag3: doc.tag3,
      tag4: doc.tag4,
      tag5: doc.tag5,
      tag6: doc.tag6,
      tag7: doc.tag7,
      enabled: validatedData.enabled,
      createdAt: now,
      updatedAt: now,

@@ -263,9 +281,38 @@ export async function POST(

    logger.info(`[${requestId}] Manual chunk created: ${chunkId} in document ${documentId}`)

    // Calculate cost for the embedding (with fallback if calculation fails)
    let cost = null
    try {
      cost = calculateCost('text-embedding-3-small', tokenCount.count, 0, false)
    } catch (error) {
      logger.warn(`[${requestId}] Failed to calculate cost for chunk upload`, {
        error: error instanceof Error ? error.message : 'Unknown error',
      })
      // Continue without cost information rather than failing the upload
    }

    return NextResponse.json({
      success: true,
-     data: newChunk,
+     data: {
        ...newChunk,
        ...(cost
          ? {
              cost: {
                input: cost.input,
                output: cost.output,
                total: cost.total,
                tokens: {
                  prompt: tokenCount.count,
                  completion: 0,
                  total: tokenCount.count,
                },
                model: 'text-embedding-3-small',
                pricing: cost.pricing,
              },
            }
          : {}),
      },
    })
  } catch (validationError) {
    if (validationError instanceof z.ZodError) {
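The numbers asserted in the tests above follow directly from this code path: the pricing used in the mocks is expressed in dollars per million tokens, so a 452-token chunk embedded with text-embedding-3-small at $0.02/M costs 452 x 0.02 / 1,000,000 = $0.00000904, with no output cost for embeddings. A one-line sanity check (constant names are illustrative):

const INPUT_PRICE_PER_MILLION = 0.02 // the mocked pricing.input above
const promptTokens = 452
const inputCost = (promptTokens * INPUT_PRICE_PER_MILLION) / 1_000_000
console.log(inputCost) // ~0.00000904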
@@ -153,6 +153,14 @@ const CreateDocumentSchema = z.object({
  fileUrl: z.string().url('File URL must be valid'),
  fileSize: z.number().min(1, 'File size must be greater than 0'),
  mimeType: z.string().min(1, 'MIME type is required'),
  // Document tags for filtering
  tag1: z.string().optional(),
  tag2: z.string().optional(),
  tag3: z.string().optional(),
  tag4: z.string().optional(),
  tag5: z.string().optional(),
  tag6: z.string().optional(),
  tag7: z.string().optional(),
})

const BulkCreateDocumentsSchema = z.object({

@@ -229,6 +237,14 @@ export async function GET(req: NextRequest, { params }: { params: Promise<{ id:
        processingError: document.processingError,
        enabled: document.enabled,
        uploadedAt: document.uploadedAt,
        // Include tags in response
        tag1: document.tag1,
        tag2: document.tag2,
        tag3: document.tag3,
        tag4: document.tag4,
        tag5: document.tag5,
        tag6: document.tag6,
        tag7: document.tag7,
      })
      .from(document)
      .where(and(...whereConditions))

@@ -298,6 +314,14 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
        processingStatus: 'pending' as const,
        enabled: true,
        uploadedAt: now,
        // Include tags from upload
        tag1: docData.tag1 || null,
        tag2: docData.tag2 || null,
        tag3: docData.tag3 || null,
        tag4: docData.tag4 || null,
        tag5: docData.tag5 || null,
        tag6: docData.tag6 || null,
        tag7: docData.tag7 || null,
      }

      await tx.insert(document).values(newDocument)

@@ -372,6 +396,14 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
      characterCount: 0,
      enabled: true,
      uploadedAt: now,
      // Include tags from upload
      tag1: validatedData.tag1 || null,
      tag2: validatedData.tag2 || null,
      tag3: validatedData.tag3 || null,
      tag4: validatedData.tag4 || null,
      tag5: validatedData.tag5 || null,
      tag6: validatedData.tag6 || null,
      tag7: validatedData.tag7 || null,
    }

    await db.insert(document).values(newDocument)
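With the schema change above, callers can attach up to seven filter tags at upload time. A sketch of a request body using only the fields visible in this hunk (the file values are hypothetical, and any other required fields of CreateDocumentSchema are unchanged and omitted here):

const body = {
  fileUrl: 'https://example.com/files/handbook.pdf', // hypothetical file
  fileSize: 102_400,
  mimeType: 'application/pdf',
  tag1: 'hr',
  tag2: '2025',
  // tag3..tag7 omitted; they are optional and normalized to null on insert
}

Unset tags are stored as null, which is what the docData.tagN || null normalization above guarantees.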
@@ -8,7 +8,6 @@ import { document, knowledgeBase } from '@/db/schema'

const logger = createLogger('KnowledgeBaseAPI')

- // Schema for knowledge base creation
const CreateKnowledgeBaseSchema = z.object({
  name: z.string().min(1, 'Name is required'),
  description: z.string().optional(),
@@ -34,6 +34,23 @@ vi.mock('@/lib/documents/utils', () => ({
  retryWithExponentialBackoff: vi.fn().mockImplementation((fn) => fn()),
}))

vi.mock('@/lib/tokenization/estimators', () => ({
  estimateTokenCount: vi.fn().mockReturnValue({ count: 521 }),
}))

vi.mock('@/providers/utils', () => ({
  calculateCost: vi.fn().mockReturnValue({
    input: 0.00001042,
    output: 0,
    total: 0.00001042,
    pricing: {
      input: 0.02,
      output: 0,
      updatedAt: '2025-07-10',
    },
  }),
}))

mockConsoleLogger()

describe('Knowledge Search API Route', () => {

@@ -206,7 +223,7 @@ describe('Knowledge Search API Route', () => {
    expect(mockGetUserId).toHaveBeenCalledWith(expect.any(String), 'workflow-123')
  })

- it('should return unauthorized for unauthenticated request', async () => {
+ it.concurrent('should return unauthorized for unauthenticated request', async () => {
    mockGetUserId.mockResolvedValue(null)

    const req = createMockRequest('POST', validSearchData)

@@ -218,7 +235,7 @@ describe('Knowledge Search API Route', () => {
    expect(data.error).toBe('Unauthorized')
  })

- it('should return not found for workflow that does not exist', async () => {
+ it.concurrent('should return not found for workflow that does not exist', async () => {
    const workflowData = {
      ...validSearchData,
      workflowId: 'nonexistent-workflow',

@@ -268,7 +285,7 @@ describe('Knowledge Search API Route', () => {
    expect(data.error).toBe('Knowledge bases not found: kb-missing')
  })

- it('should validate search parameters', async () => {
+ it.concurrent('should validate search parameters', async () => {
    const invalidData = {
      knowledgeBaseIds: '', // Empty string
      query: '', // Empty query

@@ -314,7 +331,7 @@ describe('Knowledge Search API Route', () => {
    expect(data.data.topK).toBe(10) // Default value
  })

- it('should handle OpenAI API errors', async () => {
+ it.concurrent('should handle OpenAI API errors', async () => {
    mockGetUserId.mockResolvedValue('user-123')
    mockDbChain.limit.mockResolvedValueOnce(mockKnowledgeBases)

@@ -334,7 +351,7 @@ describe('Knowledge Search API Route', () => {
    expect(data.error).toBe('Failed to perform vector search')
  })

- it('should handle missing OpenAI API key', async () => {
+ it.concurrent('should handle missing OpenAI API key', async () => {
    vi.doMock('@/lib/env', () => ({
      env: {
        OPENAI_API_KEY: undefined,

@@ -353,7 +370,7 @@ describe('Knowledge Search API Route', () => {
    expect(data.error).toBe('Failed to perform vector search')
  })

- it('should handle database errors during search', async () => {
+ it.concurrent('should handle database errors during search', async () => {
    mockGetUserId.mockResolvedValue('user-123')
    mockDbChain.limit.mockResolvedValueOnce(mockKnowledgeBases)
    mockDbChain.limit.mockRejectedValueOnce(new Error('Database error'))

@@ -375,7 +392,7 @@ describe('Knowledge Search API Route', () => {
    expect(data.error).toBe('Failed to perform vector search')
  })

- it('should handle invalid OpenAI response format', async () => {
+ it.concurrent('should handle invalid OpenAI response format', async () => {
    mockGetUserId.mockResolvedValue('user-123')
    mockDbChain.limit.mockResolvedValueOnce(mockKnowledgeBases)

@@ -395,5 +412,124 @@ describe('Knowledge Search API Route', () => {
    expect(response.status).toBe(500)
    expect(data.error).toBe('Failed to perform vector search')
  })

  describe('Cost tracking', () => {
    it.concurrent('should include cost information in successful search response', async () => {
      mockGetUserId.mockResolvedValue('user-123')
      mockDbChain.where.mockResolvedValueOnce(mockKnowledgeBases)
      mockDbChain.limit.mockResolvedValueOnce(mockSearchResults)

      mockFetch.mockResolvedValue({
        ok: true,
        json: () =>
          Promise.resolve({
            data: [{ embedding: mockEmbedding }],
          }),
      })

      const req = createMockRequest('POST', validSearchData)
      const { POST } = await import('./route')
      const response = await POST(req)
      const data = await response.json()

      expect(response.status).toBe(200)
      expect(data.success).toBe(true)

      // Verify cost information is included
      expect(data.data.cost).toBeDefined()
      expect(data.data.cost.input).toBe(0.00001042)
      expect(data.data.cost.output).toBe(0)
      expect(data.data.cost.total).toBe(0.00001042)
      expect(data.data.cost.tokens).toEqual({
        prompt: 521,
        completion: 0,
        total: 521,
      })
      expect(data.data.cost.model).toBe('text-embedding-3-small')
      expect(data.data.cost.pricing).toEqual({
        input: 0.02,
        output: 0,
        updatedAt: '2025-07-10',
      })
    })

    it('should call cost calculation functions with correct parameters', async () => {
      const { estimateTokenCount } = await import('@/lib/tokenization/estimators')
      const { calculateCost } = await import('@/providers/utils')

      mockGetUserId.mockResolvedValue('user-123')
      mockDbChain.where.mockResolvedValueOnce(mockKnowledgeBases)
      mockDbChain.limit.mockResolvedValueOnce(mockSearchResults)

      mockFetch.mockResolvedValue({
        ok: true,
        json: () =>
          Promise.resolve({
            data: [{ embedding: mockEmbedding }],
          }),
      })

      const req = createMockRequest('POST', validSearchData)
      const { POST } = await import('./route')
      await POST(req)

      // Verify token estimation was called with correct parameters
      expect(estimateTokenCount).toHaveBeenCalledWith('test search query', 'openai')

      // Verify cost calculation was called with correct parameters
      expect(calculateCost).toHaveBeenCalledWith('text-embedding-3-small', 521, 0, false)
    })

    it('should handle cost calculation with different query lengths', async () => {
      const { estimateTokenCount } = await import('@/lib/tokenization/estimators')
      const { calculateCost } = await import('@/providers/utils')

      // Mock different token count for longer query
      vi.mocked(estimateTokenCount).mockReturnValue({
        count: 1042,
        confidence: 'high',
        provider: 'openai',
        method: 'precise',
      })
      vi.mocked(calculateCost).mockReturnValue({
        input: 0.00002084,
        output: 0,
        total: 0.00002084,
        pricing: {
          input: 0.02,
          output: 0,
          updatedAt: '2025-07-10',
        },
      })

      const longQueryData = {
        ...validSearchData,
        query:
          'This is a much longer search query with many more tokens to test cost calculation accuracy',
      }

      mockGetUserId.mockResolvedValue('user-123')
      mockDbChain.where.mockResolvedValueOnce(mockKnowledgeBases)
      mockDbChain.limit.mockResolvedValueOnce(mockSearchResults)

      mockFetch.mockResolvedValue({
        ok: true,
        json: () =>
          Promise.resolve({
            data: [{ embedding: mockEmbedding }],
          }),
      })

      const req = createMockRequest('POST', longQueryData)
      const { POST } = await import('./route')
      const response = await POST(req)
      const data = await response.json()

      expect(response.status).toBe(200)
      expect(data.data.cost.input).toBe(0.00002084)
      expect(data.data.cost.tokens.prompt).toBe(1042)
      expect(calculateCost).toHaveBeenCalledWith('text-embedding-3-small', 1042, 0, false)
    })
  })
})
})
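As with the chunk-upload tests, the mocked figures here are internally consistent: 521 tokens at the mocked $0.02 per million input tokens gives 521 x 0.02 / 1,000,000 = $0.00001042, and the longer-query case scales linearly to 1042 x 0.02 / 1,000,000 = $0.00002084.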
@@ -4,12 +4,37 @@ import { z } from 'zod'
import { retryWithExponentialBackoff } from '@/lib/documents/utils'
import { env } from '@/lib/env'
import { createLogger } from '@/lib/logs/console-logger'
import { estimateTokenCount } from '@/lib/tokenization/estimators'
import { getUserId } from '@/app/api/auth/oauth/utils'
import { db } from '@/db'
import { embedding, knowledgeBase } from '@/db/schema'
import { calculateCost } from '@/providers/utils'

const logger = createLogger('VectorSearchAPI')

function getTagFilters(filters: Record<string, string>, embedding: any) {
  return Object.entries(filters).map(([key, value]) => {
    switch (key) {
      case 'tag1':
        return sql`LOWER(${embedding.tag1}) = LOWER(${value})`
      case 'tag2':
        return sql`LOWER(${embedding.tag2}) = LOWER(${value})`
      case 'tag3':
        return sql`LOWER(${embedding.tag3}) = LOWER(${value})`
      case 'tag4':
        return sql`LOWER(${embedding.tag4}) = LOWER(${value})`
      case 'tag5':
        return sql`LOWER(${embedding.tag5}) = LOWER(${value})`
      case 'tag6':
        return sql`LOWER(${embedding.tag6}) = LOWER(${value})`
      case 'tag7':
        return sql`LOWER(${embedding.tag7}) = LOWER(${value})`
      default:
        return sql`1=1` // No-op for unknown keys
    }
  })
}

class APIError extends Error {
  public status: number

@@ -27,6 +52,17 @@ const VectorSearchSchema = z.object({
  ]),
  query: z.string().min(1, 'Search query is required'),
  topK: z.number().min(1).max(100).default(10),
  filters: z
    .object({
      tag1: z.string().optional(),
      tag2: z.string().optional(),
      tag3: z.string().optional(),
      tag4: z.string().optional(),
      tag5: z.string().optional(),
      tag6: z.string().optional(),
      tag7: z.string().optional(),
    })
    .optional(),
})

async function generateSearchEmbedding(query: string): Promise<number[]> {

@@ -102,7 +138,8 @@ async function executeParallelQueries(
  knowledgeBaseIds: string[],
  queryVector: string,
  topK: number,
- distanceThreshold: number
+ distanceThreshold: number,
+ filters?: Record<string, string>
) {
  const parallelLimit = Math.ceil(topK / knowledgeBaseIds.length) + 5

@@ -113,7 +150,13 @@
      content: embedding.content,
      documentId: embedding.documentId,
      chunkIndex: embedding.chunkIndex,
      metadata: embedding.metadata,
      tag1: embedding.tag1,
      tag2: embedding.tag2,
      tag3: embedding.tag3,
      tag4: embedding.tag4,
      tag5: embedding.tag5,
      tag6: embedding.tag6,
      tag7: embedding.tag7,
      distance: sql<number>`${embedding.embedding} <=> ${queryVector}::vector`.as('distance'),
      knowledgeBaseId: embedding.knowledgeBaseId,
    })

@@ -122,7 +165,8 @@
      and(
        eq(embedding.knowledgeBaseId, kbId),
        eq(embedding.enabled, true),
-       sql`${embedding.embedding} <=> ${queryVector}::vector < ${distanceThreshold}`
+       sql`${embedding.embedding} <=> ${queryVector}::vector < ${distanceThreshold}`,
+       ...(filters ? getTagFilters(filters, embedding) : [])
      )
    )
    .orderBy(sql`${embedding.embedding} <=> ${queryVector}::vector`)

@@ -139,7 +183,8 @@ async function executeSingleQuery(
  knowledgeBaseIds: string[],
  queryVector: string,
  topK: number,
- distanceThreshold: number
+ distanceThreshold: number,
+ filters?: Record<string, string>
) {
  return await db
    .select({

@@ -147,7 +192,13 @@
      content: embedding.content,
      documentId: embedding.documentId,
      chunkIndex: embedding.chunkIndex,
      metadata: embedding.metadata,
      tag1: embedding.tag1,
      tag2: embedding.tag2,
      tag3: embedding.tag3,
      tag4: embedding.tag4,
      tag5: embedding.tag5,
      tag6: embedding.tag6,
      tag7: embedding.tag7,
      distance: sql<number>`${embedding.embedding} <=> ${queryVector}::vector`.as('distance'),
    })
    .from(embedding)

@@ -155,7 +206,29 @@
      and(
        inArray(embedding.knowledgeBaseId, knowledgeBaseIds),
        eq(embedding.enabled, true),
-       sql`${embedding.embedding} <=> ${queryVector}::vector < ${distanceThreshold}`
+       sql`${embedding.embedding} <=> ${queryVector}::vector < ${distanceThreshold}`,
        ...(filters
          ? Object.entries(filters).map(([key, value]) => {
              switch (key) {
                case 'tag1':
                  return sql`LOWER(${embedding.tag1}) = LOWER(${value})`
                case 'tag2':
                  return sql`LOWER(${embedding.tag2}) = LOWER(${value})`
                case 'tag3':
                  return sql`LOWER(${embedding.tag3}) = LOWER(${value})`
                case 'tag4':
                  return sql`LOWER(${embedding.tag4}) = LOWER(${value})`
                case 'tag5':
                  return sql`LOWER(${embedding.tag5}) = LOWER(${value})`
                case 'tag6':
                  return sql`LOWER(${embedding.tag6}) = LOWER(${value})`
                case 'tag7':
                  return sql`LOWER(${embedding.tag7}) = LOWER(${value})`
                default:
                  return sql`1=1` // No-op for unknown keys
              }
            })
          : [])
      )
    )
    .orderBy(sql`${embedding.embedding} <=> ${queryVector}::vector`)

@@ -231,7 +304,8 @@ export async function POST(request: NextRequest) {
        foundKbIds,
        queryVector,
        validatedData.topK,
-       strategy.distanceThreshold
+       strategy.distanceThreshold,
+       validatedData.filters
      )
      results = mergeAndRankResults(parallelResults, validatedData.topK)
    } else {

@@ -240,10 +314,24 @@
        foundKbIds,
        queryVector,
        validatedData.topK,
-       strategy.distanceThreshold
+       strategy.distanceThreshold,
+       validatedData.filters
      )
    }

    // Calculate cost for the embedding (with fallback if calculation fails)
    let cost = null
    let tokenCount = null
    try {
      tokenCount = estimateTokenCount(validatedData.query, 'openai')
      cost = calculateCost('text-embedding-3-small', tokenCount.count, 0, false)
    } catch (error) {
      logger.warn(`[${requestId}] Failed to calculate cost for search query`, {
        error: error instanceof Error ? error.message : 'Unknown error',
      })
      // Continue without cost information rather than failing the search
    }

    return NextResponse.json({
      success: true,
      data: {

@@ -252,7 +340,13 @@
        content: result.content,
        documentId: result.documentId,
        chunkIndex: result.chunkIndex,
        metadata: result.metadata,
        tag1: result.tag1,
        tag2: result.tag2,
        tag3: result.tag3,
        tag4: result.tag4,
        tag5: result.tag5,
        tag6: result.tag6,
        tag7: result.tag7,
        similarity: 1 - result.distance,
      })),
      query: validatedData.query,

@@ -260,6 +354,22 @@
      knowledgeBaseId: foundKbIds[0],
      topK: validatedData.topK,
      totalResults: results.length,
      ...(cost && tokenCount
        ? {
            cost: {
              input: cost.input,
              output: cost.output,
              total: cost.total,
              tokens: {
                prompt: tokenCount.count,
                completion: 0,
                total: tokenCount.count,
              },
              model: 'text-embedding-3-small',
              pricing: cost.pricing,
            },
          }
        : {}),
    },
  })
} catch (validationError) {
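End to end, the new filters object flows from the request body through VectorSearchSchema into the SQL predicates built by getTagFilters, where each tag is compared case-insensitively. A sketch of a search request exercising it (the endpoint path, IDs, and tag values here are assumed for illustration):

const res = await fetch('/api/knowledge/search', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    knowledgeBaseIds: ['kb-123'],
    query: 'vacation policy',
    topK: 10,
    filters: { tag1: 'hr' }, // becomes LOWER(tag1) = LOWER('hr')
  }),
})

Note that executeSingleQuery inlines the same tag switch that getTagFilters already encapsulates; calling the helper in both places would remove the duplication.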
@@ -73,6 +73,14 @@ export interface DocumentData {
  enabled: boolean
  deletedAt?: Date | null
  uploadedAt: Date
  // Document tags
  tag1?: string | null
  tag2?: string | null
  tag3?: string | null
  tag4?: string | null
  tag5?: string | null
  tag6?: string | null
  tag7?: string | null
}

export interface EmbeddingData {

@@ -88,7 +96,14 @@ export interface EmbeddingData {
  embeddingModel: string
  startOffset: number
  endOffset: number
  metadata: unknown
  // Tag fields for filtering
  tag1?: string | null
  tag2?: string | null
  tag3?: string | null
  tag4?: string | null
  tag5?: string | null
  tag6?: string | null
  tag7?: string | null
  enabled: boolean
  createdAt: Date
  updatedAt: Date

@@ -445,7 +460,26 @@ export async function processDocumentAsync(
    const chunkTexts = processed.chunks.map((chunk) => chunk.text)
    const embeddings = chunkTexts.length > 0 ? await generateEmbeddings(chunkTexts) : []

-   logger.info(`[${documentId}] Embeddings generated, updating document record`)
+   logger.info(`[${documentId}] Embeddings generated, fetching document tags`)

    // Fetch document to get tags
    const documentRecord = await db
      .select({
        tag1: document.tag1,
        tag2: document.tag2,
        tag3: document.tag3,
        tag4: document.tag4,
        tag5: document.tag5,
        tag6: document.tag6,
        tag7: document.tag7,
      })
      .from(document)
      .where(eq(document.id, documentId))
      .limit(1)

    const documentTags = documentRecord[0] || {}

    logger.info(`[${documentId}] Creating embedding records with tags`)

    const embeddingRecords = processed.chunks.map((chunk, chunkIndex) => ({
      id: crypto.randomUUID(),

@@ -460,7 +494,14 @@ export async function processDocumentAsync(
      embeddingModel: 'text-embedding-3-small',
      startOffset: chunk.metadata.startIndex,
      endOffset: chunk.metadata.endIndex,
      metadata: {},
      // Copy tags from document
      tag1: documentTags.tag1,
      tag2: documentTags.tag2,
      tag3: documentTags.tag3,
      tag4: documentTags.tag4,
      tag5: documentTags.tag5,
      tag6: documentTags.tag6,
      tag7: documentTags.tag7,
      createdAt: now,
      updatedAt: now,
    }))
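Copying the document's tags onto every embedding row denormalizes the data, but it is what lets the vector search above filter on embedding.tag1 through embedding.tag7 directly, without joining back to document inside the distance-ordered query. The trade-off is that later tag edits on a document would need to be propagated to its existing embedding rows.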
apps/sim/app/api/logs/[executionId]/frozen-canvas/route.ts (new file, 76 lines)
@@ -0,0 +1,76 @@
import { eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { createLogger } from '@/lib/logs/console-logger'
import { db } from '@/db'
import { workflowExecutionLogs, workflowExecutionSnapshots } from '@/db/schema'

const logger = createLogger('FrozenCanvasAPI')

export async function GET(
  _request: NextRequest,
  { params }: { params: Promise<{ executionId: string }> }
) {
  try {
    const { executionId } = await params

    logger.debug(`Fetching frozen canvas data for execution: ${executionId}`)

    // Get the workflow execution log to find the snapshot
    const [workflowLog] = await db
      .select()
      .from(workflowExecutionLogs)
      .where(eq(workflowExecutionLogs.executionId, executionId))
      .limit(1)

    if (!workflowLog) {
      return NextResponse.json({ error: 'Workflow execution not found' }, { status: 404 })
    }

    // Get the workflow state snapshot
    const [snapshot] = await db
      .select()
      .from(workflowExecutionSnapshots)
      .where(eq(workflowExecutionSnapshots.id, workflowLog.stateSnapshotId))
      .limit(1)

    if (!snapshot) {
      return NextResponse.json({ error: 'Workflow state snapshot not found' }, { status: 404 })
    }

    const response = {
      executionId,
      workflowId: workflowLog.workflowId,
      workflowState: snapshot.stateData,
      executionMetadata: {
        trigger: workflowLog.trigger,
        startedAt: workflowLog.startedAt.toISOString(),
        endedAt: workflowLog.endedAt?.toISOString(),
        totalDurationMs: workflowLog.totalDurationMs,
        blockStats: {
          total: workflowLog.blockCount,
          success: workflowLog.successCount,
          error: workflowLog.errorCount,
          skipped: workflowLog.skippedCount,
        },
        cost: {
          total: workflowLog.totalCost ? Number.parseFloat(workflowLog.totalCost) : null,
          input: workflowLog.totalInputCost ? Number.parseFloat(workflowLog.totalInputCost) : null,
          output: workflowLog.totalOutputCost
            ? Number.parseFloat(workflowLog.totalOutputCost)
            : null,
        },
        totalTokens: workflowLog.totalTokens,
      },
    }

    logger.debug(`Successfully fetched frozen canvas data for execution: ${executionId}`)
    logger.debug(
      `Workflow state contains ${Object.keys((snapshot.stateData as any)?.blocks || {}).length} blocks`
    )

    return NextResponse.json(response)
  } catch (error) {
    logger.error('Error fetching frozen canvas data:', error)
    return NextResponse.json({ error: 'Failed to fetch frozen canvas data' }, { status: 500 })
  }
}
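Given the file's location, Next.js serves this handler at /api/logs/{executionId}/frozen-canvas, and the response shape above is self-describing, so a client can replay a run with one call. A minimal sketch (field names taken from the handler; the relative base URL is assumed):

async function loadFrozenCanvas(executionId: string) {
  const res = await fetch(`/api/logs/${executionId}/frozen-canvas`)
  if (!res.ok) {
    // 404 covers both a missing execution log and a missing snapshot
    throw new Error(`Failed to load frozen canvas: ${res.status}`)
  }
  const data = await res.json()
  // workflowState is the snapshot's stateData: the block graph as it existed at run time
  console.log(Object.keys(data.workflowState?.blocks ?? {}).length, 'blocks')
  return data
}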
Some files were not shown because too many files have changed in this diff.