mirror of
https://github.com/simstudioai/sim.git
synced 2026-02-06 20:55:23 -05:00
Compare commits
6 Commits
v0.5.82
...
feat/tool-
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
80ea3e7783 | ||
|
|
dc7c321970 | ||
|
|
1edaf197b2 | ||
|
|
474b1af145 | ||
|
|
1e21ec1fa3 | ||
|
|
71bd535d04 |
@@ -1131,6 +1131,32 @@ export function AirtableIcon(props: SVGProps<SVGSVGElement>) {
|
|||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
export function AirweaveIcon(props: SVGProps<SVGSVGElement>) {
|
||||||
|
return (
|
||||||
|
<svg
|
||||||
|
{...props}
|
||||||
|
width='143'
|
||||||
|
height='143'
|
||||||
|
viewBox='0 0 143 143'
|
||||||
|
fill='none'
|
||||||
|
xmlns='http://www.w3.org/2000/svg'
|
||||||
|
>
|
||||||
|
<path
|
||||||
|
d='M89.8854 128.872C79.9165 123.339 66.7502 115.146 60.5707 107.642L60.0432 107.018C58.7836 105.5 57.481 104.014 56.1676 102.593C51.9152 97.9641 47.3614 93.7978 42.646 90.2021C40.7405 88.7487 38.7704 87.3492 36.8111 86.0789C35.7991 85.4222 34.8302 84.8193 33.9151 84.2703C31.6221 82.903 28.8338 82.5263 26.2716 83.2476C23.8385 83.9366 21.89 85.5406 20.7596 87.7476C18.5634 92.0323 20.0814 97.3289 24.2046 99.805C27.5204 101.786 30.7608 104.111 33.8398 106.717C34.2381 107.05 34.3996 107.578 34.2596 108.062C33.1292 112.185 31.9989 118.957 31.5682 121.67C30.6424 127.429 33.4737 133.081 38.5982 135.751L38.7812 135.848C41.0204 137 43.6472 136.946 45.8219 135.697C47.9858 134.459 49.353 132.231 49.4822 129.733C49.536 128.657 49.6006 127.58 49.676 126.59C49.719 126.062 50.042 125.632 50.5264 125.459C50.6772 125.406 50.8494 125.373 51.0001 125.373C51.3554 125.373 51.6784 125.513 51.9475 125.782C56.243 130.185 60.8829 134.169 65.7167 137.625C70.3674 140.951 75.8686 142.706 81.639 142.706C83.7383 142.706 85.8376 142.469 87.8938 141.995L88.1199 141.942C90.9943 141.274 93.029 139.024 93.4488 136.085C93.8687 133.146 92.4476 130.315 89.8747 128.883H89.8639L89.8854 128.872Z'
|
||||||
|
fill='currentColor'
|
||||||
|
/>
|
||||||
|
<path
|
||||||
|
d='M142.551 58.1747L142.529 58.0563C142.045 55.591 140.118 53.7069 137.598 53.2548C135.112 52.8134 132.754 53.8577 131.484 55.9893L131.408 56.1077C126.704 64.1604 120.061 71.6101 111.653 78.2956C109.446 80.0504 107.293 81.902 105.226 83.8075C103.644 85.2717 101.265 85.53 99.4452 84.4212C97.6474 83.3339 95.8495 82.1389 94.1055 80.8686C90.3268 78.1233 86.6772 74.9475 83.2753 71.4271C81.4989 69.597 79.798 67.6915 78.1939 65.7321C76.0408 63.1161 73.7477 60.5539 71.3685 58.1316C66.3195 52.9857 56.6089 45.9127 53.7453 43.878C53.3792 43.6304 53.1639 43.2428 53.0993 42.8014C53.0455 42.3601 53.1639 41.9509 53.4546 41.6064C55.274 39.4318 56.9965 37.1818 58.5683 34.921C60.2369 32.5311 60.786 29.6028 60.0862 26.8899C59.408 24.2523 57.6424 22.11 55.134 20.8827C50.9139 18.7942 45.8972 20.0968 43.2273 23.9293C40.8373 27.3636 38.0167 30.7332 34.8732 33.9306C34.5718 34.232 34.1304 34.3397 33.7213 34.1889C30.5239 33.1447 27.2296 32.2942 23.9461 31.659C23.7093 31.616 23.354 31.5514 22.9126 31.4975C16.4102 30.5286 10.1123 33.7798 7.21639 39.5717L7.1195 39.7548C6.18289 41.628 6.26902 43.8349 7.32405 45.6651C8.40061 47.5167 10.3277 48.701 12.4592 48.8194C13.4604 48.8732 14.4401 48.9378 15.3659 49.0024C15.7966 49.0347 16.1411 49.2823 16.3025 49.6914C16.4533 50.1112 16.3671 50.5419 16.0657 50.8541C12.147 54.8804 8.60515 59.1974 5.5262 63.6867C1.1446 70.0814 -0.481008 78.2095 1.08 85.9822L1.10154 86.1006C1.70441 89.0719 4.05131 91.2035 7.07644 91.5264C9.98315 91.8386 12.6099 90.3208 13.7619 87.6724L13.8265 87.5109C18.6925 75.8625 26.7559 65.5168 37.7907 56.7536C38.3182 56.3445 39.0072 56.28 39.567 56.5922C45.3373 59.768 50.8601 63.902 55.9738 68.8864C56.5982 69.4893 56.6089 70.5013 56.0168 71.1257C53.4761 73.8063 51.0862 76.6054 48.9115 79.469C47.2106 81.7083 47.5335 84.8949 49.6221 86.7358L53.3254 89.9977L53.2824 90.0409C53.8637 90.5576 54.445 91.0744 55.0264 91.5911L55.8123 92.194C56.9319 93.1844 58.3529 93.6365 59.8386 93.4858C61.3027 93.3351 62.67 92.56 63.5635 91.3758C65.1353 89.2873 
66.8578 87.2525 68.6556 85.304C68.957 84.9702 69.3661 84.798 69.8075 84.7872C70.2705 84.7872 70.6257 84.9379 70.9164 85.2286C75.8147 90.0624 81.1114 94.3686 86.6772 97.9966C88.8626 99.4176 89.4978 102.26 88.1306 104.477C86.9248 106.448 85.7729 108.493 84.7179 110.539C83.5014 112.918 83.2968 115.738 84.1688 118.257C84.9978 120.68 86.7095 122.585 88.981 123.64C90.2514 124.232 91.5971 124.534 92.9859 124.534C96.5062 124.534 99.682 122.596 101.286 119.452C102.729 116.61 104.419 113.8 106.281 111.131C107.369 109.559 109.36 108.838 111.255 109.322C115.26 110.355 120.643 111.421 124.454 112.143C128.308 112.864 132.119 111.023 133.96 107.578L134.143 107.233C135.521 104.628 135.531 101.506 134.164 98.8901C132.786 96.2526 130.181 94.4655 127.21 94.121C126.478 94.0349 125.778 93.9488 125.11 93.8626C124.97 93.8411 124.852 93.8196 124.744 93.798L123.356 93.4751L124.357 92.4523C124.432 92.377 124.529 92.2801 124.658 92.194C128.771 88.8028 132.571 85.1963 135.962 81.4714C141.668 75.1951 144.122 66.4965 142.518 58.1747H142.529H142.551Z'
|
||||||
|
fill='currentColor'
|
||||||
|
/>
|
||||||
|
<path
|
||||||
|
d='M56.6506 14.3371C65.5861 19.6338 77.4067 27.3743 82.9833 34.1674C83.64 34.9532 84.2967 35.7391 84.9534 36.4927C86.1591 37.8815 86.2991 39.8731 85.2979 41.4233C83.4892 44.2116 81.4115 46.9569 79.1399 49.5945C77.4713 51.5107 77.4067 54.3098 78.9785 56.2476L79.0431 56.323C79.2261 56.5598 79.4306 56.8074 79.6136 57.0442C81.2931 59.1758 83.0801 61.2213 84.9211 63.1375C85.9007 64.1603 87.2249 64.7309 88.6352 64.7309L88.7644 65.5275L88.7429 64.7309C90.207 64.6986 91.6173 64.0526 92.5969 62.933C94.8362 60.4031 96.9247 57.744 98.8302 55.0633C100.133 53.2224 102.63 52.8026 104.525 54.1052C106.463 55.4402 108.465 56.7105 110.457 57.8839C112.793 59.2511 115.614 59.5095 118.165 58.5621C120.749 57.604 122.762 55.5694 123.656 52.9533C125.055 48.9055 123.257 44.2547 119.382 41.9078C116.755 40.3145 114.15 38.5166 111.674 36.5788C110.382 35.5561 109.833 33.8767 110.296 32.2941C111.437 28.3001 112.481 23.1218 113.148 19.4831C113.837 15.7259 112.147 11.8826 108.939 9.94477L108.562 9.72944C105.871 8.12537 102.587 8.00696 99.7668 9.40649C96.9247 10.8168 95.03 13.5405 94.6855 16.6733L94.6639 16.867C94.6209 17.2546 94.384 17.5453 94.018 17.6637C93.652 17.7821 93.2859 17.6852 93.0168 17.4269C89.0012 13.1422 84.738 9.25576 80.3134 5.8646C74.3708 1.31075 66.7811 -0.583999 59.4928 0.675575L59.1805 0.729423C56.1124 1.2677 53.7547 3.60383 53.1949 6.68279C52.6351 9.72946 53.9915 12.7223 56.6722 14.3048H56.6614L56.6506 14.3371Z'
|
||||||
|
fill='currentColor'
|
||||||
|
/>
|
||||||
|
</svg>
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
export function GoogleDocsIcon(props: SVGProps<SVGSVGElement>) {
|
export function GoogleDocsIcon(props: SVGProps<SVGSVGElement>) {
|
||||||
return (
|
return (
|
||||||
<svg
|
<svg
|
||||||
@@ -5436,3 +5462,24 @@ export function EnrichSoIcon(props: SVGProps<SVGSVGElement>) {
|
|||||||
</svg>
|
</svg>
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
export function AgentSkillsIcon(props: SVGProps<SVGSVGElement>) {
|
||||||
|
return (
|
||||||
|
<svg
|
||||||
|
{...props}
|
||||||
|
xmlns='http://www.w3.org/2000/svg'
|
||||||
|
width='16'
|
||||||
|
height='16'
|
||||||
|
viewBox='0 0 16 16'
|
||||||
|
fill='none'
|
||||||
|
>
|
||||||
|
<path
|
||||||
|
d='M8 1L14.0622 4.5V11.5L8 15L1.93782 11.5V4.5L8 1Z'
|
||||||
|
stroke='currentColor'
|
||||||
|
strokeWidth='1.5'
|
||||||
|
fill='none'
|
||||||
|
/>
|
||||||
|
<path d='M8 4.5L11 6.25V9.75L8 11.5L5 9.75V6.25L8 4.5Z' fill='currentColor' />
|
||||||
|
</svg>
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|||||||
@@ -7,6 +7,7 @@ import {
|
|||||||
A2AIcon,
|
A2AIcon,
|
||||||
AhrefsIcon,
|
AhrefsIcon,
|
||||||
AirtableIcon,
|
AirtableIcon,
|
||||||
|
AirweaveIcon,
|
||||||
ApifyIcon,
|
ApifyIcon,
|
||||||
ApolloIcon,
|
ApolloIcon,
|
||||||
ArxivIcon,
|
ArxivIcon,
|
||||||
@@ -141,6 +142,7 @@ export const blockTypeToIconMap: Record<string, IconComponent> = {
|
|||||||
a2a: A2AIcon,
|
a2a: A2AIcon,
|
||||||
ahrefs: AhrefsIcon,
|
ahrefs: AhrefsIcon,
|
||||||
airtable: AirtableIcon,
|
airtable: AirtableIcon,
|
||||||
|
airweave: AirweaveIcon,
|
||||||
apify: ApifyIcon,
|
apify: ApifyIcon,
|
||||||
apollo: ApolloIcon,
|
apollo: ApolloIcon,
|
||||||
arxiv: ArxivIcon,
|
arxiv: ArxivIcon,
|
||||||
|
|||||||
@@ -10,6 +10,7 @@
|
|||||||
"connections",
|
"connections",
|
||||||
"mcp",
|
"mcp",
|
||||||
"copilot",
|
"copilot",
|
||||||
|
"skills",
|
||||||
"knowledgebase",
|
"knowledgebase",
|
||||||
"variables",
|
"variables",
|
||||||
"execution",
|
"execution",
|
||||||
|
|||||||
134
apps/docs/content/docs/en/skills/index.mdx
Normal file
134
apps/docs/content/docs/en/skills/index.mdx
Normal file
@@ -0,0 +1,134 @@
|
|||||||
|
---
|
||||||
|
title: Agent Skills
|
||||||
|
---
|
||||||
|
|
||||||
|
import { Callout } from 'fumadocs-ui/components/callout'
|
||||||
|
|
||||||
|
Agent Skills are reusable packages of instructions that give your AI agents specialized capabilities. Based on the open [Agent Skills](https://agentskills.io) format, skills let you capture domain expertise, workflows, and best practices that agents can load on demand.
|
||||||
|
|
||||||
|
## How Skills Work
|
||||||
|
|
||||||
|
Skills use **progressive disclosure** to keep agent context lean:
|
||||||
|
|
||||||
|
1. **Discovery** — Only skill names and descriptions are included in the agent's system prompt (~50-100 tokens each)
|
||||||
|
2. **Activation** — When the agent decides a skill is relevant, it calls the `load_skill` tool to load the full instructions into context
|
||||||
|
3. **Execution** — The agent follows the loaded instructions to complete the task
|
||||||
|
|
||||||
|
This means you can attach many skills to an agent without bloating its context window. The agent only loads what it needs.
|
||||||
|
|
||||||
|
## Creating Skills
|
||||||
|
|
||||||
|
Go to **Settings** and select **Skills** under the Tools section.
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
Click **Add** to create a new skill with three fields:
|
||||||
|
|
||||||
|
| Field | Description |
|
||||||
|
|-------|-------------|
|
||||||
|
| **Name** | A kebab-case identifier (e.g. `sql-expert`, `code-reviewer`). Max 64 characters. |
|
||||||
|
| **Description** | A short explanation of what the skill does and when to use it. This is what the agent reads to decide whether to activate the skill. Max 1024 characters. |
|
||||||
|
| **Content** | The full skill instructions in markdown. This is loaded when the agent activates the skill. |
|
||||||
|
|
||||||
|
<Callout type="info">
|
||||||
|
The description is critical — it's the only thing the agent sees before deciding to load a skill. Be specific about when and why the skill should be used.
|
||||||
|
</Callout>
|
||||||
|
|
||||||
|
### Writing Good Skill Content
|
||||||
|
|
||||||
|
Skill content follows the same conventions as [SKILL.md files](https://agentskills.io/specification):
|
||||||
|
|
||||||
|
```markdown
|
||||||
|
# SQL Expert
|
||||||
|
|
||||||
|
## When to use this skill
|
||||||
|
Use when the user asks you to write, optimize, or debug SQL queries.
|
||||||
|
|
||||||
|
## Instructions
|
||||||
|
1. Always ask which database engine (PostgreSQL, MySQL, SQLite)
|
||||||
|
2. Use CTEs over subqueries for readability
|
||||||
|
3. Add index recommendations when relevant
|
||||||
|
4. Explain query plans for optimization requests
|
||||||
|
|
||||||
|
## Common Patterns
|
||||||
|
...
|
||||||
|
```
|
||||||
|
|
||||||
|
**Recommended structure:**
|
||||||
|
- **When to use** — Specific triggers and scenarios
|
||||||
|
- **Instructions** — Step-by-step guidance with numbered lists
|
||||||
|
- **Examples** — Input/output samples showing expected behavior
|
||||||
|
- **Common Patterns** — Reusable approaches for frequent tasks
|
||||||
|
- **Edge Cases** — Gotchas and special considerations
|
||||||
|
|
||||||
|
Keep skills focused and under 500 lines. If a skill grows too large, split it into multiple specialized skills.
|
||||||
|
|
||||||
|
## Adding Skills to an Agent
|
||||||
|
|
||||||
|
Open any **Agent** block and find the **Skills** dropdown below the tools section. Select the skills you want the agent to have access to.
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
Selected skills appear as cards that you can click to edit or remove.
|
||||||
|
|
||||||
|
### What Happens at Runtime
|
||||||
|
|
||||||
|
When the workflow runs:
|
||||||
|
|
||||||
|
1. The agent's system prompt includes an `<available_skills>` section listing each skill's name and description
|
||||||
|
2. A `load_skill` tool is automatically added to the agent's available tools
|
||||||
|
3. When the agent determines a skill is relevant to the current task, it calls `load_skill` with the skill name
|
||||||
|
4. The full skill content is returned as a tool response, giving the agent detailed instructions
|
||||||
|
|
||||||
|
This works across all supported LLM providers — the `load_skill` tool uses standard tool-calling, so no provider-specific configuration is needed.
|
||||||
|
|
||||||
|
## Common Use Cases
|
||||||
|
|
||||||
|
Skills are most valuable when agents need specialized knowledge or multi-step workflows:
|
||||||
|
|
||||||
|
**Domain Expertise**
|
||||||
|
- `api-integration-expert` — Best practices for calling specific APIs (authentication, rate limiting, error handling)
|
||||||
|
- `data-transformation` — ETL patterns, data cleaning, and validation rules
|
||||||
|
- `code-reviewer` — Code review guidelines specific to your team's standards
|
||||||
|
|
||||||
|
**Workflow Templates**
|
||||||
|
- `bug-investigation` — Step-by-step debugging methodology (reproduce → isolate → test → fix)
|
||||||
|
- `feature-implementation` — Development workflow from requirements to deployment
|
||||||
|
- `document-generator` — Templates and formatting rules for technical documentation
|
||||||
|
|
||||||
|
**Company-Specific Knowledge**
|
||||||
|
- `our-architecture` — System architecture diagrams, service dependencies, and deployment processes
|
||||||
|
- `style-guide` — Brand guidelines, writing tone, UI/UX patterns
|
||||||
|
- `customer-onboarding` — Standard procedures and common customer questions
|
||||||
|
|
||||||
|
**When to use skills vs. agent instructions:**
|
||||||
|
- Use **skills** for knowledge that applies across multiple workflows or changes frequently
|
||||||
|
- Use **agent instructions** for task-specific context that's unique to a single agent
|
||||||
|
|
||||||
|
## Best Practices
|
||||||
|
|
||||||
|
**Writing Effective Descriptions**
|
||||||
|
- **Be specific and keyword-rich** — Instead of "Helps with SQL", write "Write optimized SQL queries for PostgreSQL, MySQL, and SQLite, including index recommendations and query plan analysis"
|
||||||
|
- **Include activation triggers** — Mention specific words or phrases that should prompt the skill (e.g., "Use when the user mentions PDFs, forms, or document extraction")
|
||||||
|
- **Keep it under 200 words** — Agents scan descriptions quickly; make every word count
|
||||||
|
|
||||||
|
**Skill Scope and Organization**
|
||||||
|
- **One skill per domain** — A focused `sql-expert` skill works better than a broad `database-everything` skill
|
||||||
|
- **Limit to 5-10 skills per agent** — More skills = more decision overhead; start small and add as needed
|
||||||
|
- **Split large skills** — If a skill exceeds 500 lines, break it into focused sub-skills
|
||||||
|
|
||||||
|
**Content Structure**
|
||||||
|
- **Use markdown formatting** — Headers, lists, and code blocks help agents parse and follow instructions
|
||||||
|
- **Provide examples** — Show input/output pairs so agents understand expected behavior
|
||||||
|
- **Be explicit about edge cases** — Don't assume agents will infer special handling
|
||||||
|
|
||||||
|
**Testing and Iteration**
|
||||||
|
- **Test activation** — Run your workflow and verify the agent loads the skill when expected
|
||||||
|
- **Check for false positives** — Make sure skills aren't activating when they shouldn't
|
||||||
|
- **Refine descriptions** — If a skill isn't loading when needed, add more keywords to the description
|
||||||
|
|
||||||
|
## Learn More
|
||||||
|
|
||||||
|
- [Agent Skills specification](https://agentskills.io) — The open format for portable agent skills
|
||||||
|
- [Example skills](https://github.com/anthropics/skills) — Browse community skill examples
|
||||||
|
- [Best practices](https://agentskills.io/what-are-skills) — Writing effective skills
|
||||||
67
apps/docs/content/docs/en/tools/airweave.mdx
Normal file
67
apps/docs/content/docs/en/tools/airweave.mdx
Normal file
@@ -0,0 +1,67 @@
|
|||||||
|
---
|
||||||
|
title: Airweave
|
||||||
|
description: Search your synced data collections
|
||||||
|
---
|
||||||
|
|
||||||
|
import { BlockInfoCard } from "@/components/ui/block-info-card"
|
||||||
|
|
||||||
|
<BlockInfoCard
|
||||||
|
type="airweave"
|
||||||
|
color="#6366F1"
|
||||||
|
/>
|
||||||
|
|
||||||
|
{/* MANUAL-CONTENT-START:intro */}
|
||||||
|
[Airweave](https://airweave.ai/) is an AI-powered semantic search platform that helps you discover and retrieve knowledge across all your synced data sources. Built for modern teams, Airweave enables fast, relevant search results using neural, hybrid, or keyword-based strategies tailored to your needs.
|
||||||
|
|
||||||
|
With Airweave, you can:
|
||||||
|
|
||||||
|
- **Search smarter**: Use natural language queries to uncover information stored across your connected tools and databases
|
||||||
|
- **Unify your data**: Seamlessly access content from sources like code, docs, chat, emails, cloud files, and more
|
||||||
|
- **Customize retrieval**: Select between hybrid (semantic + keyword), neural, or keyword search strategies for optimal results
|
||||||
|
- **Boost recall**: Expand search queries with AI to find more comprehensive answers
|
||||||
|
- **Rerank results using AI**: Prioritize the most relevant answers with powerful language models
|
||||||
|
- **Get instant answers**: Generate clear, AI-powered responses synthesized from your data
|
||||||
|
|
||||||
|
In Sim, the Airweave integration empowers your agents to search, summarize, and extract insights from all your organization’s data via a single tool. Use Airweave to drive rich, contextual knowledge retrieval within your workflows—whether answering questions, generating summaries, or supporting dynamic decision-making.
|
||||||
|
{/* MANUAL-CONTENT-END */}
|
||||||
|
|
||||||
|
## Usage Instructions
|
||||||
|
|
||||||
|
Search across your synced data sources using Airweave. Supports semantic search with hybrid, neural, or keyword retrieval strategies. Optionally generate AI-powered answers from search results.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
## Tools
|
||||||
|
|
||||||
|
### `airweave_search`
|
||||||
|
|
||||||
|
Search your synced data collections using Airweave. Supports semantic search with hybrid, neural, or keyword retrieval strategies. Optionally generate AI-powered answers from search results.
|
||||||
|
|
||||||
|
#### Input
|
||||||
|
|
||||||
|
| Parameter | Type | Required | Description |
|
||||||
|
| --------- | ---- | -------- | ----------- |
|
||||||
|
| `apiKey` | string | Yes | Airweave API Key for authentication |
|
||||||
|
| `collectionId` | string | Yes | The readable ID of the collection to search |
|
||||||
|
| `query` | string | Yes | The search query text |
|
||||||
|
| `limit` | number | No | Maximum number of results to return \(default: 100\) |
|
||||||
|
| `retrievalStrategy` | string | No | Retrieval strategy: hybrid \(default\), neural, or keyword |
|
||||||
|
| `expandQuery` | boolean | No | Generate query variations to improve recall |
|
||||||
|
| `rerank` | boolean | No | Reorder results for improved relevance using LLM |
|
||||||
|
| `generateAnswer` | boolean | No | Generate a natural-language answer to the query |
|
||||||
|
|
||||||
|
#### Output
|
||||||
|
|
||||||
|
| Parameter | Type | Description |
|
||||||
|
| --------- | ---- | ----------- |
|
||||||
|
| `results` | array | Search results with content, scores, and metadata from your synced data |
|
||||||
|
| ↳ `entity_id` | string | Unique identifier for the search result entity |
|
||||||
|
| ↳ `source_name` | string | Name of the data source \(e.g., "GitHub", "Slack"\) |
|
||||||
|
| ↳ `md_content` | string | Markdown-formatted content of the result |
|
||||||
|
| ↳ `score` | number | Relevance score from the search |
|
||||||
|
| ↳ `metadata` | object | Additional metadata associated with the result |
|
||||||
|
| ↳ `breadcrumbs` | array | Navigation path to the result within its source |
|
||||||
|
| ↳ `url` | string | URL to the original content |
|
||||||
|
| `completion` | string | AI-generated answer to the query \(when generateAnswer is enabled\) |
|
||||||
|
|
||||||
|
|
||||||
@@ -4,6 +4,7 @@
|
|||||||
"a2a",
|
"a2a",
|
||||||
"ahrefs",
|
"ahrefs",
|
||||||
"airtable",
|
"airtable",
|
||||||
|
"airweave",
|
||||||
"apify",
|
"apify",
|
||||||
"apollo",
|
"apollo",
|
||||||
"arxiv",
|
"arxiv",
|
||||||
|
|||||||
BIN
apps/docs/public/static/skills/add-skill.png
Normal file
BIN
apps/docs/public/static/skills/add-skill.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 28 KiB |
BIN
apps/docs/public/static/skills/manage-skills.png
Normal file
BIN
apps/docs/public/static/skills/manage-skills.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 56 KiB |
@@ -285,6 +285,14 @@ export async function POST(req: NextRequest) {
|
|||||||
apiVersion: 'preview',
|
apiVersion: 'preview',
|
||||||
endpoint: env.AZURE_OPENAI_ENDPOINT,
|
endpoint: env.AZURE_OPENAI_ENDPOINT,
|
||||||
}
|
}
|
||||||
|
} else if (providerEnv === 'azure-anthropic') {
|
||||||
|
providerConfig = {
|
||||||
|
provider: 'azure-anthropic',
|
||||||
|
model: envModel,
|
||||||
|
apiKey: env.AZURE_ANTHROPIC_API_KEY,
|
||||||
|
apiVersion: env.AZURE_ANTHROPIC_API_VERSION,
|
||||||
|
endpoint: env.AZURE_ANTHROPIC_ENDPOINT,
|
||||||
|
}
|
||||||
} else if (providerEnv === 'vertex') {
|
} else if (providerEnv === 'vertex') {
|
||||||
providerConfig = {
|
providerConfig = {
|
||||||
provider: 'vertex',
|
provider: 'vertex',
|
||||||
|
|||||||
@@ -23,7 +23,16 @@ export async function POST(request: NextRequest) {
|
|||||||
topK,
|
topK,
|
||||||
model,
|
model,
|
||||||
apiKey,
|
apiKey,
|
||||||
|
azureEndpoint,
|
||||||
|
azureApiVersion,
|
||||||
|
vertexProject,
|
||||||
|
vertexLocation,
|
||||||
|
vertexCredential,
|
||||||
|
bedrockAccessKeyId,
|
||||||
|
bedrockSecretKey,
|
||||||
|
bedrockRegion,
|
||||||
workflowId,
|
workflowId,
|
||||||
|
workspaceId,
|
||||||
piiEntityTypes,
|
piiEntityTypes,
|
||||||
piiMode,
|
piiMode,
|
||||||
piiLanguage,
|
piiLanguage,
|
||||||
@@ -110,7 +119,18 @@ export async function POST(request: NextRequest) {
|
|||||||
topK,
|
topK,
|
||||||
model,
|
model,
|
||||||
apiKey,
|
apiKey,
|
||||||
|
{
|
||||||
|
azureEndpoint,
|
||||||
|
azureApiVersion,
|
||||||
|
vertexProject,
|
||||||
|
vertexLocation,
|
||||||
|
vertexCredential,
|
||||||
|
bedrockAccessKeyId,
|
||||||
|
bedrockSecretKey,
|
||||||
|
bedrockRegion,
|
||||||
|
},
|
||||||
workflowId,
|
workflowId,
|
||||||
|
workspaceId,
|
||||||
piiEntityTypes,
|
piiEntityTypes,
|
||||||
piiMode,
|
piiMode,
|
||||||
piiLanguage,
|
piiLanguage,
|
||||||
@@ -178,7 +198,18 @@ async function executeValidation(
|
|||||||
topK: string | undefined,
|
topK: string | undefined,
|
||||||
model: string,
|
model: string,
|
||||||
apiKey: string | undefined,
|
apiKey: string | undefined,
|
||||||
|
providerCredentials: {
|
||||||
|
azureEndpoint?: string
|
||||||
|
azureApiVersion?: string
|
||||||
|
vertexProject?: string
|
||||||
|
vertexLocation?: string
|
||||||
|
vertexCredential?: string
|
||||||
|
bedrockAccessKeyId?: string
|
||||||
|
bedrockSecretKey?: string
|
||||||
|
bedrockRegion?: string
|
||||||
|
},
|
||||||
workflowId: string | undefined,
|
workflowId: string | undefined,
|
||||||
|
workspaceId: string | undefined,
|
||||||
piiEntityTypes: string[] | undefined,
|
piiEntityTypes: string[] | undefined,
|
||||||
piiMode: string | undefined,
|
piiMode: string | undefined,
|
||||||
piiLanguage: string | undefined,
|
piiLanguage: string | undefined,
|
||||||
@@ -219,7 +250,9 @@ async function executeValidation(
|
|||||||
topK: topK ? Number.parseInt(topK) : 10, // Default topK is 10
|
topK: topK ? Number.parseInt(topK) : 10, // Default topK is 10
|
||||||
model: model,
|
model: model,
|
||||||
apiKey,
|
apiKey,
|
||||||
|
providerCredentials,
|
||||||
workflowId,
|
workflowId,
|
||||||
|
workspaceId,
|
||||||
requestId,
|
requestId,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -24,6 +24,7 @@ const configSchema = z.object({
|
|||||||
hideFilesTab: z.boolean().optional(),
|
hideFilesTab: z.boolean().optional(),
|
||||||
disableMcpTools: z.boolean().optional(),
|
disableMcpTools: z.boolean().optional(),
|
||||||
disableCustomTools: z.boolean().optional(),
|
disableCustomTools: z.boolean().optional(),
|
||||||
|
disableSkills: z.boolean().optional(),
|
||||||
hideTemplates: z.boolean().optional(),
|
hideTemplates: z.boolean().optional(),
|
||||||
disableInvitations: z.boolean().optional(),
|
disableInvitations: z.boolean().optional(),
|
||||||
hideDeployApi: z.boolean().optional(),
|
hideDeployApi: z.boolean().optional(),
|
||||||
|
|||||||
@@ -25,6 +25,7 @@ const configSchema = z.object({
|
|||||||
hideFilesTab: z.boolean().optional(),
|
hideFilesTab: z.boolean().optional(),
|
||||||
disableMcpTools: z.boolean().optional(),
|
disableMcpTools: z.boolean().optional(),
|
||||||
disableCustomTools: z.boolean().optional(),
|
disableCustomTools: z.boolean().optional(),
|
||||||
|
disableSkills: z.boolean().optional(),
|
||||||
hideTemplates: z.boolean().optional(),
|
hideTemplates: z.boolean().optional(),
|
||||||
disableInvitations: z.boolean().optional(),
|
disableInvitations: z.boolean().optional(),
|
||||||
hideDeployApi: z.boolean().optional(),
|
hideDeployApi: z.boolean().optional(),
|
||||||
|
|||||||
182
apps/sim/app/api/skills/route.ts
Normal file
182
apps/sim/app/api/skills/route.ts
Normal file
@@ -0,0 +1,182 @@
|
|||||||
|
import { db } from '@sim/db'
|
||||||
|
import { skill } from '@sim/db/schema'
|
||||||
|
import { createLogger } from '@sim/logger'
|
||||||
|
import { and, desc, eq } from 'drizzle-orm'
|
||||||
|
import { type NextRequest, NextResponse } from 'next/server'
|
||||||
|
import { z } from 'zod'
|
||||||
|
import { checkSessionOrInternalAuth } from '@/lib/auth/hybrid'
|
||||||
|
import { generateRequestId } from '@/lib/core/utils/request'
|
||||||
|
import { upsertSkills } from '@/lib/workflows/skills/operations'
|
||||||
|
import { getUserEntityPermissions } from '@/lib/workspaces/permissions/utils'
|
||||||
|
|
||||||
|
const logger = createLogger('SkillsAPI')
|
||||||
|
|
||||||
|
const SkillSchema = z.object({
|
||||||
|
skills: z.array(
|
||||||
|
z.object({
|
||||||
|
id: z.string().optional(),
|
||||||
|
name: z
|
||||||
|
.string()
|
||||||
|
.min(1, 'Skill name is required')
|
||||||
|
.max(64)
|
||||||
|
.regex(/^[a-z0-9]+(-[a-z0-9]+)*$/, 'Name must be kebab-case (e.g. my-skill)'),
|
||||||
|
description: z.string().min(1, 'Description is required').max(1024),
|
||||||
|
content: z.string().min(1, 'Content is required').max(50000, 'Content is too large'),
|
||||||
|
})
|
||||||
|
),
|
||||||
|
workspaceId: z.string().optional(),
|
||||||
|
})
|
||||||
|
|
||||||
|
/** GET - Fetch all skills for a workspace */
|
||||||
|
export async function GET(request: NextRequest) {
|
||||||
|
const requestId = generateRequestId()
|
||||||
|
const searchParams = request.nextUrl.searchParams
|
||||||
|
const workspaceId = searchParams.get('workspaceId')
|
||||||
|
|
||||||
|
try {
|
||||||
|
const authResult = await checkSessionOrInternalAuth(request, { requireWorkflowId: false })
|
||||||
|
if (!authResult.success || !authResult.userId) {
|
||||||
|
logger.warn(`[${requestId}] Unauthorized skills access attempt`)
|
||||||
|
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
|
||||||
|
}
|
||||||
|
|
||||||
|
const userId = authResult.userId
|
||||||
|
|
||||||
|
if (!workspaceId) {
|
||||||
|
logger.warn(`[${requestId}] Missing workspaceId`)
|
||||||
|
return NextResponse.json({ error: 'workspaceId is required' }, { status: 400 })
|
||||||
|
}
|
||||||
|
|
||||||
|
const userPermission = await getUserEntityPermissions(userId, 'workspace', workspaceId)
|
||||||
|
if (!userPermission) {
|
||||||
|
logger.warn(`[${requestId}] User ${userId} does not have access to workspace ${workspaceId}`)
|
||||||
|
return NextResponse.json({ error: 'Access denied' }, { status: 403 })
|
||||||
|
}
|
||||||
|
|
||||||
|
const result = await db
|
||||||
|
.select()
|
||||||
|
.from(skill)
|
||||||
|
.where(eq(skill.workspaceId, workspaceId))
|
||||||
|
.orderBy(desc(skill.createdAt))
|
||||||
|
|
||||||
|
return NextResponse.json({ data: result }, { status: 200 })
|
||||||
|
} catch (error) {
|
||||||
|
logger.error(`[${requestId}] Error fetching skills:`, error)
|
||||||
|
return NextResponse.json({ error: 'Failed to fetch skills' }, { status: 500 })
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/** POST - Create or update skills */
|
||||||
|
export async function POST(req: NextRequest) {
|
||||||
|
const requestId = generateRequestId()
|
||||||
|
|
||||||
|
try {
|
||||||
|
const authResult = await checkSessionOrInternalAuth(req, { requireWorkflowId: false })
|
||||||
|
if (!authResult.success || !authResult.userId) {
|
||||||
|
logger.warn(`[${requestId}] Unauthorized skills update attempt`)
|
||||||
|
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
|
||||||
|
}
|
||||||
|
|
||||||
|
const userId = authResult.userId
|
||||||
|
const body = await req.json()
|
||||||
|
|
||||||
|
try {
|
||||||
|
const { skills, workspaceId } = SkillSchema.parse(body)
|
||||||
|
|
||||||
|
if (!workspaceId) {
|
||||||
|
logger.warn(`[${requestId}] Missing workspaceId in request body`)
|
||||||
|
return NextResponse.json({ error: 'workspaceId is required' }, { status: 400 })
|
||||||
|
}
|
||||||
|
|
||||||
|
const userPermission = await getUserEntityPermissions(userId, 'workspace', workspaceId)
|
||||||
|
if (!userPermission || (userPermission !== 'admin' && userPermission !== 'write')) {
|
||||||
|
logger.warn(
|
||||||
|
`[${requestId}] User ${userId} does not have write permission for workspace ${workspaceId}`
|
||||||
|
)
|
||||||
|
return NextResponse.json({ error: 'Write permission required' }, { status: 403 })
|
||||||
|
}
|
||||||
|
|
||||||
|
const resultSkills = await upsertSkills({
|
||||||
|
skills,
|
||||||
|
workspaceId,
|
||||||
|
userId,
|
||||||
|
requestId,
|
||||||
|
})
|
||||||
|
|
||||||
|
return NextResponse.json({ success: true, data: resultSkills })
|
||||||
|
} catch (validationError) {
|
||||||
|
if (validationError instanceof z.ZodError) {
|
||||||
|
logger.warn(`[${requestId}] Invalid skills data`, {
|
||||||
|
errors: validationError.errors,
|
||||||
|
})
|
||||||
|
return NextResponse.json(
|
||||||
|
{ error: 'Invalid request data', details: validationError.errors },
|
||||||
|
{ status: 400 }
|
||||||
|
)
|
||||||
|
}
|
||||||
|
if (validationError instanceof Error && validationError.message.includes('already exists')) {
|
||||||
|
return NextResponse.json({ error: validationError.message }, { status: 409 })
|
||||||
|
}
|
||||||
|
throw validationError
|
||||||
|
}
|
||||||
|
} catch (error) {
|
||||||
|
logger.error(`[${requestId}] Error updating skills`, error)
|
||||||
|
return NextResponse.json({ error: 'Failed to update skills' }, { status: 500 })
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/** DELETE - Delete a skill by ID */
|
||||||
|
export async function DELETE(request: NextRequest) {
|
||||||
|
const requestId = generateRequestId()
|
||||||
|
const searchParams = request.nextUrl.searchParams
|
||||||
|
const skillId = searchParams.get('id')
|
||||||
|
const workspaceId = searchParams.get('workspaceId')
|
||||||
|
|
||||||
|
try {
|
||||||
|
const authResult = await checkSessionOrInternalAuth(request, { requireWorkflowId: false })
|
||||||
|
if (!authResult.success || !authResult.userId) {
|
||||||
|
logger.warn(`[${requestId}] Unauthorized skill deletion attempt`)
|
||||||
|
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
|
||||||
|
}
|
||||||
|
|
||||||
|
const userId = authResult.userId
|
||||||
|
|
||||||
|
if (!skillId) {
|
||||||
|
logger.warn(`[${requestId}] Missing skill ID for deletion`)
|
||||||
|
return NextResponse.json({ error: 'Skill ID is required' }, { status: 400 })
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!workspaceId) {
|
||||||
|
logger.warn(`[${requestId}] Missing workspaceId for deletion`)
|
||||||
|
return NextResponse.json({ error: 'workspaceId is required' }, { status: 400 })
|
||||||
|
}
|
||||||
|
|
||||||
|
const userPermission = await getUserEntityPermissions(userId, 'workspace', workspaceId)
|
||||||
|
if (!userPermission || (userPermission !== 'admin' && userPermission !== 'write')) {
|
||||||
|
logger.warn(
|
||||||
|
`[${requestId}] User ${userId} does not have write permission for workspace ${workspaceId}`
|
||||||
|
)
|
||||||
|
return NextResponse.json({ error: 'Write permission required' }, { status: 403 })
|
||||||
|
}
|
||||||
|
|
||||||
|
const existingSkill = await db.select().from(skill).where(eq(skill.id, skillId)).limit(1)
|
||||||
|
|
||||||
|
if (existingSkill.length === 0) {
|
||||||
|
logger.warn(`[${requestId}] Skill not found: ${skillId}`)
|
||||||
|
return NextResponse.json({ error: 'Skill not found' }, { status: 404 })
|
||||||
|
}
|
||||||
|
|
||||||
|
if (existingSkill[0].workspaceId !== workspaceId) {
|
||||||
|
logger.warn(`[${requestId}] Skill ${skillId} does not belong to workspace ${workspaceId}`)
|
||||||
|
return NextResponse.json({ error: 'Skill not found' }, { status: 404 })
|
||||||
|
}
|
||||||
|
|
||||||
|
await db.delete(skill).where(and(eq(skill.id, skillId), eq(skill.workspaceId, workspaceId)))
|
||||||
|
|
||||||
|
logger.info(`[${requestId}] Deleted skill: ${skillId}`)
|
||||||
|
return NextResponse.json({ success: true })
|
||||||
|
} catch (error) {
|
||||||
|
logger.error(`[${requestId}] Error deleting skill:`, error)
|
||||||
|
return NextResponse.json({ error: 'Failed to delete skill' }, { status: 500 })
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -89,7 +89,7 @@ export function WorkflowSelector({
|
|||||||
onMouseDown={(e) => handleRemove(e, w.id)}
|
onMouseDown={(e) => handleRemove(e, w.id)}
|
||||||
>
|
>
|
||||||
{w.name}
|
{w.name}
|
||||||
<X className='h-3 w-3' />
|
<X className='!text-[var(--text-primary)] h-4 w-4 flex-shrink-0 opacity-50' />
|
||||||
</Badge>
|
</Badge>
|
||||||
))}
|
))}
|
||||||
{selectedWorkflows.length > 2 && (
|
{selectedWorkflows.length > 2 && (
|
||||||
|
|||||||
@@ -24,6 +24,7 @@ export { ResponseFormat } from './response/response-format'
|
|||||||
export { ScheduleInfo } from './schedule-info/schedule-info'
|
export { ScheduleInfo } from './schedule-info/schedule-info'
|
||||||
export { SheetSelectorInput } from './sheet-selector/sheet-selector-input'
|
export { SheetSelectorInput } from './sheet-selector/sheet-selector-input'
|
||||||
export { ShortInput } from './short-input/short-input'
|
export { ShortInput } from './short-input/short-input'
|
||||||
|
export { SkillInput } from './skill-input/skill-input'
|
||||||
export { SlackSelectorInput } from './slack-selector/slack-selector-input'
|
export { SlackSelectorInput } from './slack-selector/slack-selector-input'
|
||||||
export { SliderInput } from './slider-input/slider-input'
|
export { SliderInput } from './slider-input/slider-input'
|
||||||
export { InputFormat } from './starter/input-format'
|
export { InputFormat } from './starter/input-format'
|
||||||
|
|||||||
@@ -0,0 +1,194 @@
|
|||||||
|
'use client'
|
||||||
|
|
||||||
|
import { useCallback, useMemo, useState } from 'react'
|
||||||
|
import { Plus, XIcon } from 'lucide-react'
|
||||||
|
import { useParams } from 'next/navigation'
|
||||||
|
import { Combobox, type ComboboxOptionGroup } from '@/components/emcn'
|
||||||
|
import { AgentSkillsIcon } from '@/components/icons'
|
||||||
|
import { useSubBlockValue } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/editor/components/sub-block/hooks/use-sub-block-value'
|
||||||
|
import { SkillModal } from '@/app/workspace/[workspaceId]/w/components/sidebar/components/settings-modal/components/skills/components/skill-modal'
|
||||||
|
import type { SkillDefinition } from '@/hooks/queries/skills'
|
||||||
|
import { useSkills } from '@/hooks/queries/skills'
|
||||||
|
import { usePermissionConfig } from '@/hooks/use-permission-config'
|
||||||
|
|
||||||
|
/**
 * Shape of a skill reference as persisted in the sub-block value.
 * Only the ID is authoritative; `name` is a denormalized snapshot used as a
 * display fallback when the skill can no longer be resolved (e.g. deleted).
 */
interface StoredSkill {
  skillId: string
  name?: string
}

/** Props for the skill picker sub-block input. */
interface SkillInputProps {
  // ID of the workflow block this input belongs to.
  blockId: string
  // ID of the sub-block whose value stores the StoredSkill[] array.
  subBlockId: string
  // When true, render read-only preview state from `previewValue`.
  isPreview?: boolean
  // Value shown in preview mode; expected to be StoredSkill[] when set.
  previewValue?: unknown
  disabled?: boolean
}
|
||||||
|
|
||||||
|
/**
 * Multi-select skill picker sub-block.
 *
 * Renders a searchable combobox for attaching workspace skills to a block,
 * lists the selected skills as removable cards, and opens SkillModal either
 * to create a new skill or to edit an existing one (clicking a card).
 * Selection state is persisted through useSubBlockValue as StoredSkill[].
 */
export function SkillInput({
  blockId,
  subBlockId,
  isPreview,
  previewValue,
  disabled,
}: SkillInputProps) {
  const params = useParams()
  const workspaceId = params.workspaceId as string

  const { config: permissionConfig } = usePermissionConfig()
  const { data: workspaceSkills = [] } = useSkills(workspaceId)
  const [value, setValue] = useSubBlockValue<StoredSkill[]>(blockId, subBlockId)
  const [showCreateModal, setShowCreateModal] = useState(false)
  const [editingSkill, setEditingSkill] = useState<SkillDefinition | null>(null)
  // NOTE(review): `open` is written (setOpen / onOpenChange) but never read, and
  // the Combobox below is not passed an `open` prop — confirm whether the
  // setOpen(false) calls in onSelect actually close the dropdown.
  const [open, setOpen] = useState(false)

  // In preview mode the value comes from previewValue; otherwise from the
  // sub-block store. Non-array values are normalized to an empty selection.
  const selectedSkills: StoredSkill[] = useMemo(() => {
    if (isPreview && previewValue) {
      return Array.isArray(previewValue) ? previewValue : []
    }
    return Array.isArray(value) ? value : []
  }, [isPreview, previewValue, value])

  const selectedIds = useMemo(() => new Set(selectedSkills.map((s) => s.skillId)), [selectedSkills])

  // Workspace-level permission flag that hides all skill options when set.
  const skillsDisabled = permissionConfig.disableSkills

  // Combobox option groups: a "Create Skill" action group, then the
  // not-yet-selected workspace skills. Both are suppressed when skills are
  // disabled by permissions.
  const skillGroups = useMemo((): ComboboxOptionGroup[] => {
    const groups: ComboboxOptionGroup[] = []

    if (!skillsDisabled) {
      groups.push({
        items: [
          {
            label: 'Create Skill',
            value: 'action-create-skill',
            icon: Plus,
            onSelect: () => {
              setShowCreateModal(true)
              setOpen(false)
            },
            disabled: isPreview,
          },
        ],
      })
    }

    // Already-selected skills are excluded so they cannot be added twice.
    const availableSkills = workspaceSkills.filter((s) => !selectedIds.has(s.id))
    if (!skillsDisabled && availableSkills.length > 0) {
      groups.push({
        section: 'Skills',
        items: availableSkills.map((s) => {
          return {
            label: s.name,
            value: `skill-${s.id}`,
            icon: AgentSkillsIcon,
            onSelect: () => {
              // Append the pick, snapshotting the name for display fallback.
              const newSkills: StoredSkill[] = [...selectedSkills, { skillId: s.id, name: s.name }]
              setValue(newSkills)
              setOpen(false)
            },
          }
        }),
      })
    }

    return groups
  }, [workspaceSkills, selectedIds, selectedSkills, setValue, isPreview, skillsDisabled])

  // Remove one skill reference from the stored selection.
  const handleRemove = useCallback(
    (skillId: string) => {
      const newSkills = selectedSkills.filter((s) => s.skillId !== skillId)
      setValue(newSkills)
    },
    [selectedSkills, setValue]
  )

  // Close the modal after a successful create/update; the skills query is the
  // source of truth for the refreshed list.
  const handleSkillSaved = useCallback(() => {
    setShowCreateModal(false)
    setEditingSkill(null)
  }, [])

  // Display name resolution: fresh query data > stored snapshot > raw ID.
  const resolveSkillName = useCallback(
    (stored: StoredSkill): string => {
      const found = workspaceSkills.find((s) => s.id === stored.skillId)
      return found?.name ?? stored.name ?? stored.skillId
    },
    [workspaceSkills]
  )

  return (
    <>
      <div className='w-full space-y-[8px]'>
        <Combobox
          options={[]}
          groups={skillGroups}
          placeholder='Add skill...'
          disabled={disabled}
          searchable
          searchPlaceholder='Search skills...'
          maxHeight={240}
          emptyMessage='No skills found'
          onOpenChange={setOpen}
        />

        {/* One card per selected skill; clicking the card opens the editor. */}
        {selectedSkills.length > 0 &&
          selectedSkills.map((stored) => {
            const fullSkill = workspaceSkills.find((s) => s.id === stored.skillId)
            return (
              <div
                key={stored.skillId}
                className='group relative flex flex-col overflow-hidden rounded-[4px] border border-[var(--border-1)] transition-all duration-200 ease-in-out'
              >
                <div
                  className='flex cursor-pointer items-center justify-between gap-[8px] rounded-t-[4px] bg-[var(--surface-4)] px-[8px] py-[6.5px]'
                  onClick={() => {
                    // Editing requires the full definition; deleted skills
                    // (no query match) are not editable.
                    if (fullSkill && !disabled && !isPreview) {
                      setEditingSkill(fullSkill)
                    }
                  }}
                >
                  <div className='flex min-w-0 flex-1 items-center gap-[8px]'>
                    <div
                      className='flex h-[16px] w-[16px] flex-shrink-0 items-center justify-center rounded-[4px]'
                      style={{ backgroundColor: '#e0e0e0' }}
                    >
                      <AgentSkillsIcon className='h-[10px] w-[10px] text-[#333]' />
                    </div>
                    <span className='truncate font-medium text-[13px] text-[var(--text-primary)]'>
                      {resolveSkillName(stored)}
                    </span>
                  </div>
                  <div className='flex flex-shrink-0 items-center gap-[8px]'>
                    {!disabled && !isPreview && (
                      <button
                        type='button'
                        onClick={(e) => {
                          // Keep the card's edit onClick from firing.
                          e.stopPropagation()
                          handleRemove(stored.skillId)
                        }}
                        className='flex items-center justify-center text-[var(--text-tertiary)] transition-colors hover:text-[var(--text-primary)]'
                        aria-label='Remove skill'
                      >
                        <XIcon className='h-[13px] w-[13px]' />
                      </button>
                    )}
                  </div>
                </div>
              </div>
            )
          })}
      </div>

      {/* Shared modal for both create (showCreateModal) and edit (editingSkill). */}
      <SkillModal
        open={showCreateModal || !!editingSkill}
        onOpenChange={(isOpen) => {
          if (!isOpen) {
            setShowCreateModal(false)
            setEditingSkill(null)
          }
        }}
        onSave={handleSkillSaved}
        initialValues={editingSkill ?? undefined}
      />
    </>
  )
}
|
||||||
@@ -32,6 +32,7 @@ import {
|
|||||||
ScheduleInfo,
|
ScheduleInfo,
|
||||||
SheetSelectorInput,
|
SheetSelectorInput,
|
||||||
ShortInput,
|
ShortInput,
|
||||||
|
SkillInput,
|
||||||
SlackSelectorInput,
|
SlackSelectorInput,
|
||||||
SliderInput,
|
SliderInput,
|
||||||
Switch,
|
Switch,
|
||||||
@@ -687,6 +688,17 @@ function SubBlockComponent({
|
|||||||
/>
|
/>
|
||||||
)
|
)
|
||||||
|
|
||||||
|
case 'skill-input':
|
||||||
|
return (
|
||||||
|
<SkillInput
|
||||||
|
blockId={blockId}
|
||||||
|
subBlockId={config.id}
|
||||||
|
isPreview={isPreview}
|
||||||
|
previewValue={previewValue}
|
||||||
|
disabled={isDisabled}
|
||||||
|
/>
|
||||||
|
)
|
||||||
|
|
||||||
case 'checkbox-list':
|
case 'checkbox-list':
|
||||||
return (
|
return (
|
||||||
<CheckboxList
|
<CheckboxList
|
||||||
|
|||||||
@@ -6,6 +6,7 @@ import {
|
|||||||
isSubBlockVisibleForMode,
|
isSubBlockVisibleForMode,
|
||||||
} from '@/lib/workflows/subblocks/visibility'
|
} from '@/lib/workflows/subblocks/visibility'
|
||||||
import type { BlockConfig, SubBlockConfig, SubBlockType } from '@/blocks/types'
|
import type { BlockConfig, SubBlockConfig, SubBlockType } from '@/blocks/types'
|
||||||
|
import { usePermissionConfig } from '@/hooks/use-permission-config'
|
||||||
import { useWorkflowDiffStore } from '@/stores/workflow-diff'
|
import { useWorkflowDiffStore } from '@/stores/workflow-diff'
|
||||||
import { mergeSubblockState } from '@/stores/workflows/utils'
|
import { mergeSubblockState } from '@/stores/workflows/utils'
|
||||||
import { useWorkflowStore } from '@/stores/workflows/workflow/store'
|
import { useWorkflowStore } from '@/stores/workflows/workflow/store'
|
||||||
@@ -35,6 +36,7 @@ export function useEditorSubblockLayout(
|
|||||||
const blockDataFromStore = useWorkflowStore(
|
const blockDataFromStore = useWorkflowStore(
|
||||||
useCallback((state) => state.blocks?.[blockId]?.data, [blockId])
|
useCallback((state) => state.blocks?.[blockId]?.data, [blockId])
|
||||||
)
|
)
|
||||||
|
const { config: permissionConfig } = usePermissionConfig()
|
||||||
|
|
||||||
return useMemo(() => {
|
return useMemo(() => {
|
||||||
// Guard against missing config or block selection
|
// Guard against missing config or block selection
|
||||||
@@ -100,6 +102,9 @@ export function useEditorSubblockLayout(
|
|||||||
const visibleSubBlocks = (config.subBlocks || []).filter((block) => {
|
const visibleSubBlocks = (config.subBlocks || []).filter((block) => {
|
||||||
if (block.hidden) return false
|
if (block.hidden) return false
|
||||||
|
|
||||||
|
// Hide skill-input subblock when skills are disabled via permissions
|
||||||
|
if (block.type === 'skill-input' && permissionConfig.disableSkills) return false
|
||||||
|
|
||||||
// Check required feature if specified - declarative feature gating
|
// Check required feature if specified - declarative feature gating
|
||||||
if (!isSubBlockFeatureEnabled(block)) return false
|
if (!isSubBlockFeatureEnabled(block)) return false
|
||||||
|
|
||||||
@@ -149,5 +154,6 @@ export function useEditorSubblockLayout(
|
|||||||
activeWorkflowId,
|
activeWorkflowId,
|
||||||
isSnapshotView,
|
isSnapshotView,
|
||||||
blockDataFromStore,
|
blockDataFromStore,
|
||||||
|
permissionConfig.disableSkills,
|
||||||
])
|
])
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -40,6 +40,7 @@ import { useCustomTools } from '@/hooks/queries/custom-tools'
|
|||||||
import { useMcpServers, useMcpToolsQuery } from '@/hooks/queries/mcp'
|
import { useMcpServers, useMcpToolsQuery } from '@/hooks/queries/mcp'
|
||||||
import { useCredentialName } from '@/hooks/queries/oauth-credentials'
|
import { useCredentialName } from '@/hooks/queries/oauth-credentials'
|
||||||
import { useReactivateSchedule, useScheduleInfo } from '@/hooks/queries/schedules'
|
import { useReactivateSchedule, useScheduleInfo } from '@/hooks/queries/schedules'
|
||||||
|
import { useSkills } from '@/hooks/queries/skills'
|
||||||
import { useDeployChildWorkflow } from '@/hooks/queries/workflows'
|
import { useDeployChildWorkflow } from '@/hooks/queries/workflows'
|
||||||
import { useSelectorDisplayName } from '@/hooks/use-selector-display-name'
|
import { useSelectorDisplayName } from '@/hooks/use-selector-display-name'
|
||||||
import { useVariablesStore } from '@/stores/panel'
|
import { useVariablesStore } from '@/stores/panel'
|
||||||
@@ -618,6 +619,48 @@ const SubBlockRow = memo(function SubBlockRow({
|
|||||||
return `${toolNames[0]}, ${toolNames[1]} +${toolNames.length - 2}`
|
return `${toolNames[0]}, ${toolNames[1]} +${toolNames.length - 2}`
|
||||||
}, [subBlock?.type, rawValue, customTools, workspaceId])
|
}, [subBlock?.type, rawValue, customTools, workspaceId])
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Hydrates skill references to display names.
|
||||||
|
* Resolves skill IDs to their current names from the skills query.
|
||||||
|
*/
|
||||||
|
const { data: workspaceSkills = [] } = useSkills(workspaceId || '')
|
||||||
|
|
||||||
|
const skillsDisplayValue = useMemo(() => {
|
||||||
|
if (subBlock?.type !== 'skill-input' || !Array.isArray(rawValue) || rawValue.length === 0) {
|
||||||
|
return null
|
||||||
|
}
|
||||||
|
|
||||||
|
interface StoredSkill {
|
||||||
|
skillId: string
|
||||||
|
name?: string
|
||||||
|
}
|
||||||
|
|
||||||
|
const skillNames = rawValue
|
||||||
|
.map((skill: StoredSkill) => {
|
||||||
|
if (!skill || typeof skill !== 'object') return null
|
||||||
|
|
||||||
|
// Priority 1: Resolve skill name from the skills query (fresh data)
|
||||||
|
if (skill.skillId) {
|
||||||
|
const foundSkill = workspaceSkills.find((s) => s.id === skill.skillId)
|
||||||
|
if (foundSkill?.name) return foundSkill.name
|
||||||
|
}
|
||||||
|
|
||||||
|
// Priority 2: Fall back to stored name (for deleted skills)
|
||||||
|
if (skill.name && typeof skill.name === 'string') return skill.name
|
||||||
|
|
||||||
|
// Priority 3: Use skillId as last resort
|
||||||
|
if (skill.skillId) return skill.skillId
|
||||||
|
|
||||||
|
return null
|
||||||
|
})
|
||||||
|
.filter((name): name is string => !!name)
|
||||||
|
|
||||||
|
if (skillNames.length === 0) return null
|
||||||
|
if (skillNames.length === 1) return skillNames[0]
|
||||||
|
if (skillNames.length === 2) return `${skillNames[0]}, ${skillNames[1]}`
|
||||||
|
return `${skillNames[0]}, ${skillNames[1]} +${skillNames.length - 2}`
|
||||||
|
}, [subBlock?.type, rawValue, workspaceSkills])
|
||||||
|
|
||||||
const isPasswordField = subBlock?.password === true
|
const isPasswordField = subBlock?.password === true
|
||||||
const maskedValue = isPasswordField && value && value !== '-' ? '•••' : null
|
const maskedValue = isPasswordField && value && value !== '-' ? '•••' : null
|
||||||
|
|
||||||
@@ -627,6 +670,7 @@ const SubBlockRow = memo(function SubBlockRow({
|
|||||||
dropdownLabel ||
|
dropdownLabel ||
|
||||||
variablesDisplayValue ||
|
variablesDisplayValue ||
|
||||||
toolsDisplayValue ||
|
toolsDisplayValue ||
|
||||||
|
skillsDisplayValue ||
|
||||||
knowledgeBaseDisplayName ||
|
knowledgeBaseDisplayName ||
|
||||||
workflowSelectionName ||
|
workflowSelectionName ||
|
||||||
mcpServerDisplayName ||
|
mcpServerDisplayName ||
|
||||||
|
|||||||
@@ -9,6 +9,7 @@ export { Files as FileUploads } from './files/files'
|
|||||||
export { General } from './general/general'
|
export { General } from './general/general'
|
||||||
export { Integrations } from './integrations/integrations'
|
export { Integrations } from './integrations/integrations'
|
||||||
export { MCP } from './mcp/mcp'
|
export { MCP } from './mcp/mcp'
|
||||||
|
export { Skills } from './skills/skills'
|
||||||
export { Subscription } from './subscription/subscription'
|
export { Subscription } from './subscription/subscription'
|
||||||
export { TeamManagement } from './team-management/team-management'
|
export { TeamManagement } from './team-management/team-management'
|
||||||
export { WorkflowMcpServers } from './workflow-mcp-servers/workflow-mcp-servers'
|
export { WorkflowMcpServers } from './workflow-mcp-servers/workflow-mcp-servers'
|
||||||
|
|||||||
@@ -0,0 +1,225 @@
|
|||||||
|
'use client'
|
||||||
|
|
||||||
|
import type { ChangeEvent } from 'react'
|
||||||
|
import { useEffect, useMemo, useState } from 'react'
|
||||||
|
import { useParams } from 'next/navigation'
|
||||||
|
import {
|
||||||
|
Button,
|
||||||
|
Input,
|
||||||
|
Label,
|
||||||
|
Modal,
|
||||||
|
ModalBody,
|
||||||
|
ModalContent,
|
||||||
|
ModalFooter,
|
||||||
|
ModalHeader,
|
||||||
|
Textarea,
|
||||||
|
} from '@/components/emcn'
|
||||||
|
import type { SkillDefinition } from '@/hooks/queries/skills'
|
||||||
|
import { useCreateSkill, useUpdateSkill } from '@/hooks/queries/skills'
|
||||||
|
|
||||||
|
/** Props for the create/edit skill modal. */
interface SkillModalProps {
  open: boolean
  onOpenChange: (open: boolean) => void
  // Called after a successful create or update.
  onSave: () => void
  // Optional delete handler; the Delete button only renders in edit mode
  // when this is provided.
  onDelete?: (skillId: string) => void
  // When set, the modal edits this skill; otherwise it creates a new one.
  initialValues?: SkillDefinition
}

// Skill names must be kebab-case: lowercase alphanumeric segments joined by
// single hyphens, no leading/trailing hyphen (e.g. "my-skill").
const KEBAB_CASE_REGEX = /^[a-z0-9]+(-[a-z0-9]+)*$/

/** Per-field validation messages; `general` covers save/API failures. */
interface FieldErrors {
  name?: string
  description?: string
  content?: string
  general?: string
}
|
||||||
|
|
||||||
|
/**
 * Modal form for creating or editing a workspace skill.
 *
 * In edit mode (initialValues set) the form is pre-filled and the primary
 * button is disabled until a field actually changes; in create mode the form
 * starts empty. Validation: name is required, at most 64 chars, kebab-case;
 * description and content are required. "already exists" API errors are
 * surfaced verbatim; other failures show a generic message.
 */
export function SkillModal({
  open,
  onOpenChange,
  onSave,
  onDelete,
  initialValues,
}: SkillModalProps) {
  const params = useParams()
  const workspaceId = params.workspaceId as string

  const createSkill = useCreateSkill()
  const updateSkill = useUpdateSkill()

  const [name, setName] = useState('')
  const [description, setDescription] = useState('')
  const [content, setContent] = useState('')
  const [errors, setErrors] = useState<FieldErrors>({})
  const [saving, setSaving] = useState(false)

  // (Re)seed the form each time the modal opens: from initialValues in edit
  // mode, blank in create mode. Stale errors are always cleared.
  useEffect(() => {
    if (open) {
      if (initialValues) {
        setName(initialValues.name)
        setDescription(initialValues.description)
        setContent(initialValues.content)
      } else {
        setName('')
        setDescription('')
        setContent('')
      }
      setErrors({})
    }
  }, [open, initialValues])

  // Create mode always counts as "changed"; edit mode compares each field
  // against the initial values to gate the Update button.
  const hasChanges = useMemo(() => {
    if (!initialValues) return true
    return (
      name !== initialValues.name ||
      description !== initialValues.description ||
      content !== initialValues.content
    )
  }, [name, description, content, initialValues])

  // Validate, then dispatch the create or update mutation.
  const handleSave = async () => {
    const newErrors: FieldErrors = {}

    if (!name.trim()) {
      newErrors.name = 'Name is required'
    } else if (name.length > 64) {
      newErrors.name = 'Name must be 64 characters or less'
    } else if (!KEBAB_CASE_REGEX.test(name)) {
      newErrors.name = 'Name must be kebab-case (e.g. my-skill)'
    }

    if (!description.trim()) {
      newErrors.description = 'Description is required'
    }

    if (!content.trim()) {
      newErrors.content = 'Content is required'
    }

    if (Object.keys(newErrors).length > 0) {
      setErrors(newErrors)
      return
    }

    setSaving(true)

    try {
      if (initialValues) {
        await updateSkill.mutateAsync({
          workspaceId,
          skillId: initialValues.id,
          updates: { name, description, content },
        })
      } else {
        await createSkill.mutateAsync({
          workspaceId,
          skill: { name, description, content },
        })
      }
      onSave()
    } catch (error) {
      // Duplicate-name conflicts are shown verbatim; everything else gets a
      // generic retry message.
      const message =
        error instanceof Error && error.message.includes('already exists')
          ? error.message
          : 'Failed to save skill. Please try again.'
      setErrors({ general: message })
    } finally {
      setSaving(false)
    }
  }

  return (
    <Modal open={open} onOpenChange={onOpenChange}>
      <ModalContent size='xl'>
        <ModalHeader>{initialValues ? 'Edit Skill' : 'Create Skill'}</ModalHeader>
        <ModalBody>
          <div className='flex flex-col gap-[16px]'>
            <div className='flex flex-col gap-[4px]'>
              <Label htmlFor='skill-name' className='font-medium text-[13px]'>
                Name
              </Label>
              <Input
                id='skill-name'
                placeholder='my-skill-name'
                value={name}
                onChange={(e) => {
                  setName(e.target.value)
                  // Clear field + general errors as soon as the user edits.
                  if (errors.name || errors.general)
                    setErrors((prev) => ({ ...prev, name: undefined, general: undefined }))
                }}
              />
              {errors.name ? (
                <p className='text-[12px] text-[var(--text-error)]'>{errors.name}</p>
              ) : (
                <span className='text-[11px] text-[var(--text-muted)]'>
                  Lowercase letters, numbers, and hyphens (e.g. my-skill)
                </span>
              )}
            </div>

            <div className='flex flex-col gap-[4px]'>
              <Label htmlFor='skill-description' className='font-medium text-[13px]'>
                Description
              </Label>
              <Input
                id='skill-description'
                placeholder='What this skill does and when to use it...'
                value={description}
                onChange={(e) => {
                  setDescription(e.target.value)
                  if (errors.description || errors.general)
                    setErrors((prev) => ({ ...prev, description: undefined, general: undefined }))
                }}
                maxLength={1024}
              />
              {errors.description && (
                <p className='text-[12px] text-[var(--text-error)]'>{errors.description}</p>
              )}
            </div>

            <div className='flex flex-col gap-[4px]'>
              <Label htmlFor='skill-content' className='font-medium text-[13px]'>
                Content
              </Label>
              <Textarea
                id='skill-content'
                placeholder='Skill instructions in markdown...'
                value={content}
                onChange={(e: ChangeEvent<HTMLTextAreaElement>) => {
                  setContent(e.target.value)
                  if (errors.content || errors.general)
                    setErrors((prev) => ({ ...prev, content: undefined, general: undefined }))
                }}
                className='min-h-[200px] resize-y font-mono text-[13px]'
              />
              {errors.content && (
                <p className='text-[12px] text-[var(--text-error)]'>{errors.content}</p>
              )}
            </div>

            {errors.general && (
              <p className='text-[12px] text-[var(--text-error)]'>{errors.general}</p>
            )}
          </div>
        </ModalBody>
        <ModalFooter className='items-center justify-between'>
          {/* Delete only in edit mode with a handler; spacer keeps layout. */}
          {initialValues && onDelete ? (
            <Button variant='destructive' onClick={() => onDelete(initialValues.id)}>
              Delete
            </Button>
          ) : (
            <div />
          )}
          <div className='flex gap-2'>
            <Button variant='default' onClick={() => onOpenChange(false)}>
              Cancel
            </Button>
            <Button variant='tertiary' onClick={handleSave} disabled={saving || !hasChanges}>
              {saving ? 'Saving...' : initialValues ? 'Update' : 'Create'}
            </Button>
          </div>
        </ModalFooter>
      </ModalContent>
    </Modal>
  )
}
|
||||||
@@ -0,0 +1,219 @@
|
|||||||
|
'use client'
|
||||||
|
|
||||||
|
import { useState } from 'react'
|
||||||
|
import { createLogger } from '@sim/logger'
|
||||||
|
import { Plus, Search } from 'lucide-react'
|
||||||
|
import { useParams } from 'next/navigation'
|
||||||
|
import {
|
||||||
|
Button,
|
||||||
|
Input,
|
||||||
|
Modal,
|
||||||
|
ModalBody,
|
||||||
|
ModalContent,
|
||||||
|
ModalFooter,
|
||||||
|
ModalHeader,
|
||||||
|
} from '@/components/emcn'
|
||||||
|
import { Skeleton } from '@/components/ui'
|
||||||
|
import { cn } from '@/lib/core/utils/cn'
|
||||||
|
import { SkillModal } from '@/app/workspace/[workspaceId]/w/components/sidebar/components/settings-modal/components/skills/components/skill-modal'
|
||||||
|
import type { SkillDefinition } from '@/hooks/queries/skills'
|
||||||
|
import { useDeleteSkill, useSkills } from '@/hooks/queries/skills'
|
||||||
|
|
||||||
|
const logger = createLogger('SkillsSettings')
|
||||||
|
|
||||||
|
function SkillSkeleton() {
|
||||||
|
return (
|
||||||
|
<div className='flex items-center justify-between gap-[12px]'>
|
||||||
|
<div className='flex min-w-0 flex-col justify-center gap-[1px]'>
|
||||||
|
<Skeleton className='h-[14px] w-[100px]' />
|
||||||
|
<Skeleton className='h-[13px] w-[200px]' />
|
||||||
|
</div>
|
||||||
|
<div className='flex flex-shrink-0 items-center gap-[8px]'>
|
||||||
|
<Skeleton className='h-[30px] w-[40px] rounded-[4px]' />
|
||||||
|
<Skeleton className='h-[30px] w-[54px] rounded-[4px]' />
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
export function Skills() {
|
||||||
|
const params = useParams()
|
||||||
|
const workspaceId = params.workspaceId as string
|
||||||
|
|
||||||
|
const { data: skills = [], isLoading, error, refetch: refetchSkills } = useSkills(workspaceId)
|
||||||
|
const deleteSkillMutation = useDeleteSkill()
|
||||||
|
|
||||||
|
const [searchTerm, setSearchTerm] = useState('')
|
||||||
|
const [deletingSkills, setDeletingSkills] = useState<Set<string>>(new Set())
|
||||||
|
const [editingSkill, setEditingSkill] = useState<SkillDefinition | null>(null)
|
||||||
|
const [showAddForm, setShowAddForm] = useState(false)
|
||||||
|
const [skillToDelete, setSkillToDelete] = useState<{ id: string; name: string } | null>(null)
|
||||||
|
const [showDeleteDialog, setShowDeleteDialog] = useState(false)
|
||||||
|
|
||||||
|
const filteredSkills = skills.filter((s) => {
|
||||||
|
if (!searchTerm.trim()) return true
|
||||||
|
const searchLower = searchTerm.toLowerCase()
|
||||||
|
return (
|
||||||
|
s.name.toLowerCase().includes(searchLower) ||
|
||||||
|
s.description.toLowerCase().includes(searchLower)
|
||||||
|
)
|
||||||
|
})
|
||||||
|
|
||||||
|
const handleDeleteClick = (skillId: string) => {
|
||||||
|
const s = skills.find((sk) => sk.id === skillId)
|
||||||
|
if (!s) return
|
||||||
|
|
||||||
|
setSkillToDelete({ id: skillId, name: s.name })
|
||||||
|
setShowDeleteDialog(true)
|
||||||
|
}
|
||||||
|
|
||||||
|
const handleDeleteSkill = async () => {
|
||||||
|
if (!skillToDelete) return
|
||||||
|
|
||||||
|
setDeletingSkills((prev) => new Set(prev).add(skillToDelete.id))
|
||||||
|
setShowDeleteDialog(false)
|
||||||
|
|
||||||
|
try {
|
||||||
|
await deleteSkillMutation.mutateAsync({
|
||||||
|
workspaceId,
|
||||||
|
skillId: skillToDelete.id,
|
||||||
|
})
|
||||||
|
logger.info(`Deleted skill: ${skillToDelete.id}`)
|
||||||
|
} catch (error) {
|
||||||
|
logger.error('Error deleting skill:', error)
|
||||||
|
} finally {
|
||||||
|
setDeletingSkills((prev) => {
|
||||||
|
const next = new Set(prev)
|
||||||
|
next.delete(skillToDelete.id)
|
||||||
|
return next
|
||||||
|
})
|
||||||
|
setSkillToDelete(null)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const handleSkillSaved = () => {
|
||||||
|
setShowAddForm(false)
|
||||||
|
setEditingSkill(null)
|
||||||
|
refetchSkills()
|
||||||
|
}
|
||||||
|
|
||||||
|
const hasSkills = skills && skills.length > 0
|
||||||
|
const showEmptyState = !hasSkills && !showAddForm && !editingSkill
|
||||||
|
const showNoResults = searchTerm.trim() && filteredSkills.length === 0 && skills.length > 0
|
||||||
|
|
||||||
|
return (
|
||||||
|
<>
|
||||||
|
<div className='flex h-full flex-col gap-[16px]'>
|
||||||
|
<div className='flex items-center gap-[8px]'>
|
||||||
|
<div
|
||||||
|
className={cn(
|
||||||
|
'flex flex-1 items-center gap-[8px] rounded-[8px] border border-[var(--border)] bg-transparent px-[8px] py-[5px] transition-colors duration-100 dark:bg-[var(--surface-4)] dark:hover:border-[var(--border-1)] dark:hover:bg-[var(--surface-5)]',
|
||||||
|
isLoading && 'opacity-50'
|
||||||
|
)}
|
||||||
|
>
|
||||||
|
<Search
|
||||||
|
className='h-[14px] w-[14px] flex-shrink-0 text-[var(--text-tertiary)]'
|
||||||
|
strokeWidth={2}
|
||||||
|
/>
|
||||||
|
<Input
|
||||||
|
placeholder='Search skills...'
|
||||||
|
value={searchTerm}
|
||||||
|
onChange={(e) => setSearchTerm(e.target.value)}
|
||||||
|
disabled={isLoading}
|
||||||
|
className='h-auto flex-1 border-0 bg-transparent p-0 font-base leading-none placeholder:text-[var(--text-tertiary)] focus-visible:ring-0 focus-visible:ring-offset-0 disabled:cursor-not-allowed disabled:opacity-100'
|
||||||
|
/>
|
||||||
|
</div>
|
||||||
|
<Button onClick={() => setShowAddForm(true)} disabled={isLoading} variant='tertiary'>
|
||||||
|
<Plus className='mr-[6px] h-[13px] w-[13px]' />
|
||||||
|
Add
|
||||||
|
</Button>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div className='min-h-0 flex-1 overflow-y-auto'>
|
||||||
|
{error ? (
|
||||||
|
<div className='flex h-full flex-col items-center justify-center gap-[8px]'>
|
||||||
|
<p className='text-[#DC2626] text-[11px] leading-tight dark:text-[#F87171]'>
|
||||||
|
{error instanceof Error ? error.message : 'Failed to load skills'}
|
||||||
|
</p>
|
||||||
|
</div>
|
||||||
|
) : isLoading ? (
|
||||||
|
<div className='flex flex-col gap-[8px]'>
|
||||||
|
<SkillSkeleton />
|
||||||
|
<SkillSkeleton />
|
||||||
|
<SkillSkeleton />
|
||||||
|
</div>
|
||||||
|
) : showEmptyState ? (
|
||||||
|
<div className='flex h-full items-center justify-center text-[13px] text-[var(--text-muted)]'>
|
||||||
|
Click "Add" above to get started
|
||||||
|
</div>
|
||||||
|
) : (
|
||||||
|
<div className='flex flex-col gap-[8px]'>
|
||||||
|
{filteredSkills.map((s) => (
|
||||||
|
<div key={s.id} className='flex items-center justify-between gap-[12px]'>
|
||||||
|
<div className='flex min-w-0 flex-col justify-center gap-[1px]'>
|
||||||
|
<span className='truncate font-medium text-[14px]'>{s.name}</span>
|
||||||
|
<p className='truncate text-[13px] text-[var(--text-muted)]'>{s.description}</p>
|
||||||
|
</div>
|
||||||
|
<div className='flex flex-shrink-0 items-center gap-[8px]'>
|
||||||
|
<Button variant='default' onClick={() => setEditingSkill(s)}>
|
||||||
|
Edit
|
||||||
|
</Button>
|
||||||
|
<Button
|
||||||
|
variant='ghost'
|
||||||
|
onClick={() => handleDeleteClick(s.id)}
|
||||||
|
disabled={deletingSkills.has(s.id)}
|
||||||
|
>
|
||||||
|
{deletingSkills.has(s.id) ? 'Deleting...' : 'Delete'}
|
||||||
|
</Button>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
))}
|
||||||
|
{showNoResults && (
|
||||||
|
<div className='py-[16px] text-center text-[13px] text-[var(--text-muted)]'>
|
||||||
|
No skills found matching "{searchTerm}"
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<SkillModal
|
||||||
|
open={showAddForm || !!editingSkill}
|
||||||
|
onOpenChange={(open) => {
|
||||||
|
if (!open) {
|
||||||
|
setShowAddForm(false)
|
||||||
|
setEditingSkill(null)
|
||||||
|
}
|
||||||
|
}}
|
||||||
|
onSave={handleSkillSaved}
|
||||||
|
onDelete={(skillId) => {
|
||||||
|
setEditingSkill(null)
|
||||||
|
handleDeleteClick(skillId)
|
||||||
|
}}
|
||||||
|
initialValues={editingSkill ?? undefined}
|
||||||
|
/>
|
||||||
|
|
||||||
|
<Modal open={showDeleteDialog} onOpenChange={setShowDeleteDialog}>
|
||||||
|
<ModalContent size='sm'>
|
||||||
|
<ModalHeader>Delete Skill</ModalHeader>
|
||||||
|
<ModalBody>
|
||||||
|
<p className='text-[12px] text-[var(--text-secondary)]'>
|
||||||
|
Are you sure you want to delete{' '}
|
||||||
|
<span className='font-medium text-[var(--text-primary)]'>{skillToDelete?.name}</span>?{' '}
|
||||||
|
<span className='text-[var(--text-error)]'>This action cannot be undone.</span>
|
||||||
|
</p>
|
||||||
|
</ModalBody>
|
||||||
|
<ModalFooter>
|
||||||
|
<Button variant='default' onClick={() => setShowDeleteDialog(false)}>
|
||||||
|
Cancel
|
||||||
|
</Button>
|
||||||
|
<Button variant='destructive' onClick={handleDeleteSkill}>
|
||||||
|
Delete
|
||||||
|
</Button>
|
||||||
|
</ModalFooter>
|
||||||
|
</ModalContent>
|
||||||
|
</Modal>
|
||||||
|
</>
|
||||||
|
)
|
||||||
|
}
|
||||||
@@ -34,7 +34,7 @@ import {
|
|||||||
SModalSidebarSection,
|
SModalSidebarSection,
|
||||||
SModalSidebarSectionTitle,
|
SModalSidebarSectionTitle,
|
||||||
} from '@/components/emcn'
|
} from '@/components/emcn'
|
||||||
import { McpIcon } from '@/components/icons'
|
import { AgentSkillsIcon, McpIcon } from '@/components/icons'
|
||||||
import { useSession } from '@/lib/auth/auth-client'
|
import { useSession } from '@/lib/auth/auth-client'
|
||||||
import { getSubscriptionStatus } from '@/lib/billing/client'
|
import { getSubscriptionStatus } from '@/lib/billing/client'
|
||||||
import { getEnv, isTruthy } from '@/lib/core/config/env'
|
import { getEnv, isTruthy } from '@/lib/core/config/env'
|
||||||
@@ -52,6 +52,7 @@ import {
|
|||||||
General,
|
General,
|
||||||
Integrations,
|
Integrations,
|
||||||
MCP,
|
MCP,
|
||||||
|
Skills,
|
||||||
Subscription,
|
Subscription,
|
||||||
TeamManagement,
|
TeamManagement,
|
||||||
WorkflowMcpServers,
|
WorkflowMcpServers,
|
||||||
@@ -93,6 +94,7 @@ type SettingsSection =
|
|||||||
| 'copilot'
|
| 'copilot'
|
||||||
| 'mcp'
|
| 'mcp'
|
||||||
| 'custom-tools'
|
| 'custom-tools'
|
||||||
|
| 'skills'
|
||||||
| 'workflow-mcp-servers'
|
| 'workflow-mcp-servers'
|
||||||
| 'debug'
|
| 'debug'
|
||||||
|
|
||||||
@@ -156,6 +158,7 @@ const allNavigationItems: NavigationItem[] = [
|
|||||||
},
|
},
|
||||||
{ id: 'integrations', label: 'Integrations', icon: Connections, section: 'tools' },
|
{ id: 'integrations', label: 'Integrations', icon: Connections, section: 'tools' },
|
||||||
{ id: 'custom-tools', label: 'Custom Tools', icon: Wrench, section: 'tools' },
|
{ id: 'custom-tools', label: 'Custom Tools', icon: Wrench, section: 'tools' },
|
||||||
|
{ id: 'skills', label: 'Skills', icon: AgentSkillsIcon, section: 'tools' },
|
||||||
{ id: 'mcp', label: 'MCP Tools', icon: McpIcon, section: 'tools' },
|
{ id: 'mcp', label: 'MCP Tools', icon: McpIcon, section: 'tools' },
|
||||||
{ id: 'environment', label: 'Environment', icon: FolderCode, section: 'system' },
|
{ id: 'environment', label: 'Environment', icon: FolderCode, section: 'system' },
|
||||||
{ id: 'apikeys', label: 'API Keys', icon: Key, section: 'system' },
|
{ id: 'apikeys', label: 'API Keys', icon: Key, section: 'system' },
|
||||||
@@ -265,6 +268,9 @@ export function SettingsModal({ open, onOpenChange }: SettingsModalProps) {
|
|||||||
if (item.id === 'custom-tools' && permissionConfig.disableCustomTools) {
|
if (item.id === 'custom-tools' && permissionConfig.disableCustomTools) {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
if (item.id === 'skills' && permissionConfig.disableSkills) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
// Self-hosted override allows showing the item when not on hosted
|
// Self-hosted override allows showing the item when not on hosted
|
||||||
if (item.selfHostedOverride && !isHosted) {
|
if (item.selfHostedOverride && !isHosted) {
|
||||||
@@ -556,6 +562,7 @@ export function SettingsModal({ open, onOpenChange }: SettingsModalProps) {
|
|||||||
{effectiveActiveSection === 'copilot' && <Copilot />}
|
{effectiveActiveSection === 'copilot' && <Copilot />}
|
||||||
{effectiveActiveSection === 'mcp' && <MCP initialServerId={pendingMcpServerId} />}
|
{effectiveActiveSection === 'mcp' && <MCP initialServerId={pendingMcpServerId} />}
|
||||||
{effectiveActiveSection === 'custom-tools' && <CustomTools />}
|
{effectiveActiveSection === 'custom-tools' && <CustomTools />}
|
||||||
|
{effectiveActiveSection === 'skills' && <Skills />}
|
||||||
{effectiveActiveSection === 'workflow-mcp-servers' && <WorkflowMcpServers />}
|
{effectiveActiveSection === 'workflow-mcp-servers' && <WorkflowMcpServers />}
|
||||||
{effectiveActiveSection === 'debug' && <Debug />}
|
{effectiveActiveSection === 'debug' && <Debug />}
|
||||||
</SModalMainBody>
|
</SModalMainBody>
|
||||||
|
|||||||
@@ -1,6 +1,5 @@
|
|||||||
import { describe, expect, it, vi } from 'vitest'
|
import { describe, expect, it, vi } from 'vitest'
|
||||||
|
|
||||||
// Use the real registry module, not the global mock from vitest.setup.ts
|
|
||||||
vi.unmock('@/blocks/registry')
|
vi.unmock('@/blocks/registry')
|
||||||
|
|
||||||
import { generateRouterPrompt } from '@/blocks/blocks/router'
|
import { generateRouterPrompt } from '@/blocks/blocks/router'
|
||||||
@@ -15,7 +14,7 @@ import {
|
|||||||
} from '@/blocks/registry'
|
} from '@/blocks/registry'
|
||||||
import { AuthMode } from '@/blocks/types'
|
import { AuthMode } from '@/blocks/types'
|
||||||
|
|
||||||
describe('Blocks Module', () => {
|
describe.concurrent('Blocks Module', () => {
|
||||||
describe('Registry', () => {
|
describe('Registry', () => {
|
||||||
it('should have a non-empty registry of blocks', () => {
|
it('should have a non-empty registry of blocks', () => {
|
||||||
expect(Object.keys(registry).length).toBeGreaterThan(0)
|
expect(Object.keys(registry).length).toBeGreaterThan(0)
|
||||||
@@ -409,6 +408,7 @@ describe('Blocks Module', () => {
|
|||||||
'workflow-input-mapper',
|
'workflow-input-mapper',
|
||||||
'text',
|
'text',
|
||||||
'router-input',
|
'router-input',
|
||||||
|
'skill-input',
|
||||||
]
|
]
|
||||||
|
|
||||||
const blocks = getAllBlocks()
|
const blocks = getAllBlocks()
|
||||||
|
|||||||
@@ -164,6 +164,7 @@ Return ONLY the JSON array.`,
|
|||||||
type: 'dropdown',
|
type: 'dropdown',
|
||||||
placeholder: 'Select reasoning effort...',
|
placeholder: 'Select reasoning effort...',
|
||||||
options: [
|
options: [
|
||||||
|
{ label: 'auto', id: 'auto' },
|
||||||
{ label: 'low', id: 'low' },
|
{ label: 'low', id: 'low' },
|
||||||
{ label: 'medium', id: 'medium' },
|
{ label: 'medium', id: 'medium' },
|
||||||
{ label: 'high', id: 'high' },
|
{ label: 'high', id: 'high' },
|
||||||
@@ -173,9 +174,12 @@ Return ONLY the JSON array.`,
|
|||||||
const { useSubBlockStore } = await import('@/stores/workflows/subblock/store')
|
const { useSubBlockStore } = await import('@/stores/workflows/subblock/store')
|
||||||
const { useWorkflowRegistry } = await import('@/stores/workflows/registry/store')
|
const { useWorkflowRegistry } = await import('@/stores/workflows/registry/store')
|
||||||
|
|
||||||
|
const autoOption = { label: 'auto', id: 'auto' }
|
||||||
|
|
||||||
const activeWorkflowId = useWorkflowRegistry.getState().activeWorkflowId
|
const activeWorkflowId = useWorkflowRegistry.getState().activeWorkflowId
|
||||||
if (!activeWorkflowId) {
|
if (!activeWorkflowId) {
|
||||||
return [
|
return [
|
||||||
|
autoOption,
|
||||||
{ label: 'low', id: 'low' },
|
{ label: 'low', id: 'low' },
|
||||||
{ label: 'medium', id: 'medium' },
|
{ label: 'medium', id: 'medium' },
|
||||||
{ label: 'high', id: 'high' },
|
{ label: 'high', id: 'high' },
|
||||||
@@ -188,6 +192,7 @@ Return ONLY the JSON array.`,
|
|||||||
|
|
||||||
if (!modelValue) {
|
if (!modelValue) {
|
||||||
return [
|
return [
|
||||||
|
autoOption,
|
||||||
{ label: 'low', id: 'low' },
|
{ label: 'low', id: 'low' },
|
||||||
{ label: 'medium', id: 'medium' },
|
{ label: 'medium', id: 'medium' },
|
||||||
{ label: 'high', id: 'high' },
|
{ label: 'high', id: 'high' },
|
||||||
@@ -197,15 +202,15 @@ Return ONLY the JSON array.`,
|
|||||||
const validOptions = getReasoningEffortValuesForModel(modelValue)
|
const validOptions = getReasoningEffortValuesForModel(modelValue)
|
||||||
if (!validOptions) {
|
if (!validOptions) {
|
||||||
return [
|
return [
|
||||||
|
autoOption,
|
||||||
{ label: 'low', id: 'low' },
|
{ label: 'low', id: 'low' },
|
||||||
{ label: 'medium', id: 'medium' },
|
{ label: 'medium', id: 'medium' },
|
||||||
{ label: 'high', id: 'high' },
|
{ label: 'high', id: 'high' },
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
return validOptions.map((opt) => ({ label: opt, id: opt }))
|
return [autoOption, ...validOptions.map((opt) => ({ label: opt, id: opt }))]
|
||||||
},
|
},
|
||||||
value: () => 'medium',
|
|
||||||
condition: {
|
condition: {
|
||||||
field: 'model',
|
field: 'model',
|
||||||
value: MODELS_WITH_REASONING_EFFORT,
|
value: MODELS_WITH_REASONING_EFFORT,
|
||||||
@@ -217,6 +222,7 @@ Return ONLY the JSON array.`,
|
|||||||
type: 'dropdown',
|
type: 'dropdown',
|
||||||
placeholder: 'Select verbosity...',
|
placeholder: 'Select verbosity...',
|
||||||
options: [
|
options: [
|
||||||
|
{ label: 'auto', id: 'auto' },
|
||||||
{ label: 'low', id: 'low' },
|
{ label: 'low', id: 'low' },
|
||||||
{ label: 'medium', id: 'medium' },
|
{ label: 'medium', id: 'medium' },
|
||||||
{ label: 'high', id: 'high' },
|
{ label: 'high', id: 'high' },
|
||||||
@@ -226,9 +232,12 @@ Return ONLY the JSON array.`,
|
|||||||
const { useSubBlockStore } = await import('@/stores/workflows/subblock/store')
|
const { useSubBlockStore } = await import('@/stores/workflows/subblock/store')
|
||||||
const { useWorkflowRegistry } = await import('@/stores/workflows/registry/store')
|
const { useWorkflowRegistry } = await import('@/stores/workflows/registry/store')
|
||||||
|
|
||||||
|
const autoOption = { label: 'auto', id: 'auto' }
|
||||||
|
|
||||||
const activeWorkflowId = useWorkflowRegistry.getState().activeWorkflowId
|
const activeWorkflowId = useWorkflowRegistry.getState().activeWorkflowId
|
||||||
if (!activeWorkflowId) {
|
if (!activeWorkflowId) {
|
||||||
return [
|
return [
|
||||||
|
autoOption,
|
||||||
{ label: 'low', id: 'low' },
|
{ label: 'low', id: 'low' },
|
||||||
{ label: 'medium', id: 'medium' },
|
{ label: 'medium', id: 'medium' },
|
||||||
{ label: 'high', id: 'high' },
|
{ label: 'high', id: 'high' },
|
||||||
@@ -241,6 +250,7 @@ Return ONLY the JSON array.`,
|
|||||||
|
|
||||||
if (!modelValue) {
|
if (!modelValue) {
|
||||||
return [
|
return [
|
||||||
|
autoOption,
|
||||||
{ label: 'low', id: 'low' },
|
{ label: 'low', id: 'low' },
|
||||||
{ label: 'medium', id: 'medium' },
|
{ label: 'medium', id: 'medium' },
|
||||||
{ label: 'high', id: 'high' },
|
{ label: 'high', id: 'high' },
|
||||||
@@ -250,15 +260,15 @@ Return ONLY the JSON array.`,
|
|||||||
const validOptions = getVerbosityValuesForModel(modelValue)
|
const validOptions = getVerbosityValuesForModel(modelValue)
|
||||||
if (!validOptions) {
|
if (!validOptions) {
|
||||||
return [
|
return [
|
||||||
|
autoOption,
|
||||||
{ label: 'low', id: 'low' },
|
{ label: 'low', id: 'low' },
|
||||||
{ label: 'medium', id: 'medium' },
|
{ label: 'medium', id: 'medium' },
|
||||||
{ label: 'high', id: 'high' },
|
{ label: 'high', id: 'high' },
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
return validOptions.map((opt) => ({ label: opt, id: opt }))
|
return [autoOption, ...validOptions.map((opt) => ({ label: opt, id: opt }))]
|
||||||
},
|
},
|
||||||
value: () => 'medium',
|
|
||||||
condition: {
|
condition: {
|
||||||
field: 'model',
|
field: 'model',
|
||||||
value: MODELS_WITH_VERBOSITY,
|
value: MODELS_WITH_VERBOSITY,
|
||||||
@@ -270,6 +280,7 @@ Return ONLY the JSON array.`,
|
|||||||
type: 'dropdown',
|
type: 'dropdown',
|
||||||
placeholder: 'Select thinking level...',
|
placeholder: 'Select thinking level...',
|
||||||
options: [
|
options: [
|
||||||
|
{ label: 'none', id: 'none' },
|
||||||
{ label: 'minimal', id: 'minimal' },
|
{ label: 'minimal', id: 'minimal' },
|
||||||
{ label: 'low', id: 'low' },
|
{ label: 'low', id: 'low' },
|
||||||
{ label: 'medium', id: 'medium' },
|
{ label: 'medium', id: 'medium' },
|
||||||
@@ -281,12 +292,11 @@ Return ONLY the JSON array.`,
|
|||||||
const { useSubBlockStore } = await import('@/stores/workflows/subblock/store')
|
const { useSubBlockStore } = await import('@/stores/workflows/subblock/store')
|
||||||
const { useWorkflowRegistry } = await import('@/stores/workflows/registry/store')
|
const { useWorkflowRegistry } = await import('@/stores/workflows/registry/store')
|
||||||
|
|
||||||
|
const noneOption = { label: 'none', id: 'none' }
|
||||||
|
|
||||||
const activeWorkflowId = useWorkflowRegistry.getState().activeWorkflowId
|
const activeWorkflowId = useWorkflowRegistry.getState().activeWorkflowId
|
||||||
if (!activeWorkflowId) {
|
if (!activeWorkflowId) {
|
||||||
return [
|
return [noneOption, { label: 'low', id: 'low' }, { label: 'high', id: 'high' }]
|
||||||
{ label: 'low', id: 'low' },
|
|
||||||
{ label: 'high', id: 'high' },
|
|
||||||
]
|
|
||||||
}
|
}
|
||||||
|
|
||||||
const workflowValues = useSubBlockStore.getState().workflowValues[activeWorkflowId]
|
const workflowValues = useSubBlockStore.getState().workflowValues[activeWorkflowId]
|
||||||
@@ -294,23 +304,16 @@ Return ONLY the JSON array.`,
|
|||||||
const modelValue = blockValues?.model as string
|
const modelValue = blockValues?.model as string
|
||||||
|
|
||||||
if (!modelValue) {
|
if (!modelValue) {
|
||||||
return [
|
return [noneOption, { label: 'low', id: 'low' }, { label: 'high', id: 'high' }]
|
||||||
{ label: 'low', id: 'low' },
|
|
||||||
{ label: 'high', id: 'high' },
|
|
||||||
]
|
|
||||||
}
|
}
|
||||||
|
|
||||||
const validOptions = getThinkingLevelsForModel(modelValue)
|
const validOptions = getThinkingLevelsForModel(modelValue)
|
||||||
if (!validOptions) {
|
if (!validOptions) {
|
||||||
return [
|
return [noneOption, { label: 'low', id: 'low' }, { label: 'high', id: 'high' }]
|
||||||
{ label: 'low', id: 'low' },
|
|
||||||
{ label: 'high', id: 'high' },
|
|
||||||
]
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return validOptions.map((opt) => ({ label: opt, id: opt }))
|
return [noneOption, ...validOptions.map((opt) => ({ label: opt, id: opt }))]
|
||||||
},
|
},
|
||||||
value: () => 'high',
|
|
||||||
condition: {
|
condition: {
|
||||||
field: 'model',
|
field: 'model',
|
||||||
value: MODELS_WITH_THINKING,
|
value: MODELS_WITH_THINKING,
|
||||||
@@ -333,11 +336,11 @@ Return ONLY the JSON array.`,
|
|||||||
id: 'azureApiVersion',
|
id: 'azureApiVersion',
|
||||||
title: 'Azure API Version',
|
title: 'Azure API Version',
|
||||||
type: 'short-input',
|
type: 'short-input',
|
||||||
placeholder: '2024-07-01-preview',
|
placeholder: 'Enter API version',
|
||||||
connectionDroppable: false,
|
connectionDroppable: false,
|
||||||
condition: {
|
condition: {
|
||||||
field: 'model',
|
field: 'model',
|
||||||
value: providers['azure-openai'].models,
|
value: [...providers['azure-openai'].models, ...providers['azure-anthropic'].models],
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -407,6 +410,12 @@ Return ONLY the JSON array.`,
|
|||||||
type: 'tool-input',
|
type: 'tool-input',
|
||||||
defaultValue: [],
|
defaultValue: [],
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
id: 'skills',
|
||||||
|
title: 'Skills',
|
||||||
|
type: 'skill-input',
|
||||||
|
defaultValue: [],
|
||||||
|
},
|
||||||
{
|
{
|
||||||
id: 'apiKey',
|
id: 'apiKey',
|
||||||
title: 'API Key',
|
title: 'API Key',
|
||||||
@@ -519,6 +528,7 @@ Return ONLY the JSON array.`,
|
|||||||
title: 'Max Output Tokens',
|
title: 'Max Output Tokens',
|
||||||
type: 'short-input',
|
type: 'short-input',
|
||||||
placeholder: 'Enter max tokens (e.g., 4096)...',
|
placeholder: 'Enter max tokens (e.g., 4096)...',
|
||||||
|
mode: 'advanced',
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
id: 'responseFormat',
|
id: 'responseFormat',
|
||||||
@@ -709,7 +719,7 @@ Example 3 (Array Input):
|
|||||||
},
|
},
|
||||||
model: { type: 'string', description: 'AI model to use' },
|
model: { type: 'string', description: 'AI model to use' },
|
||||||
apiKey: { type: 'string', description: 'Provider API key' },
|
apiKey: { type: 'string', description: 'Provider API key' },
|
||||||
azureEndpoint: { type: 'string', description: 'Azure OpenAI endpoint URL' },
|
azureEndpoint: { type: 'string', description: 'Azure endpoint URL' },
|
||||||
azureApiVersion: { type: 'string', description: 'Azure API version' },
|
azureApiVersion: { type: 'string', description: 'Azure API version' },
|
||||||
vertexProject: { type: 'string', description: 'Google Cloud project ID for Vertex AI' },
|
vertexProject: { type: 'string', description: 'Google Cloud project ID for Vertex AI' },
|
||||||
vertexLocation: { type: 'string', description: 'Google Cloud location for Vertex AI' },
|
vertexLocation: { type: 'string', description: 'Google Cloud location for Vertex AI' },
|
||||||
@@ -769,6 +779,7 @@ Example 3 (Array Input):
|
|||||||
description: 'Thinking level for models with extended thinking (Anthropic Claude, Gemini 3)',
|
description: 'Thinking level for models with extended thinking (Anthropic Claude, Gemini 3)',
|
||||||
},
|
},
|
||||||
tools: { type: 'json', description: 'Available tools configuration' },
|
tools: { type: 'json', description: 'Available tools configuration' },
|
||||||
|
skills: { type: 'json', description: 'Selected skills configuration' },
|
||||||
},
|
},
|
||||||
outputs: {
|
outputs: {
|
||||||
content: { type: 'string', description: 'Generated response content' },
|
content: { type: 'string', description: 'Generated response content' },
|
||||||
|
|||||||
102
apps/sim/blocks/blocks/airweave.ts
Normal file
102
apps/sim/blocks/blocks/airweave.ts
Normal file
@@ -0,0 +1,102 @@
|
|||||||
|
import { AirweaveIcon } from '@/components/icons'
|
||||||
|
import type { BlockConfig } from '@/blocks/types'
|
||||||
|
import { AuthMode } from '@/blocks/types'
|
||||||
|
import type { AirweaveSearchResponse } from '@/tools/airweave/types'
|
||||||
|
|
||||||
|
export const AirweaveBlock: BlockConfig<AirweaveSearchResponse> = {
|
||||||
|
type: 'airweave',
|
||||||
|
name: 'Airweave',
|
||||||
|
description: 'Search your synced data collections',
|
||||||
|
authMode: AuthMode.ApiKey,
|
||||||
|
longDescription:
|
||||||
|
'Search across your synced data sources using Airweave. Supports semantic search with hybrid, neural, or keyword retrieval strategies. Optionally generate AI-powered answers from search results.',
|
||||||
|
docsLink: 'https://docs.airweave.ai',
|
||||||
|
category: 'tools',
|
||||||
|
bgColor: '#6366F1',
|
||||||
|
icon: AirweaveIcon,
|
||||||
|
subBlocks: [
|
||||||
|
{
|
||||||
|
id: 'collectionId',
|
||||||
|
title: 'Collection ID',
|
||||||
|
type: 'short-input',
|
||||||
|
placeholder: 'Enter your collection readable ID...',
|
||||||
|
required: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: 'query',
|
||||||
|
title: 'Search Query',
|
||||||
|
type: 'long-input',
|
||||||
|
placeholder: 'Enter your search query...',
|
||||||
|
required: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: 'limit',
|
||||||
|
title: 'Max Results',
|
||||||
|
type: 'dropdown',
|
||||||
|
options: [
|
||||||
|
{ label: '10', id: '10' },
|
||||||
|
{ label: '25', id: '25' },
|
||||||
|
{ label: '50', id: '50' },
|
||||||
|
{ label: '100', id: '100' },
|
||||||
|
],
|
||||||
|
value: () => '25',
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: 'retrievalStrategy',
|
||||||
|
title: 'Retrieval Strategy',
|
||||||
|
type: 'dropdown',
|
||||||
|
options: [
|
||||||
|
{ label: 'Hybrid (Default)', id: 'hybrid' },
|
||||||
|
{ label: 'Neural', id: 'neural' },
|
||||||
|
{ label: 'Keyword', id: 'keyword' },
|
||||||
|
],
|
||||||
|
value: () => 'hybrid',
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: 'expandQuery',
|
||||||
|
title: 'Expand Query',
|
||||||
|
type: 'switch',
|
||||||
|
description: 'Generate query variations to improve recall',
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: 'rerank',
|
||||||
|
title: 'Rerank Results',
|
||||||
|
type: 'switch',
|
||||||
|
description: 'Reorder results for improved relevance using LLM',
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: 'generateAnswer',
|
||||||
|
title: 'Generate Answer',
|
||||||
|
type: 'switch',
|
||||||
|
description: 'Generate a natural-language answer from results',
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: 'apiKey',
|
||||||
|
title: 'API Key',
|
||||||
|
type: 'short-input',
|
||||||
|
placeholder: 'Enter your Airweave API key',
|
||||||
|
password: true,
|
||||||
|
required: true,
|
||||||
|
},
|
||||||
|
],
|
||||||
|
tools: {
|
||||||
|
access: ['airweave_search'],
|
||||||
|
},
|
||||||
|
inputs: {
|
||||||
|
collectionId: { type: 'string', description: 'Airweave collection readable ID' },
|
||||||
|
query: { type: 'string', description: 'Search query text' },
|
||||||
|
apiKey: { type: 'string', description: 'Airweave API key' },
|
||||||
|
limit: { type: 'number', description: 'Maximum number of results' },
|
||||||
|
retrievalStrategy: {
|
||||||
|
type: 'string',
|
||||||
|
description: 'Retrieval strategy (hybrid/neural/keyword)',
|
||||||
|
},
|
||||||
|
expandQuery: { type: 'boolean', description: 'Generate query variations' },
|
||||||
|
rerank: { type: 'boolean', description: 'Rerank results with LLM' },
|
||||||
|
generateAnswer: { type: 'boolean', description: 'Generate AI answer' },
|
||||||
|
},
|
||||||
|
outputs: {
|
||||||
|
results: { type: 'json', description: 'Search results with content and metadata' },
|
||||||
|
completion: { type: 'string', description: 'AI-generated answer (when enabled)' },
|
||||||
|
},
|
||||||
|
}
|
||||||
@@ -76,8 +76,9 @@ export const TranslateBlock: BlockConfig = {
|
|||||||
vertexProject: params.vertexProject,
|
vertexProject: params.vertexProject,
|
||||||
vertexLocation: params.vertexLocation,
|
vertexLocation: params.vertexLocation,
|
||||||
vertexCredential: params.vertexCredential,
|
vertexCredential: params.vertexCredential,
|
||||||
bedrockRegion: params.bedrockRegion,
|
bedrockAccessKeyId: params.bedrockAccessKeyId,
|
||||||
bedrockSecretKey: params.bedrockSecretKey,
|
bedrockSecretKey: params.bedrockSecretKey,
|
||||||
|
bedrockRegion: params.bedrockRegion,
|
||||||
}),
|
}),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
|||||||
@@ -2,6 +2,7 @@ import { A2ABlock } from '@/blocks/blocks/a2a'
|
|||||||
import { AgentBlock } from '@/blocks/blocks/agent'
|
import { AgentBlock } from '@/blocks/blocks/agent'
|
||||||
import { AhrefsBlock } from '@/blocks/blocks/ahrefs'
|
import { AhrefsBlock } from '@/blocks/blocks/ahrefs'
|
||||||
import { AirtableBlock } from '@/blocks/blocks/airtable'
|
import { AirtableBlock } from '@/blocks/blocks/airtable'
|
||||||
|
import { AirweaveBlock } from '@/blocks/blocks/airweave'
|
||||||
import { ApiBlock } from '@/blocks/blocks/api'
|
import { ApiBlock } from '@/blocks/blocks/api'
|
||||||
import { ApiTriggerBlock } from '@/blocks/blocks/api_trigger'
|
import { ApiTriggerBlock } from '@/blocks/blocks/api_trigger'
|
||||||
import { ApifyBlock } from '@/blocks/blocks/apify'
|
import { ApifyBlock } from '@/blocks/blocks/apify'
|
||||||
@@ -167,6 +168,7 @@ export const registry: Record<string, BlockConfig> = {
|
|||||||
agent: AgentBlock,
|
agent: AgentBlock,
|
||||||
ahrefs: AhrefsBlock,
|
ahrefs: AhrefsBlock,
|
||||||
airtable: AirtableBlock,
|
airtable: AirtableBlock,
|
||||||
|
airweave: AirweaveBlock,
|
||||||
api: ApiBlock,
|
api: ApiBlock,
|
||||||
api_trigger: ApiTriggerBlock,
|
api_trigger: ApiTriggerBlock,
|
||||||
apify: ApifyBlock,
|
apify: ApifyBlock,
|
||||||
|
|||||||
@@ -51,6 +51,7 @@ export type SubBlockType =
|
|||||||
| 'code' // Code editor
|
| 'code' // Code editor
|
||||||
| 'switch' // Toggle button
|
| 'switch' // Toggle button
|
||||||
| 'tool-input' // Tool configuration
|
| 'tool-input' // Tool configuration
|
||||||
|
| 'skill-input' // Skill selection for agent blocks
|
||||||
| 'checkbox-list' // Multiple selection
|
| 'checkbox-list' // Multiple selection
|
||||||
| 'grouped-checkbox-list' // Grouped, scrollable checkbox list with select all
|
| 'grouped-checkbox-list' // Grouped, scrollable checkbox list with select all
|
||||||
| 'condition-input' // Conditional logic
|
| 'condition-input' // Conditional logic
|
||||||
|
|||||||
@@ -80,7 +80,7 @@ export function getApiKeyCondition() {
|
|||||||
|
|
||||||
/**
|
/**
|
||||||
* Returns the standard provider credential subblocks used by LLM-based blocks.
|
* Returns the standard provider credential subblocks used by LLM-based blocks.
|
||||||
* This includes: Vertex AI OAuth, API Key, Azure OpenAI, Vertex AI config, and Bedrock config.
|
* This includes: Vertex AI OAuth, API Key, Azure (OpenAI + Anthropic), Vertex AI config, and Bedrock config.
|
||||||
*
|
*
|
||||||
* Usage: Spread into your block's subBlocks array after block-specific fields
|
* Usage: Spread into your block's subBlocks array after block-specific fields
|
||||||
*/
|
*/
|
||||||
@@ -111,25 +111,25 @@ export function getProviderCredentialSubBlocks(): SubBlockConfig[] {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
id: 'azureEndpoint',
|
id: 'azureEndpoint',
|
||||||
title: 'Azure OpenAI Endpoint',
|
title: 'Azure Endpoint',
|
||||||
type: 'short-input',
|
type: 'short-input',
|
||||||
password: true,
|
password: true,
|
||||||
placeholder: 'https://your-resource.openai.azure.com',
|
placeholder: 'https://your-resource.services.ai.azure.com',
|
||||||
connectionDroppable: false,
|
connectionDroppable: false,
|
||||||
condition: {
|
condition: {
|
||||||
field: 'model',
|
field: 'model',
|
||||||
value: providers['azure-openai'].models,
|
value: [...providers['azure-openai'].models, ...providers['azure-anthropic'].models],
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
id: 'azureApiVersion',
|
id: 'azureApiVersion',
|
||||||
title: 'Azure API Version',
|
title: 'Azure API Version',
|
||||||
type: 'short-input',
|
type: 'short-input',
|
||||||
placeholder: '2024-07-01-preview',
|
placeholder: 'Enter API version',
|
||||||
connectionDroppable: false,
|
connectionDroppable: false,
|
||||||
condition: {
|
condition: {
|
||||||
field: 'model',
|
field: 'model',
|
||||||
value: providers['azure-openai'].models,
|
value: [...providers['azure-openai'].models, ...providers['azure-anthropic'].models],
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -202,7 +202,7 @@ export function getProviderCredentialSubBlocks(): SubBlockConfig[] {
|
|||||||
*/
|
*/
|
||||||
export const PROVIDER_CREDENTIAL_INPUTS = {
|
export const PROVIDER_CREDENTIAL_INPUTS = {
|
||||||
apiKey: { type: 'string', description: 'Provider API key' },
|
apiKey: { type: 'string', description: 'Provider API key' },
|
||||||
azureEndpoint: { type: 'string', description: 'Azure OpenAI endpoint URL' },
|
azureEndpoint: { type: 'string', description: 'Azure endpoint URL' },
|
||||||
azureApiVersion: { type: 'string', description: 'Azure API version' },
|
azureApiVersion: { type: 'string', description: 'Azure API version' },
|
||||||
vertexProject: { type: 'string', description: 'Google Cloud project ID for Vertex AI' },
|
vertexProject: { type: 'string', description: 'Google Cloud project ID for Vertex AI' },
|
||||||
vertexLocation: { type: 'string', description: 'Google Cloud location for Vertex AI' },
|
vertexLocation: { type: 'string', description: 'Google Cloud location for Vertex AI' },
|
||||||
|
|||||||
@@ -1131,6 +1131,32 @@ export function AirtableIcon(props: SVGProps<SVGSVGElement>) {
|
|||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
export function AirweaveIcon(props: SVGProps<SVGSVGElement>) {
|
||||||
|
return (
|
||||||
|
<svg
|
||||||
|
{...props}
|
||||||
|
width='143'
|
||||||
|
height='143'
|
||||||
|
viewBox='0 0 143 143'
|
||||||
|
fill='none'
|
||||||
|
xmlns='http://www.w3.org/2000/svg'
|
||||||
|
>
|
||||||
|
<path
|
||||||
|
d='M89.8854 128.872C79.9165 123.339 66.7502 115.146 60.5707 107.642L60.0432 107.018C58.7836 105.5 57.481 104.014 56.1676 102.593C51.9152 97.9641 47.3614 93.7978 42.646 90.2021C40.7405 88.7487 38.7704 87.3492 36.8111 86.0789C35.7991 85.4222 34.8302 84.8193 33.9151 84.2703C31.6221 82.903 28.8338 82.5263 26.2716 83.2476C23.8385 83.9366 21.89 85.5406 20.7596 87.7476C18.5634 92.0323 20.0814 97.3289 24.2046 99.805C27.5204 101.786 30.7608 104.111 33.8398 106.717C34.2381 107.05 34.3996 107.578 34.2596 108.062C33.1292 112.185 31.9989 118.957 31.5682 121.67C30.6424 127.429 33.4737 133.081 38.5982 135.751L38.7812 135.848C41.0204 137 43.6472 136.946 45.8219 135.697C47.9858 134.459 49.353 132.231 49.4822 129.733C49.536 128.657 49.6006 127.58 49.676 126.59C49.719 126.062 50.042 125.632 50.5264 125.459C50.6772 125.406 50.8494 125.373 51.0001 125.373C51.3554 125.373 51.6784 125.513 51.9475 125.782C56.243 130.185 60.8829 134.169 65.7167 137.625C70.3674 140.951 75.8686 142.706 81.639 142.706C83.7383 142.706 85.8376 142.469 87.8938 141.995L88.1199 141.942C90.9943 141.274 93.029 139.024 93.4488 136.085C93.8687 133.146 92.4476 130.315 89.8747 128.883H89.8639L89.8854 128.872Z'
|
||||||
|
fill='currentColor'
|
||||||
|
/>
|
||||||
|
<path
|
||||||
|
d='M142.551 58.1747L142.529 58.0563C142.045 55.591 140.118 53.7069 137.598 53.2548C135.112 52.8134 132.754 53.8577 131.484 55.9893L131.408 56.1077C126.704 64.1604 120.061 71.6101 111.653 78.2956C109.446 80.0504 107.293 81.902 105.226 83.8075C103.644 85.2717 101.265 85.53 99.4452 84.4212C97.6474 83.3339 95.8495 82.1389 94.1055 80.8686C90.3268 78.1233 86.6772 74.9475 83.2753 71.4271C81.4989 69.597 79.798 67.6915 78.1939 65.7321C76.0408 63.1161 73.7477 60.5539 71.3685 58.1316C66.3195 52.9857 56.6089 45.9127 53.7453 43.878C53.3792 43.6304 53.1639 43.2428 53.0993 42.8014C53.0455 42.3601 53.1639 41.9509 53.4546 41.6064C55.274 39.4318 56.9965 37.1818 58.5683 34.921C60.2369 32.5311 60.786 29.6028 60.0862 26.8899C59.408 24.2523 57.6424 22.11 55.134 20.8827C50.9139 18.7942 45.8972 20.0968 43.2273 23.9293C40.8373 27.3636 38.0167 30.7332 34.8732 33.9306C34.5718 34.232 34.1304 34.3397 33.7213 34.1889C30.5239 33.1447 27.2296 32.2942 23.9461 31.659C23.7093 31.616 23.354 31.5514 22.9126 31.4975C16.4102 30.5286 10.1123 33.7798 7.21639 39.5717L7.1195 39.7548C6.18289 41.628 6.26902 43.8349 7.32405 45.6651C8.40061 47.5167 10.3277 48.701 12.4592 48.8194C13.4604 48.8732 14.4401 48.9378 15.3659 49.0024C15.7966 49.0347 16.1411 49.2823 16.3025 49.6914C16.4533 50.1112 16.3671 50.5419 16.0657 50.8541C12.147 54.8804 8.60515 59.1974 5.5262 63.6867C1.1446 70.0814 -0.481008 78.2095 1.08 85.9822L1.10154 86.1006C1.70441 89.0719 4.05131 91.2035 7.07644 91.5264C9.98315 91.8386 12.6099 90.3208 13.7619 87.6724L13.8265 87.5109C18.6925 75.8625 26.7559 65.5168 37.7907 56.7536C38.3182 56.3445 39.0072 56.28 39.567 56.5922C45.3373 59.768 50.8601 63.902 55.9738 68.8864C56.5982 69.4893 56.6089 70.5013 56.0168 71.1257C53.4761 73.8063 51.0862 76.6054 48.9115 79.469C47.2106 81.7083 47.5335 84.8949 49.6221 86.7358L53.3254 89.9977L53.2824 90.0409C53.8637 90.5576 54.445 91.0744 55.0264 91.5911L55.8123 92.194C56.9319 93.1844 58.3529 93.6365 59.8386 93.4858C61.3027 93.3351 62.67 92.56 63.5635 91.3758C65.1353 89.2873 
66.8578 87.2525 68.6556 85.304C68.957 84.9702 69.3661 84.798 69.8075 84.7872C70.2705 84.7872 70.6257 84.9379 70.9164 85.2286C75.8147 90.0624 81.1114 94.3686 86.6772 97.9966C88.8626 99.4176 89.4978 102.26 88.1306 104.477C86.9248 106.448 85.7729 108.493 84.7179 110.539C83.5014 112.918 83.2968 115.738 84.1688 118.257C84.9978 120.68 86.7095 122.585 88.981 123.64C90.2514 124.232 91.5971 124.534 92.9859 124.534C96.5062 124.534 99.682 122.596 101.286 119.452C102.729 116.61 104.419 113.8 106.281 111.131C107.369 109.559 109.36 108.838 111.255 109.322C115.26 110.355 120.643 111.421 124.454 112.143C128.308 112.864 132.119 111.023 133.96 107.578L134.143 107.233C135.521 104.628 135.531 101.506 134.164 98.8901C132.786 96.2526 130.181 94.4655 127.21 94.121C126.478 94.0349 125.778 93.9488 125.11 93.8626C124.97 93.8411 124.852 93.8196 124.744 93.798L123.356 93.4751L124.357 92.4523C124.432 92.377 124.529 92.2801 124.658 92.194C128.771 88.8028 132.571 85.1963 135.962 81.4714C141.668 75.1951 144.122 66.4965 142.518 58.1747H142.529H142.551Z'
|
||||||
|
fill='currentColor'
|
||||||
|
/>
|
||||||
|
<path
|
||||||
|
d='M56.6506 14.3371C65.5861 19.6338 77.4067 27.3743 82.9833 34.1674C83.64 34.9532 84.2967 35.7391 84.9534 36.4927C86.1591 37.8815 86.2991 39.8731 85.2979 41.4233C83.4892 44.2116 81.4115 46.9569 79.1399 49.5945C77.4713 51.5107 77.4067 54.3098 78.9785 56.2476L79.0431 56.323C79.2261 56.5598 79.4306 56.8074 79.6136 57.0442C81.2931 59.1758 83.0801 61.2213 84.9211 63.1375C85.9007 64.1603 87.2249 64.7309 88.6352 64.7309L88.7644 65.5275L88.7429 64.7309C90.207 64.6986 91.6173 64.0526 92.5969 62.933C94.8362 60.4031 96.9247 57.744 98.8302 55.0633C100.133 53.2224 102.63 52.8026 104.525 54.1052C106.463 55.4402 108.465 56.7105 110.457 57.8839C112.793 59.2511 115.614 59.5095 118.165 58.5621C120.749 57.604 122.762 55.5694 123.656 52.9533C125.055 48.9055 123.257 44.2547 119.382 41.9078C116.755 40.3145 114.15 38.5166 111.674 36.5788C110.382 35.5561 109.833 33.8767 110.296 32.2941C111.437 28.3001 112.481 23.1218 113.148 19.4831C113.837 15.7259 112.147 11.8826 108.939 9.94477L108.562 9.72944C105.871 8.12537 102.587 8.00696 99.7668 9.40649C96.9247 10.8168 95.03 13.5405 94.6855 16.6733L94.6639 16.867C94.6209 17.2546 94.384 17.5453 94.018 17.6637C93.652 17.7821 93.2859 17.6852 93.0168 17.4269C89.0012 13.1422 84.738 9.25576 80.3134 5.8646C74.3708 1.31075 66.7811 -0.583999 59.4928 0.675575L59.1805 0.729423C56.1124 1.2677 53.7547 3.60383 53.1949 6.68279C52.6351 9.72946 53.9915 12.7223 56.6722 14.3048H56.6614L56.6506 14.3371Z'
|
||||||
|
fill='currentColor'
|
||||||
|
/>
|
||||||
|
</svg>
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
export function GoogleDocsIcon(props: SVGProps<SVGSVGElement>) {
|
export function GoogleDocsIcon(props: SVGProps<SVGSVGElement>) {
|
||||||
return (
|
return (
|
||||||
<svg
|
<svg
|
||||||
@@ -5436,3 +5462,24 @@ export function EnrichSoIcon(props: SVGProps<SVGSVGElement>) {
|
|||||||
</svg>
|
</svg>
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
export function AgentSkillsIcon(props: SVGProps<SVGSVGElement>) {
|
||||||
|
return (
|
||||||
|
<svg
|
||||||
|
{...props}
|
||||||
|
xmlns='http://www.w3.org/2000/svg'
|
||||||
|
width='16'
|
||||||
|
height='16'
|
||||||
|
viewBox='0 0 16 16'
|
||||||
|
fill='none'
|
||||||
|
>
|
||||||
|
<path
|
||||||
|
d='M8 1L14.0622 4.5V11.5L8 15L1.93782 11.5V4.5L8 1Z'
|
||||||
|
stroke='currentColor'
|
||||||
|
strokeWidth='1.5'
|
||||||
|
fill='none'
|
||||||
|
/>
|
||||||
|
<path d='M8 4.5L11 6.25V9.75L8 11.5L5 9.75V6.25L8 4.5Z' fill='currentColor' />
|
||||||
|
</svg>
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|||||||
@@ -367,6 +367,12 @@ export function AccessControl() {
|
|||||||
category: 'Tools',
|
category: 'Tools',
|
||||||
configKey: 'disableCustomTools' as const,
|
configKey: 'disableCustomTools' as const,
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
id: 'disable-skills',
|
||||||
|
label: 'Skills',
|
||||||
|
category: 'Tools',
|
||||||
|
configKey: 'disableSkills' as const,
|
||||||
|
},
|
||||||
{
|
{
|
||||||
id: 'hide-trace-spans',
|
id: 'hide-trace-spans',
|
||||||
label: 'Trace Spans',
|
label: 'Trace Spans',
|
||||||
@@ -950,6 +956,7 @@ export function AccessControl() {
|
|||||||
!editingConfig?.hideFilesTab &&
|
!editingConfig?.hideFilesTab &&
|
||||||
!editingConfig?.disableMcpTools &&
|
!editingConfig?.disableMcpTools &&
|
||||||
!editingConfig?.disableCustomTools &&
|
!editingConfig?.disableCustomTools &&
|
||||||
|
!editingConfig?.disableSkills &&
|
||||||
!editingConfig?.hideTraceSpans &&
|
!editingConfig?.hideTraceSpans &&
|
||||||
!editingConfig?.disableInvitations &&
|
!editingConfig?.disableInvitations &&
|
||||||
!editingConfig?.hideDeployApi &&
|
!editingConfig?.hideDeployApi &&
|
||||||
@@ -969,6 +976,7 @@ export function AccessControl() {
|
|||||||
hideFilesTab: allVisible,
|
hideFilesTab: allVisible,
|
||||||
disableMcpTools: allVisible,
|
disableMcpTools: allVisible,
|
||||||
disableCustomTools: allVisible,
|
disableCustomTools: allVisible,
|
||||||
|
disableSkills: allVisible,
|
||||||
hideTraceSpans: allVisible,
|
hideTraceSpans: allVisible,
|
||||||
disableInvitations: allVisible,
|
disableInvitations: allVisible,
|
||||||
hideDeployApi: allVisible,
|
hideDeployApi: allVisible,
|
||||||
@@ -989,6 +997,7 @@ export function AccessControl() {
|
|||||||
!editingConfig?.hideFilesTab &&
|
!editingConfig?.hideFilesTab &&
|
||||||
!editingConfig?.disableMcpTools &&
|
!editingConfig?.disableMcpTools &&
|
||||||
!editingConfig?.disableCustomTools &&
|
!editingConfig?.disableCustomTools &&
|
||||||
|
!editingConfig?.disableSkills &&
|
||||||
!editingConfig?.hideTraceSpans &&
|
!editingConfig?.hideTraceSpans &&
|
||||||
!editingConfig?.disableInvitations &&
|
!editingConfig?.disableInvitations &&
|
||||||
!editingConfig?.hideDeployApi &&
|
!editingConfig?.hideDeployApi &&
|
||||||
|
|||||||
@@ -43,6 +43,13 @@ export class CustomToolsNotAllowedError extends Error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
export class SkillsNotAllowedError extends Error {
|
||||||
|
constructor() {
|
||||||
|
super('Skills are not allowed based on your permission group settings')
|
||||||
|
this.name = 'SkillsNotAllowedError'
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
export class InvitationsNotAllowedError extends Error {
|
export class InvitationsNotAllowedError extends Error {
|
||||||
constructor() {
|
constructor() {
|
||||||
super('Invitations are not allowed based on your permission group settings')
|
super('Invitations are not allowed based on your permission group settings')
|
||||||
@@ -201,6 +208,26 @@ export async function validateCustomToolsAllowed(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
export async function validateSkillsAllowed(
|
||||||
|
userId: string | undefined,
|
||||||
|
ctx?: ExecutionContext
|
||||||
|
): Promise<void> {
|
||||||
|
if (!userId) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
const config = await getPermissionConfig(userId, ctx)
|
||||||
|
|
||||||
|
if (!config) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if (config.disableSkills) {
|
||||||
|
logger.warn('Skills blocked by permission group', { userId })
|
||||||
|
throw new SkillsNotAllowedError()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Validates if the user is allowed to send invitations.
|
* Validates if the user is allowed to send invitations.
|
||||||
* Also checks the global feature flag.
|
* Also checks the global feature flag.
|
||||||
|
|||||||
@@ -11,9 +11,15 @@ import {
|
|||||||
validateCustomToolsAllowed,
|
validateCustomToolsAllowed,
|
||||||
validateMcpToolsAllowed,
|
validateMcpToolsAllowed,
|
||||||
validateModelProvider,
|
validateModelProvider,
|
||||||
|
validateSkillsAllowed,
|
||||||
} from '@/ee/access-control/utils/permission-check'
|
} from '@/ee/access-control/utils/permission-check'
|
||||||
import { AGENT, BlockType, DEFAULTS, REFERENCE, stripCustomToolPrefix } from '@/executor/constants'
|
import { AGENT, BlockType, DEFAULTS, REFERENCE, stripCustomToolPrefix } from '@/executor/constants'
|
||||||
import { memoryService } from '@/executor/handlers/agent/memory'
|
import { memoryService } from '@/executor/handlers/agent/memory'
|
||||||
|
import {
|
||||||
|
buildLoadSkillTool,
|
||||||
|
buildSkillsSystemPromptSection,
|
||||||
|
resolveSkillMetadata,
|
||||||
|
} from '@/executor/handlers/agent/skills-resolver'
|
||||||
import type {
|
import type {
|
||||||
AgentInputs,
|
AgentInputs,
|
||||||
Message,
|
Message,
|
||||||
@@ -57,8 +63,21 @@ export class AgentBlockHandler implements BlockHandler {
|
|||||||
|
|
||||||
const providerId = getProviderFromModel(model)
|
const providerId = getProviderFromModel(model)
|
||||||
const formattedTools = await this.formatTools(ctx, filteredInputs.tools || [])
|
const formattedTools = await this.formatTools(ctx, filteredInputs.tools || [])
|
||||||
|
|
||||||
|
// Resolve skill metadata for progressive disclosure
|
||||||
|
const skillInputs = filteredInputs.skills ?? []
|
||||||
|
let skillMetadata: Array<{ name: string; description: string }> = []
|
||||||
|
if (skillInputs.length > 0 && ctx.workspaceId) {
|
||||||
|
await validateSkillsAllowed(ctx.userId, ctx)
|
||||||
|
skillMetadata = await resolveSkillMetadata(skillInputs, ctx.workspaceId)
|
||||||
|
if (skillMetadata.length > 0) {
|
||||||
|
const skillNames = skillMetadata.map((s) => s.name)
|
||||||
|
formattedTools.push(buildLoadSkillTool(skillNames))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
const streamingConfig = this.getStreamingConfig(ctx, block)
|
const streamingConfig = this.getStreamingConfig(ctx, block)
|
||||||
const messages = await this.buildMessages(ctx, filteredInputs)
|
const messages = await this.buildMessages(ctx, filteredInputs, skillMetadata)
|
||||||
|
|
||||||
const providerRequest = this.buildProviderRequest({
|
const providerRequest = this.buildProviderRequest({
|
||||||
ctx,
|
ctx,
|
||||||
@@ -723,7 +742,8 @@ export class AgentBlockHandler implements BlockHandler {
|
|||||||
|
|
||||||
private async buildMessages(
|
private async buildMessages(
|
||||||
ctx: ExecutionContext,
|
ctx: ExecutionContext,
|
||||||
inputs: AgentInputs
|
inputs: AgentInputs,
|
||||||
|
skillMetadata: Array<{ name: string; description: string }> = []
|
||||||
): Promise<Message[] | undefined> {
|
): Promise<Message[] | undefined> {
|
||||||
const messages: Message[] = []
|
const messages: Message[] = []
|
||||||
const memoryEnabled = inputs.memoryType && inputs.memoryType !== 'none'
|
const memoryEnabled = inputs.memoryType && inputs.memoryType !== 'none'
|
||||||
@@ -803,6 +823,20 @@ export class AgentBlockHandler implements BlockHandler {
|
|||||||
messages.unshift(...systemMessages)
|
messages.unshift(...systemMessages)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// 8. Inject skill metadata into the system message (progressive disclosure)
|
||||||
|
if (skillMetadata.length > 0) {
|
||||||
|
const skillSection = buildSkillsSystemPromptSection(skillMetadata)
|
||||||
|
const systemIdx = messages.findIndex((m) => m.role === 'system')
|
||||||
|
if (systemIdx >= 0) {
|
||||||
|
messages[systemIdx] = {
|
||||||
|
...messages[systemIdx],
|
||||||
|
content: messages[systemIdx].content + skillSection,
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
messages.unshift({ role: 'system', content: skillSection.trim() })
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return messages.length > 0 ? messages : undefined
|
return messages.length > 0 ? messages : undefined
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -872,24 +906,17 @@ export class AgentBlockHandler implements BlockHandler {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Find first system message
|
|
||||||
const firstSystemIndex = messages.findIndex((msg) => msg.role === 'system')
|
const firstSystemIndex = messages.findIndex((msg) => msg.role === 'system')
|
||||||
|
|
||||||
if (firstSystemIndex === -1) {
|
if (firstSystemIndex === -1) {
|
||||||
// No system message exists - add at position 0
|
|
||||||
messages.unshift({ role: 'system', content })
|
messages.unshift({ role: 'system', content })
|
||||||
} else if (firstSystemIndex === 0) {
|
} else if (firstSystemIndex === 0) {
|
||||||
// System message already at position 0 - replace it
|
|
||||||
// Explicit systemPrompt parameter takes precedence over memory/messages
|
|
||||||
messages[0] = { role: 'system', content }
|
messages[0] = { role: 'system', content }
|
||||||
} else {
|
} else {
|
||||||
// System message exists but not at position 0 - move it to position 0
|
|
||||||
// and update with new content
|
|
||||||
messages.splice(firstSystemIndex, 1)
|
messages.splice(firstSystemIndex, 1)
|
||||||
messages.unshift({ role: 'system', content })
|
messages.unshift({ role: 'system', content })
|
||||||
}
|
}
|
||||||
|
|
||||||
// Remove any additional system messages (keep only the first one)
|
|
||||||
for (let i = messages.length - 1; i >= 1; i--) {
|
for (let i = messages.length - 1; i >= 1; i--) {
|
||||||
if (messages[i].role === 'system') {
|
if (messages[i].role === 'system') {
|
||||||
messages.splice(i, 1)
|
messages.splice(i, 1)
|
||||||
@@ -962,6 +989,7 @@ export class AgentBlockHandler implements BlockHandler {
|
|||||||
blockNameMapping,
|
blockNameMapping,
|
||||||
reasoningEffort: inputs.reasoningEffort,
|
reasoningEffort: inputs.reasoningEffort,
|
||||||
verbosity: inputs.verbosity,
|
verbosity: inputs.verbosity,
|
||||||
|
thinkingLevel: inputs.thinkingLevel,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1030,6 +1058,7 @@ export class AgentBlockHandler implements BlockHandler {
|
|||||||
isDeployedContext: ctx.isDeployedContext,
|
isDeployedContext: ctx.isDeployedContext,
|
||||||
reasoningEffort: providerRequest.reasoningEffort,
|
reasoningEffort: providerRequest.reasoningEffort,
|
||||||
verbosity: providerRequest.verbosity,
|
verbosity: providerRequest.verbosity,
|
||||||
|
thinkingLevel: providerRequest.thinkingLevel,
|
||||||
})
|
})
|
||||||
|
|
||||||
return this.processProviderResponse(response, block, responseFormat)
|
return this.processProviderResponse(response, block, responseFormat)
|
||||||
@@ -1047,8 +1076,6 @@ export class AgentBlockHandler implements BlockHandler {
|
|||||||
|
|
||||||
logger.info(`[${requestId}] Resolving Vertex AI credential: ${credentialId}`)
|
logger.info(`[${requestId}] Resolving Vertex AI credential: ${credentialId}`)
|
||||||
|
|
||||||
// Get the credential - we need to find the owner
|
|
||||||
// Since we're in a workflow context, we can query the credential directly
|
|
||||||
const credential = await db.query.account.findFirst({
|
const credential = await db.query.account.findFirst({
|
||||||
where: eq(account.id, credentialId),
|
where: eq(account.id, credentialId),
|
||||||
})
|
})
|
||||||
@@ -1057,7 +1084,6 @@ export class AgentBlockHandler implements BlockHandler {
|
|||||||
throw new Error(`Vertex AI credential not found: ${credentialId}`)
|
throw new Error(`Vertex AI credential not found: ${credentialId}`)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Refresh the token if needed
|
|
||||||
const { accessToken } = await refreshTokenIfNeeded(requestId, credential, credentialId)
|
const { accessToken } = await refreshTokenIfNeeded(requestId, credential, credentialId)
|
||||||
|
|
||||||
if (!accessToken) {
|
if (!accessToken) {
|
||||||
|
|||||||
122
apps/sim/executor/handlers/agent/skills-resolver.ts
Normal file
122
apps/sim/executor/handlers/agent/skills-resolver.ts
Normal file
@@ -0,0 +1,122 @@
|
|||||||
|
import { db } from '@sim/db'
|
||||||
|
import { skill } from '@sim/db/schema'
|
||||||
|
import { createLogger } from '@sim/logger'
|
||||||
|
import { and, eq, inArray } from 'drizzle-orm'
|
||||||
|
import type { SkillInput } from '@/executor/handlers/agent/types'
|
||||||
|
|
||||||
|
const logger = createLogger('SkillsResolver')
|
||||||
|
|
||||||
|
function escapeXml(str: string): string {
|
||||||
|
return str
|
||||||
|
.replace(/&/g, '&')
|
||||||
|
.replace(/</g, '<')
|
||||||
|
.replace(/>/g, '>')
|
||||||
|
.replace(/"/g, '"')
|
||||||
|
}
|
||||||
|
|
||||||
|
interface SkillMetadata {
|
||||||
|
name: string
|
||||||
|
description: string
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Fetch skill metadata (name + description) for system prompt injection.
|
||||||
|
* Only returns lightweight data so the LLM knows what skills are available.
|
||||||
|
*/
|
||||||
|
export async function resolveSkillMetadata(
|
||||||
|
skillInputs: SkillInput[],
|
||||||
|
workspaceId: string
|
||||||
|
): Promise<SkillMetadata[]> {
|
||||||
|
if (!skillInputs.length || !workspaceId) return []
|
||||||
|
|
||||||
|
const skillIds = skillInputs.map((s) => s.skillId)
|
||||||
|
|
||||||
|
try {
|
||||||
|
const rows = await db
|
||||||
|
.select({ name: skill.name, description: skill.description })
|
||||||
|
.from(skill)
|
||||||
|
.where(and(eq(skill.workspaceId, workspaceId), inArray(skill.id, skillIds)))
|
||||||
|
|
||||||
|
return rows
|
||||||
|
} catch (error) {
|
||||||
|
logger.error('Failed to resolve skill metadata', { error, skillIds, workspaceId })
|
||||||
|
return []
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Fetch full skill content for a load_skill tool response.
|
||||||
|
* Called when the LLM decides a skill is relevant and invokes load_skill.
|
||||||
|
*/
|
||||||
|
export async function resolveSkillContent(
|
||||||
|
skillName: string,
|
||||||
|
workspaceId: string
|
||||||
|
): Promise<string | null> {
|
||||||
|
if (!skillName || !workspaceId) return null
|
||||||
|
|
||||||
|
try {
|
||||||
|
const rows = await db
|
||||||
|
.select({ content: skill.content, name: skill.name })
|
||||||
|
.from(skill)
|
||||||
|
.where(and(eq(skill.workspaceId, workspaceId), eq(skill.name, skillName)))
|
||||||
|
.limit(1)
|
||||||
|
|
||||||
|
if (rows.length === 0) {
|
||||||
|
logger.warn('Skill not found', { skillName, workspaceId })
|
||||||
|
return null
|
||||||
|
}
|
||||||
|
|
||||||
|
return rows[0].content
|
||||||
|
} catch (error) {
|
||||||
|
logger.error('Failed to resolve skill content', { error, skillName, workspaceId })
|
||||||
|
return null
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Build the system prompt section that lists available skills.
|
||||||
|
* Uses XML format per the agentskills.io integration guide.
|
||||||
|
*/
|
||||||
|
export function buildSkillsSystemPromptSection(skills: SkillMetadata[]): string {
|
||||||
|
if (!skills.length) return ''
|
||||||
|
|
||||||
|
const skillEntries = skills
|
||||||
|
.map(
|
||||||
|
(s) =>
|
||||||
|
` <skill name="${escapeXml(s.name)}">\n <description>${escapeXml(s.description)}</description>\n </skill>`
|
||||||
|
)
|
||||||
|
.join('\n')
|
||||||
|
|
||||||
|
return [
|
||||||
|
'',
|
||||||
|
'You have access to the following skills. Use the load_skill tool to activate a skill when relevant.',
|
||||||
|
'',
|
||||||
|
'<available_skills>',
|
||||||
|
skillEntries,
|
||||||
|
'</available_skills>',
|
||||||
|
].join('\n')
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Build the load_skill tool definition for injection into the tools array.
|
||||||
|
* Returns a ProviderToolConfig-compatible object so all providers can process it.
|
||||||
|
*/
|
||||||
|
export function buildLoadSkillTool(skillNames: string[]) {
|
||||||
|
return {
|
||||||
|
id: 'load_skill',
|
||||||
|
name: 'load_skill',
|
||||||
|
description: `Load a skill to get specialized instructions. Available skills: ${skillNames.join(', ')}`,
|
||||||
|
params: {},
|
||||||
|
parameters: {
|
||||||
|
type: 'object',
|
||||||
|
properties: {
|
||||||
|
skill_name: {
|
||||||
|
type: 'string',
|
||||||
|
description: 'Name of the skill to load',
|
||||||
|
enum: skillNames,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
required: ['skill_name'],
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,7 +1,14 @@
|
|||||||
|
export interface SkillInput {
|
||||||
|
skillId: string
|
||||||
|
name?: string
|
||||||
|
description?: string
|
||||||
|
}
|
||||||
|
|
||||||
export interface AgentInputs {
|
export interface AgentInputs {
|
||||||
model?: string
|
model?: string
|
||||||
responseFormat?: string | object
|
responseFormat?: string | object
|
||||||
tools?: ToolInput[]
|
tools?: ToolInput[]
|
||||||
|
skills?: SkillInput[]
|
||||||
// Legacy inputs (backward compatible)
|
// Legacy inputs (backward compatible)
|
||||||
systemPrompt?: string
|
systemPrompt?: string
|
||||||
userPrompt?: string | object
|
userPrompt?: string | object
|
||||||
@@ -27,6 +34,7 @@ export interface AgentInputs {
|
|||||||
bedrockRegion?: string
|
bedrockRegion?: string
|
||||||
reasoningEffort?: string
|
reasoningEffort?: string
|
||||||
verbosity?: string
|
verbosity?: string
|
||||||
|
thinkingLevel?: string
|
||||||
}
|
}
|
||||||
|
|
||||||
export interface ToolInput {
|
export interface ToolInput {
|
||||||
|
|||||||
@@ -121,26 +121,17 @@ export class EvaluatorBlockHandler implements BlockHandler {
|
|||||||
|
|
||||||
temperature: EVALUATOR.DEFAULT_TEMPERATURE,
|
temperature: EVALUATOR.DEFAULT_TEMPERATURE,
|
||||||
apiKey: finalApiKey,
|
apiKey: finalApiKey,
|
||||||
|
azureEndpoint: inputs.azureEndpoint,
|
||||||
|
azureApiVersion: inputs.azureApiVersion,
|
||||||
|
vertexProject: evaluatorConfig.vertexProject,
|
||||||
|
vertexLocation: evaluatorConfig.vertexLocation,
|
||||||
|
bedrockAccessKeyId: evaluatorConfig.bedrockAccessKeyId,
|
||||||
|
bedrockSecretKey: evaluatorConfig.bedrockSecretKey,
|
||||||
|
bedrockRegion: evaluatorConfig.bedrockRegion,
|
||||||
workflowId: ctx.workflowId,
|
workflowId: ctx.workflowId,
|
||||||
workspaceId: ctx.workspaceId,
|
workspaceId: ctx.workspaceId,
|
||||||
}
|
}
|
||||||
|
|
||||||
if (providerId === 'vertex') {
|
|
||||||
providerRequest.vertexProject = evaluatorConfig.vertexProject
|
|
||||||
providerRequest.vertexLocation = evaluatorConfig.vertexLocation
|
|
||||||
}
|
|
||||||
|
|
||||||
if (providerId === 'azure-openai') {
|
|
||||||
providerRequest.azureEndpoint = inputs.azureEndpoint
|
|
||||||
providerRequest.azureApiVersion = inputs.azureApiVersion
|
|
||||||
}
|
|
||||||
|
|
||||||
if (providerId === 'bedrock') {
|
|
||||||
providerRequest.bedrockAccessKeyId = evaluatorConfig.bedrockAccessKeyId
|
|
||||||
providerRequest.bedrockSecretKey = evaluatorConfig.bedrockSecretKey
|
|
||||||
providerRequest.bedrockRegion = evaluatorConfig.bedrockRegion
|
|
||||||
}
|
|
||||||
|
|
||||||
const response = await fetch(url.toString(), {
|
const response = await fetch(url.toString(), {
|
||||||
method: 'POST',
|
method: 'POST',
|
||||||
headers: await buildAuthHeaders(),
|
headers: await buildAuthHeaders(),
|
||||||
|
|||||||
@@ -96,26 +96,17 @@ export class RouterBlockHandler implements BlockHandler {
|
|||||||
context: JSON.stringify(messages),
|
context: JSON.stringify(messages),
|
||||||
temperature: ROUTER.INFERENCE_TEMPERATURE,
|
temperature: ROUTER.INFERENCE_TEMPERATURE,
|
||||||
apiKey: finalApiKey,
|
apiKey: finalApiKey,
|
||||||
|
azureEndpoint: inputs.azureEndpoint,
|
||||||
|
azureApiVersion: inputs.azureApiVersion,
|
||||||
|
vertexProject: routerConfig.vertexProject,
|
||||||
|
vertexLocation: routerConfig.vertexLocation,
|
||||||
|
bedrockAccessKeyId: routerConfig.bedrockAccessKeyId,
|
||||||
|
bedrockSecretKey: routerConfig.bedrockSecretKey,
|
||||||
|
bedrockRegion: routerConfig.bedrockRegion,
|
||||||
workflowId: ctx.workflowId,
|
workflowId: ctx.workflowId,
|
||||||
workspaceId: ctx.workspaceId,
|
workspaceId: ctx.workspaceId,
|
||||||
}
|
}
|
||||||
|
|
||||||
if (providerId === 'vertex') {
|
|
||||||
providerRequest.vertexProject = routerConfig.vertexProject
|
|
||||||
providerRequest.vertexLocation = routerConfig.vertexLocation
|
|
||||||
}
|
|
||||||
|
|
||||||
if (providerId === 'azure-openai') {
|
|
||||||
providerRequest.azureEndpoint = inputs.azureEndpoint
|
|
||||||
providerRequest.azureApiVersion = inputs.azureApiVersion
|
|
||||||
}
|
|
||||||
|
|
||||||
if (providerId === 'bedrock') {
|
|
||||||
providerRequest.bedrockAccessKeyId = routerConfig.bedrockAccessKeyId
|
|
||||||
providerRequest.bedrockSecretKey = routerConfig.bedrockSecretKey
|
|
||||||
providerRequest.bedrockRegion = routerConfig.bedrockRegion
|
|
||||||
}
|
|
||||||
|
|
||||||
const response = await fetch(url.toString(), {
|
const response = await fetch(url.toString(), {
|
||||||
method: 'POST',
|
method: 'POST',
|
||||||
headers: await buildAuthHeaders(),
|
headers: await buildAuthHeaders(),
|
||||||
@@ -234,6 +225,13 @@ export class RouterBlockHandler implements BlockHandler {
|
|||||||
context: JSON.stringify(messages),
|
context: JSON.stringify(messages),
|
||||||
temperature: ROUTER.INFERENCE_TEMPERATURE,
|
temperature: ROUTER.INFERENCE_TEMPERATURE,
|
||||||
apiKey: finalApiKey,
|
apiKey: finalApiKey,
|
||||||
|
azureEndpoint: inputs.azureEndpoint,
|
||||||
|
azureApiVersion: inputs.azureApiVersion,
|
||||||
|
vertexProject: routerConfig.vertexProject,
|
||||||
|
vertexLocation: routerConfig.vertexLocation,
|
||||||
|
bedrockAccessKeyId: routerConfig.bedrockAccessKeyId,
|
||||||
|
bedrockSecretKey: routerConfig.bedrockSecretKey,
|
||||||
|
bedrockRegion: routerConfig.bedrockRegion,
|
||||||
workflowId: ctx.workflowId,
|
workflowId: ctx.workflowId,
|
||||||
workspaceId: ctx.workspaceId,
|
workspaceId: ctx.workspaceId,
|
||||||
responseFormat: {
|
responseFormat: {
|
||||||
@@ -257,22 +255,6 @@ export class RouterBlockHandler implements BlockHandler {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
if (providerId === 'vertex') {
|
|
||||||
providerRequest.vertexProject = routerConfig.vertexProject
|
|
||||||
providerRequest.vertexLocation = routerConfig.vertexLocation
|
|
||||||
}
|
|
||||||
|
|
||||||
if (providerId === 'azure-openai') {
|
|
||||||
providerRequest.azureEndpoint = inputs.azureEndpoint
|
|
||||||
providerRequest.azureApiVersion = inputs.azureApiVersion
|
|
||||||
}
|
|
||||||
|
|
||||||
if (providerId === 'bedrock') {
|
|
||||||
providerRequest.bedrockAccessKeyId = routerConfig.bedrockAccessKeyId
|
|
||||||
providerRequest.bedrockSecretKey = routerConfig.bedrockSecretKey
|
|
||||||
providerRequest.bedrockRegion = routerConfig.bedrockRegion
|
|
||||||
}
|
|
||||||
|
|
||||||
const response = await fetch(url.toString(), {
|
const response = await fetch(url.toString(), {
|
||||||
method: 'POST',
|
method: 'POST',
|
||||||
headers: await buildAuthHeaders(),
|
headers: await buildAuthHeaders(),
|
||||||
|
|||||||
263
apps/sim/hooks/queries/skills.ts
Normal file
263
apps/sim/hooks/queries/skills.ts
Normal file
@@ -0,0 +1,263 @@
|
|||||||
|
import { createLogger } from '@sim/logger'
|
||||||
|
import { keepPreviousData, useMutation, useQuery, useQueryClient } from '@tanstack/react-query'
|
||||||
|
|
||||||
|
const logger = createLogger('SkillsQueries')
|
||||||
|
const API_ENDPOINT = '/api/skills'
|
||||||
|
|
||||||
|
export interface SkillDefinition {
|
||||||
|
id: string
|
||||||
|
workspaceId: string | null
|
||||||
|
userId: string | null
|
||||||
|
name: string
|
||||||
|
description: string
|
||||||
|
content: string
|
||||||
|
createdAt: string
|
||||||
|
updatedAt?: string
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Query key factories for skills queries
|
||||||
|
*/
|
||||||
|
export const skillsKeys = {
|
||||||
|
all: ['skills'] as const,
|
||||||
|
lists: () => [...skillsKeys.all, 'list'] as const,
|
||||||
|
list: (workspaceId: string) => [...skillsKeys.lists(), workspaceId] as const,
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Fetch skills for a workspace
|
||||||
|
*/
|
||||||
|
async function fetchSkills(workspaceId: string): Promise<SkillDefinition[]> {
|
||||||
|
const response = await fetch(`${API_ENDPOINT}?workspaceId=${workspaceId}`)
|
||||||
|
|
||||||
|
if (!response.ok) {
|
||||||
|
const errorData = await response.json().catch(() => ({}))
|
||||||
|
throw new Error(errorData.error || `Failed to fetch skills: ${response.statusText}`)
|
||||||
|
}
|
||||||
|
|
||||||
|
const { data } = await response.json()
|
||||||
|
|
||||||
|
if (!Array.isArray(data)) {
|
||||||
|
throw new Error('Invalid response format')
|
||||||
|
}
|
||||||
|
|
||||||
|
return data.map((s: Record<string, unknown>) => ({
|
||||||
|
id: s.id as string,
|
||||||
|
workspaceId: (s.workspaceId as string) ?? null,
|
||||||
|
userId: (s.userId as string) ?? null,
|
||||||
|
name: s.name as string,
|
||||||
|
description: s.description as string,
|
||||||
|
content: s.content as string,
|
||||||
|
createdAt: (s.createdAt as string) ?? new Date().toISOString(),
|
||||||
|
updatedAt: s.updatedAt as string | undefined,
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Hook to fetch skills for a workspace
|
||||||
|
*/
|
||||||
|
export function useSkills(workspaceId: string) {
|
||||||
|
return useQuery<SkillDefinition[]>({
|
||||||
|
queryKey: skillsKeys.list(workspaceId),
|
||||||
|
queryFn: () => fetchSkills(workspaceId),
|
||||||
|
enabled: !!workspaceId,
|
||||||
|
staleTime: 60 * 1000,
|
||||||
|
placeholderData: keepPreviousData,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Create skill mutation
 */
interface CreateSkillParams {
  /** Workspace the new skill belongs to. */
  workspaceId: string
  /** Fields for the skill to create. */
  skill: {
    name: string
    description: string
    content: string
  }
}
|
||||||
|
|
||||||
|
export function useCreateSkill() {
|
||||||
|
const queryClient = useQueryClient()
|
||||||
|
|
||||||
|
return useMutation({
|
||||||
|
mutationFn: async ({ workspaceId, skill: s }: CreateSkillParams) => {
|
||||||
|
logger.info(`Creating skill: ${s.name} in workspace ${workspaceId}`)
|
||||||
|
|
||||||
|
const response = await fetch(API_ENDPOINT, {
|
||||||
|
method: 'POST',
|
||||||
|
headers: { 'Content-Type': 'application/json' },
|
||||||
|
body: JSON.stringify({
|
||||||
|
skills: [{ name: s.name, description: s.description, content: s.content }],
|
||||||
|
workspaceId,
|
||||||
|
}),
|
||||||
|
})
|
||||||
|
|
||||||
|
const data = await response.json()
|
||||||
|
|
||||||
|
if (!response.ok) {
|
||||||
|
throw new Error(data.error || 'Failed to create skill')
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!data.data || !Array.isArray(data.data)) {
|
||||||
|
throw new Error('Invalid API response: missing skills data')
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.info(`Created skill: ${s.name}`)
|
||||||
|
return data.data
|
||||||
|
},
|
||||||
|
onSuccess: (_data, variables) => {
|
||||||
|
queryClient.invalidateQueries({ queryKey: skillsKeys.list(variables.workspaceId) })
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Update skill mutation
 */
interface UpdateSkillParams {
  /** Workspace the skill belongs to. */
  workspaceId: string
  /** Id of the skill to update. */
  skillId: string
  /** Partial update — omitted fields keep their current values. */
  updates: {
    name?: string
    description?: string
    content?: string
  }
}
|
||||||
|
|
||||||
|
export function useUpdateSkill() {
|
||||||
|
const queryClient = useQueryClient()
|
||||||
|
|
||||||
|
return useMutation({
|
||||||
|
mutationFn: async ({ workspaceId, skillId, updates }: UpdateSkillParams) => {
|
||||||
|
logger.info(`Updating skill: ${skillId} in workspace ${workspaceId}`)
|
||||||
|
|
||||||
|
const currentSkills = queryClient.getQueryData<SkillDefinition[]>(
|
||||||
|
skillsKeys.list(workspaceId)
|
||||||
|
)
|
||||||
|
const currentSkill = currentSkills?.find((s) => s.id === skillId)
|
||||||
|
|
||||||
|
if (!currentSkill) {
|
||||||
|
throw new Error('Skill not found')
|
||||||
|
}
|
||||||
|
|
||||||
|
const response = await fetch(API_ENDPOINT, {
|
||||||
|
method: 'POST',
|
||||||
|
headers: { 'Content-Type': 'application/json' },
|
||||||
|
body: JSON.stringify({
|
||||||
|
skills: [
|
||||||
|
{
|
||||||
|
id: skillId,
|
||||||
|
name: updates.name ?? currentSkill.name,
|
||||||
|
description: updates.description ?? currentSkill.description,
|
||||||
|
content: updates.content ?? currentSkill.content,
|
||||||
|
},
|
||||||
|
],
|
||||||
|
workspaceId,
|
||||||
|
}),
|
||||||
|
})
|
||||||
|
|
||||||
|
const data = await response.json()
|
||||||
|
|
||||||
|
if (!response.ok) {
|
||||||
|
throw new Error(data.error || 'Failed to update skill')
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!data.data || !Array.isArray(data.data)) {
|
||||||
|
throw new Error('Invalid API response: missing skills data')
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.info(`Updated skill: ${skillId}`)
|
||||||
|
return data.data
|
||||||
|
},
|
||||||
|
onMutate: async ({ workspaceId, skillId, updates }) => {
|
||||||
|
await queryClient.cancelQueries({ queryKey: skillsKeys.list(workspaceId) })
|
||||||
|
|
||||||
|
const previousSkills = queryClient.getQueryData<SkillDefinition[]>(
|
||||||
|
skillsKeys.list(workspaceId)
|
||||||
|
)
|
||||||
|
|
||||||
|
if (previousSkills) {
|
||||||
|
queryClient.setQueryData<SkillDefinition[]>(
|
||||||
|
skillsKeys.list(workspaceId),
|
||||||
|
previousSkills.map((s) =>
|
||||||
|
s.id === skillId
|
||||||
|
? {
|
||||||
|
...s,
|
||||||
|
name: updates.name ?? s.name,
|
||||||
|
description: updates.description ?? s.description,
|
||||||
|
content: updates.content ?? s.content,
|
||||||
|
}
|
||||||
|
: s
|
||||||
|
)
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
return { previousSkills }
|
||||||
|
},
|
||||||
|
onError: (_err, variables, context) => {
|
||||||
|
if (context?.previousSkills) {
|
||||||
|
queryClient.setQueryData(skillsKeys.list(variables.workspaceId), context.previousSkills)
|
||||||
|
}
|
||||||
|
},
|
||||||
|
onSettled: (_data, _error, variables) => {
|
||||||
|
queryClient.invalidateQueries({ queryKey: skillsKeys.list(variables.workspaceId) })
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Delete skill mutation
 */
interface DeleteSkillParams {
  /** Workspace the skill belongs to. */
  workspaceId: string
  /** Id of the skill to delete. */
  skillId: string
}
|
||||||
|
|
||||||
|
export function useDeleteSkill() {
|
||||||
|
const queryClient = useQueryClient()
|
||||||
|
|
||||||
|
return useMutation({
|
||||||
|
mutationFn: async ({ workspaceId, skillId }: DeleteSkillParams) => {
|
||||||
|
logger.info(`Deleting skill: ${skillId}`)
|
||||||
|
|
||||||
|
const response = await fetch(`${API_ENDPOINT}?id=${skillId}&workspaceId=${workspaceId}`, {
|
||||||
|
method: 'DELETE',
|
||||||
|
})
|
||||||
|
|
||||||
|
const data = await response.json()
|
||||||
|
|
||||||
|
if (!response.ok) {
|
||||||
|
throw new Error(data.error || 'Failed to delete skill')
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.info(`Deleted skill: ${skillId}`)
|
||||||
|
return data
|
||||||
|
},
|
||||||
|
onMutate: async ({ workspaceId, skillId }) => {
|
||||||
|
await queryClient.cancelQueries({ queryKey: skillsKeys.list(workspaceId) })
|
||||||
|
|
||||||
|
const previousSkills = queryClient.getQueryData<SkillDefinition[]>(
|
||||||
|
skillsKeys.list(workspaceId)
|
||||||
|
)
|
||||||
|
|
||||||
|
if (previousSkills) {
|
||||||
|
queryClient.setQueryData<SkillDefinition[]>(
|
||||||
|
skillsKeys.list(workspaceId),
|
||||||
|
previousSkills.filter((s) => s.id !== skillId)
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
return { previousSkills }
|
||||||
|
},
|
||||||
|
onError: (_err, variables, context) => {
|
||||||
|
if (context?.previousSkills) {
|
||||||
|
queryClient.setQueryData(skillsKeys.list(variables.workspaceId), context.previousSkills)
|
||||||
|
}
|
||||||
|
},
|
||||||
|
onSettled: (_data, _error, variables) => {
|
||||||
|
queryClient.invalidateQueries({ queryKey: skillsKeys.list(variables.workspaceId) })
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
@@ -12,6 +12,7 @@ const VALID_PROVIDER_IDS: readonly ProviderId[] = [
|
|||||||
'openai',
|
'openai',
|
||||||
'azure-openai',
|
'azure-openai',
|
||||||
'anthropic',
|
'anthropic',
|
||||||
|
'azure-anthropic',
|
||||||
'google',
|
'google',
|
||||||
'deepseek',
|
'deepseek',
|
||||||
'xai',
|
'xai',
|
||||||
|
|||||||
@@ -147,6 +147,13 @@ export type CopilotProviderConfig =
|
|||||||
apiVersion?: string
|
apiVersion?: string
|
||||||
endpoint?: string
|
endpoint?: string
|
||||||
}
|
}
|
||||||
|
| {
|
||||||
|
provider: 'azure-anthropic'
|
||||||
|
model: string
|
||||||
|
apiKey?: string
|
||||||
|
apiVersion?: string
|
||||||
|
endpoint?: string
|
||||||
|
}
|
||||||
| {
|
| {
|
||||||
provider: 'vertex'
|
provider: 'vertex'
|
||||||
model: string
|
model: string
|
||||||
@@ -155,7 +162,7 @@ export type CopilotProviderConfig =
|
|||||||
vertexLocation?: string
|
vertexLocation?: string
|
||||||
}
|
}
|
||||||
| {
|
| {
|
||||||
provider: Exclude<ProviderId, 'azure-openai' | 'vertex'>
|
provider: Exclude<ProviderId, 'azure-openai' | 'azure-anthropic' | 'vertex'>
|
||||||
model?: string
|
model?: string
|
||||||
apiKey?: string
|
apiKey?: string
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -95,6 +95,9 @@ export const env = createEnv({
|
|||||||
AZURE_OPENAI_ENDPOINT: z.string().url().optional(), // Shared Azure OpenAI service endpoint
|
AZURE_OPENAI_ENDPOINT: z.string().url().optional(), // Shared Azure OpenAI service endpoint
|
||||||
AZURE_OPENAI_API_VERSION: z.string().optional(), // Shared Azure OpenAI API version
|
AZURE_OPENAI_API_VERSION: z.string().optional(), // Shared Azure OpenAI API version
|
||||||
AZURE_OPENAI_API_KEY: z.string().min(1).optional(), // Shared Azure OpenAI API key
|
AZURE_OPENAI_API_KEY: z.string().min(1).optional(), // Shared Azure OpenAI API key
|
||||||
|
AZURE_ANTHROPIC_ENDPOINT: z.string().url().optional(), // Azure Anthropic service endpoint
|
||||||
|
AZURE_ANTHROPIC_API_KEY: z.string().min(1).optional(), // Azure Anthropic API key
|
||||||
|
AZURE_ANTHROPIC_API_VERSION: z.string().min(1).optional(), // Azure Anthropic API version (e.g. 2023-06-01)
|
||||||
KB_OPENAI_MODEL_NAME: z.string().optional(), // Knowledge base OpenAI model name (works with both regular OpenAI and Azure OpenAI)
|
KB_OPENAI_MODEL_NAME: z.string().optional(), // Knowledge base OpenAI model name (works with both regular OpenAI and Azure OpenAI)
|
||||||
WAND_OPENAI_MODEL_NAME: z.string().optional(), // Wand generation OpenAI model name (works with both regular OpenAI and Azure OpenAI)
|
WAND_OPENAI_MODEL_NAME: z.string().optional(), // Wand generation OpenAI model name (works with both regular OpenAI and Azure OpenAI)
|
||||||
OCR_AZURE_ENDPOINT: z.string().url().optional(), // Azure Mistral OCR service endpoint
|
OCR_AZURE_ENDPOINT: z.string().url().optional(), // Azure Mistral OCR service endpoint
|
||||||
|
|||||||
@@ -1,7 +1,11 @@
|
|||||||
|
import { db } from '@sim/db'
|
||||||
|
import { account } from '@sim/db/schema'
|
||||||
import { createLogger } from '@sim/logger'
|
import { createLogger } from '@sim/logger'
|
||||||
|
import { eq } from 'drizzle-orm'
|
||||||
import { getBaseUrl } from '@/lib/core/utils/urls'
|
import { getBaseUrl } from '@/lib/core/utils/urls'
|
||||||
|
import { refreshTokenIfNeeded } from '@/app/api/auth/oauth/utils'
|
||||||
import { executeProviderRequest } from '@/providers'
|
import { executeProviderRequest } from '@/providers'
|
||||||
import { getApiKey, getProviderFromModel } from '@/providers/utils'
|
import { getProviderFromModel } from '@/providers/utils'
|
||||||
|
|
||||||
const logger = createLogger('HallucinationValidator')
|
const logger = createLogger('HallucinationValidator')
|
||||||
|
|
||||||
@@ -19,7 +23,18 @@ export interface HallucinationValidationInput {
|
|||||||
topK: number // Number of chunks to retrieve, default 10
|
topK: number // Number of chunks to retrieve, default 10
|
||||||
model: string
|
model: string
|
||||||
apiKey?: string
|
apiKey?: string
|
||||||
|
providerCredentials?: {
|
||||||
|
azureEndpoint?: string
|
||||||
|
azureApiVersion?: string
|
||||||
|
vertexProject?: string
|
||||||
|
vertexLocation?: string
|
||||||
|
vertexCredential?: string
|
||||||
|
bedrockAccessKeyId?: string
|
||||||
|
bedrockSecretKey?: string
|
||||||
|
bedrockRegion?: string
|
||||||
|
}
|
||||||
workflowId?: string
|
workflowId?: string
|
||||||
|
workspaceId?: string
|
||||||
requestId: string
|
requestId: string
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -89,7 +104,9 @@ async function scoreHallucinationWithLLM(
|
|||||||
userInput: string,
|
userInput: string,
|
||||||
ragContext: string[],
|
ragContext: string[],
|
||||||
model: string,
|
model: string,
|
||||||
apiKey: string,
|
apiKey: string | undefined,
|
||||||
|
providerCredentials: HallucinationValidationInput['providerCredentials'],
|
||||||
|
workspaceId: string | undefined,
|
||||||
requestId: string
|
requestId: string
|
||||||
): Promise<{ score: number; reasoning: string }> {
|
): Promise<{ score: number; reasoning: string }> {
|
||||||
try {
|
try {
|
||||||
@@ -127,6 +144,23 @@ Evaluate the consistency and provide your score and reasoning in JSON format.`
|
|||||||
|
|
||||||
const providerId = getProviderFromModel(model)
|
const providerId = getProviderFromModel(model)
|
||||||
|
|
||||||
|
let finalApiKey: string | undefined = apiKey
|
||||||
|
if (providerId === 'vertex' && providerCredentials?.vertexCredential) {
|
||||||
|
const credential = await db.query.account.findFirst({
|
||||||
|
where: eq(account.id, providerCredentials.vertexCredential),
|
||||||
|
})
|
||||||
|
if (credential) {
|
||||||
|
const { accessToken } = await refreshTokenIfNeeded(
|
||||||
|
requestId,
|
||||||
|
credential,
|
||||||
|
providerCredentials.vertexCredential
|
||||||
|
)
|
||||||
|
if (accessToken) {
|
||||||
|
finalApiKey = accessToken
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
const response = await executeProviderRequest(providerId, {
|
const response = await executeProviderRequest(providerId, {
|
||||||
model,
|
model,
|
||||||
systemPrompt,
|
systemPrompt,
|
||||||
@@ -137,7 +171,15 @@ Evaluate the consistency and provide your score and reasoning in JSON format.`
|
|||||||
},
|
},
|
||||||
],
|
],
|
||||||
temperature: 0.1, // Low temperature for consistent scoring
|
temperature: 0.1, // Low temperature for consistent scoring
|
||||||
apiKey,
|
apiKey: finalApiKey,
|
||||||
|
azureEndpoint: providerCredentials?.azureEndpoint,
|
||||||
|
azureApiVersion: providerCredentials?.azureApiVersion,
|
||||||
|
vertexProject: providerCredentials?.vertexProject,
|
||||||
|
vertexLocation: providerCredentials?.vertexLocation,
|
||||||
|
bedrockAccessKeyId: providerCredentials?.bedrockAccessKeyId,
|
||||||
|
bedrockSecretKey: providerCredentials?.bedrockSecretKey,
|
||||||
|
bedrockRegion: providerCredentials?.bedrockRegion,
|
||||||
|
workspaceId,
|
||||||
})
|
})
|
||||||
|
|
||||||
if (response instanceof ReadableStream || ('stream' in response && 'execution' in response)) {
|
if (response instanceof ReadableStream || ('stream' in response && 'execution' in response)) {
|
||||||
@@ -184,8 +226,18 @@ Evaluate the consistency and provide your score and reasoning in JSON format.`
|
|||||||
export async function validateHallucination(
|
export async function validateHallucination(
|
||||||
input: HallucinationValidationInput
|
input: HallucinationValidationInput
|
||||||
): Promise<HallucinationValidationResult> {
|
): Promise<HallucinationValidationResult> {
|
||||||
const { userInput, knowledgeBaseId, threshold, topK, model, apiKey, workflowId, requestId } =
|
const {
|
||||||
input
|
userInput,
|
||||||
|
knowledgeBaseId,
|
||||||
|
threshold,
|
||||||
|
topK,
|
||||||
|
model,
|
||||||
|
apiKey,
|
||||||
|
providerCredentials,
|
||||||
|
workflowId,
|
||||||
|
workspaceId,
|
||||||
|
requestId,
|
||||||
|
} = input
|
||||||
|
|
||||||
try {
|
try {
|
||||||
if (!userInput || userInput.trim().length === 0) {
|
if (!userInput || userInput.trim().length === 0) {
|
||||||
@@ -202,17 +254,6 @@ export async function validateHallucination(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
let finalApiKey: string
|
|
||||||
try {
|
|
||||||
const providerId = getProviderFromModel(model)
|
|
||||||
finalApiKey = getApiKey(providerId, model, apiKey)
|
|
||||||
} catch (error: any) {
|
|
||||||
return {
|
|
||||||
passed: false,
|
|
||||||
error: `API key error: ${error.message}`,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Step 1: Query knowledge base with RAG
|
// Step 1: Query knowledge base with RAG
|
||||||
const ragContext = await queryKnowledgeBase(
|
const ragContext = await queryKnowledgeBase(
|
||||||
knowledgeBaseId,
|
knowledgeBaseId,
|
||||||
@@ -234,7 +275,9 @@ export async function validateHallucination(
|
|||||||
userInput,
|
userInput,
|
||||||
ragContext,
|
ragContext,
|
||||||
model,
|
model,
|
||||||
finalApiKey,
|
apiKey,
|
||||||
|
providerCredentials,
|
||||||
|
workspaceId,
|
||||||
requestId
|
requestId
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|||||||
@@ -10,6 +10,7 @@ export interface PermissionGroupConfig {
|
|||||||
hideFilesTab: boolean
|
hideFilesTab: boolean
|
||||||
disableMcpTools: boolean
|
disableMcpTools: boolean
|
||||||
disableCustomTools: boolean
|
disableCustomTools: boolean
|
||||||
|
disableSkills: boolean
|
||||||
hideTemplates: boolean
|
hideTemplates: boolean
|
||||||
disableInvitations: boolean
|
disableInvitations: boolean
|
||||||
// Deploy Modal Tabs
|
// Deploy Modal Tabs
|
||||||
@@ -31,6 +32,7 @@ export const DEFAULT_PERMISSION_GROUP_CONFIG: PermissionGroupConfig = {
|
|||||||
hideFilesTab: false,
|
hideFilesTab: false,
|
||||||
disableMcpTools: false,
|
disableMcpTools: false,
|
||||||
disableCustomTools: false,
|
disableCustomTools: false,
|
||||||
|
disableSkills: false,
|
||||||
hideTemplates: false,
|
hideTemplates: false,
|
||||||
disableInvitations: false,
|
disableInvitations: false,
|
||||||
hideDeployApi: false,
|
hideDeployApi: false,
|
||||||
@@ -59,6 +61,7 @@ export function parsePermissionGroupConfig(config: unknown): PermissionGroupConf
|
|||||||
hideFilesTab: typeof c.hideFilesTab === 'boolean' ? c.hideFilesTab : false,
|
hideFilesTab: typeof c.hideFilesTab === 'boolean' ? c.hideFilesTab : false,
|
||||||
disableMcpTools: typeof c.disableMcpTools === 'boolean' ? c.disableMcpTools : false,
|
disableMcpTools: typeof c.disableMcpTools === 'boolean' ? c.disableMcpTools : false,
|
||||||
disableCustomTools: typeof c.disableCustomTools === 'boolean' ? c.disableCustomTools : false,
|
disableCustomTools: typeof c.disableCustomTools === 'boolean' ? c.disableCustomTools : false,
|
||||||
|
disableSkills: typeof c.disableSkills === 'boolean' ? c.disableSkills : false,
|
||||||
hideTemplates: typeof c.hideTemplates === 'boolean' ? c.hideTemplates : false,
|
hideTemplates: typeof c.hideTemplates === 'boolean' ? c.hideTemplates : false,
|
||||||
disableInvitations: typeof c.disableInvitations === 'boolean' ? c.disableInvitations : false,
|
disableInvitations: typeof c.disableInvitations === 'boolean' ? c.disableInvitations : false,
|
||||||
hideDeployApi: typeof c.hideDeployApi === 'boolean' ? c.hideDeployApi : false,
|
hideDeployApi: typeof c.hideDeployApi === 'boolean' ? c.hideDeployApi : false,
|
||||||
|
|||||||
@@ -21,6 +21,11 @@ export const TOKENIZATION_CONFIG = {
|
|||||||
confidence: 'high',
|
confidence: 'high',
|
||||||
supportedMethods: ['heuristic', 'fallback'],
|
supportedMethods: ['heuristic', 'fallback'],
|
||||||
},
|
},
|
||||||
|
'azure-anthropic': {
|
||||||
|
avgCharsPerToken: 4.5,
|
||||||
|
confidence: 'high',
|
||||||
|
supportedMethods: ['heuristic', 'fallback'],
|
||||||
|
},
|
||||||
google: {
|
google: {
|
||||||
avgCharsPerToken: 5,
|
avgCharsPerToken: 5,
|
||||||
confidence: 'medium',
|
confidence: 'medium',
|
||||||
|
|||||||
@@ -204,6 +204,7 @@ export function estimateTokenCount(text: string, providerId?: string): TokenEsti
|
|||||||
estimatedTokens = estimateOpenAITokens(text)
|
estimatedTokens = estimateOpenAITokens(text)
|
||||||
break
|
break
|
||||||
case 'anthropic':
|
case 'anthropic':
|
||||||
|
case 'azure-anthropic':
|
||||||
estimatedTokens = estimateAnthropicTokens(text)
|
estimatedTokens = estimateAnthropicTokens(text)
|
||||||
break
|
break
|
||||||
case 'google':
|
case 'google':
|
||||||
|
|||||||
100
apps/sim/lib/workflows/skills/operations.ts
Normal file
100
apps/sim/lib/workflows/skills/operations.ts
Normal file
@@ -0,0 +1,100 @@
|
|||||||
|
import { db } from '@sim/db'
|
||||||
|
import { skill } from '@sim/db/schema'
|
||||||
|
import { createLogger } from '@sim/logger'
|
||||||
|
import { and, desc, eq, ne } from 'drizzle-orm'
|
||||||
|
import { nanoid } from 'nanoid'
|
||||||
|
import { generateRequestId } from '@/lib/core/utils/request'
|
||||||
|
|
||||||
|
const logger = createLogger('SkillsOperations')
|
||||||
|
|
||||||
|
/**
 * Internal function to create/update skills.
 * Can be called from API routes or internal services.
 *
 * All work runs in a single transaction: any name conflict aborts the whole
 * batch. Skill names are unique per workspace (checked here, not via a DB
 * constraint from what is visible in this function).
 *
 * NOTE(review): if `s.id` is provided but no matching row exists in this
 * workspace, the entry falls through to the create path and is inserted with
 * a freshly generated nanoid — the client-supplied id is NOT reused. Confirm
 * this is intended.
 *
 * @param params.skills - Batch of skills; entries with an existing `id` are
 *   updated, all others are created.
 * @param params.workspaceId - Workspace scoping every read and write.
 * @param params.userId - Recorded as the creator on newly inserted skills.
 * @param params.requestId - Correlation id for logs; generated when omitted.
 * @returns All skills in the workspace, newest first.
 * @throws Error when a skill name already exists in the workspace.
 */
export async function upsertSkills(params: {
  skills: Array<{
    id?: string
    name: string
    description: string
    content: string
  }>
  workspaceId: string
  userId: string
  requestId?: string
}) {
  const { skills, workspaceId, userId, requestId = generateRequestId() } = params

  return await db.transaction(async (tx) => {
    for (const s of skills) {
      const nowTime = new Date()

      if (s.id) {
        // Update path: only touch rows that belong to this workspace.
        const existingSkill = await tx
          .select()
          .from(skill)
          .where(and(eq(skill.id, s.id), eq(skill.workspaceId, workspaceId)))
          .limit(1)

        if (existingSkill.length > 0) {
          // Only check for a name collision when the name actually changes.
          if (s.name !== existingSkill[0].name) {
            const nameConflict = await tx
              .select({ id: skill.id })
              .from(skill)
              .where(
                and(eq(skill.workspaceId, workspaceId), eq(skill.name, s.name), ne(skill.id, s.id))
              )
              .limit(1)

            if (nameConflict.length > 0) {
              throw new Error(`A skill with the name "${s.name}" already exists in this workspace`)
            }
          }

          await tx
            .update(skill)
            .set({
              name: s.name,
              description: s.description,
              content: s.content,
              updatedAt: nowTime,
            })
            .where(and(eq(skill.id, s.id), eq(skill.workspaceId, workspaceId)))

          logger.info(`[${requestId}] Updated skill ${s.id}`)
          continue
        }
      }

      // Create path (also reached when s.id matched no row — see NOTE above).
      const duplicateName = await tx
        .select()
        .from(skill)
        .where(and(eq(skill.workspaceId, workspaceId), eq(skill.name, s.name)))
        .limit(1)

      if (duplicateName.length > 0) {
        throw new Error(`A skill with the name "${s.name}" already exists in this workspace`)
      }

      await tx.insert(skill).values({
        id: nanoid(),
        workspaceId,
        userId,
        name: s.name,
        description: s.description,
        content: s.content,
        createdAt: nowTime,
        updatedAt: nowTime,
      })

      logger.info(`[${requestId}] Created skill "${s.name}"`)
    }

    // Return the full, freshly-read workspace skill list (newest first).
    const resultSkills = await tx
      .select()
      .from(skill)
      .where(eq(skill.workspaceId, workspaceId))
      .orderBy(desc(skill.createdAt))

    return resultSkills
  })
}
|
||||||
@@ -113,6 +113,28 @@ function buildThinkingConfig(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The Anthropic SDK requires streaming for non-streaming requests when max_tokens exceeds
|
||||||
|
* this threshold, to avoid HTTP timeouts. When thinking is enabled and pushes max_tokens
|
||||||
|
* above this limit, we use streaming internally and collect the final message.
|
||||||
|
*/
|
||||||
|
const ANTHROPIC_SDK_NON_STREAMING_MAX_TOKENS = 21333
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates an Anthropic message, automatically using streaming internally when max_tokens
|
||||||
|
* exceeds the SDK's non-streaming threshold. Returns the same Message object either way.
|
||||||
|
*/
|
||||||
|
async function createMessage(
|
||||||
|
anthropic: Anthropic,
|
||||||
|
payload: any
|
||||||
|
): Promise<Anthropic.Messages.Message> {
|
||||||
|
if (payload.max_tokens > ANTHROPIC_SDK_NON_STREAMING_MAX_TOKENS && !payload.stream) {
|
||||||
|
const stream = anthropic.messages.stream(payload)
|
||||||
|
return stream.finalMessage()
|
||||||
|
}
|
||||||
|
return anthropic.messages.create(payload) as Promise<Anthropic.Messages.Message>
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Executes a request using the Anthropic API with full tool loop support.
|
* Executes a request using the Anthropic API with full tool loop support.
|
||||||
* This is the shared core implementation used by both the standard Anthropic provider
|
* This is the shared core implementation used by both the standard Anthropic provider
|
||||||
@@ -268,13 +290,35 @@ export async function executeAnthropicProviderRequest(
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Add extended thinking configuration if supported and requested
|
// Add extended thinking configuration if supported and requested
|
||||||
if (request.thinkingLevel) {
|
// The 'none' sentinel means "disable thinking" — skip configuration entirely.
|
||||||
|
if (request.thinkingLevel && request.thinkingLevel !== 'none') {
|
||||||
const thinkingConfig = buildThinkingConfig(request.model, request.thinkingLevel)
|
const thinkingConfig = buildThinkingConfig(request.model, request.thinkingLevel)
|
||||||
if (thinkingConfig) {
|
if (thinkingConfig) {
|
||||||
payload.thinking = thinkingConfig.thinking
|
payload.thinking = thinkingConfig.thinking
|
||||||
if (thinkingConfig.outputConfig) {
|
if (thinkingConfig.outputConfig) {
|
||||||
payload.output_config = thinkingConfig.outputConfig
|
payload.output_config = thinkingConfig.outputConfig
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Per Anthropic docs: budget_tokens must be less than max_tokens.
|
||||||
|
// Ensure max_tokens leaves room for both thinking and text output.
|
||||||
|
if (
|
||||||
|
thinkingConfig.thinking.type === 'enabled' &&
|
||||||
|
'budget_tokens' in thinkingConfig.thinking
|
||||||
|
) {
|
||||||
|
const budgetTokens = thinkingConfig.thinking.budget_tokens
|
||||||
|
const minMaxTokens = budgetTokens + 4096
|
||||||
|
if (payload.max_tokens < minMaxTokens) {
|
||||||
|
const modelMax = getMaxOutputTokensForModel(request.model, true)
|
||||||
|
payload.max_tokens = Math.min(minMaxTokens, modelMax)
|
||||||
|
logger.info(
|
||||||
|
`Adjusted max_tokens to ${payload.max_tokens} to satisfy budget_tokens (${budgetTokens}) constraint`
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Per Anthropic docs: thinking is not compatible with temperature or top_k modifications.
|
||||||
|
payload.temperature = undefined
|
||||||
|
|
||||||
const isAdaptive = thinkingConfig.thinking.type === 'adaptive'
|
const isAdaptive = thinkingConfig.thinking.type === 'adaptive'
|
||||||
logger.info(
|
logger.info(
|
||||||
`Using ${isAdaptive ? 'adaptive' : 'extended'} thinking for model: ${modelId} with ${isAdaptive ? `effort: ${request.thinkingLevel}` : `budget: ${(thinkingConfig.thinking as { budget_tokens: number }).budget_tokens}`}`
|
`Using ${isAdaptive ? 'adaptive' : 'extended'} thinking for model: ${modelId} with ${isAdaptive ? `effort: ${request.thinkingLevel}` : `budget: ${(thinkingConfig.thinking as { budget_tokens: number }).budget_tokens}`}`
|
||||||
@@ -288,7 +332,16 @@ export async function executeAnthropicProviderRequest(
|
|||||||
|
|
||||||
if (anthropicTools?.length) {
|
if (anthropicTools?.length) {
|
||||||
payload.tools = anthropicTools
|
payload.tools = anthropicTools
|
||||||
if (toolChoice !== 'auto') {
|
// Per Anthropic docs: forced tool_choice (type: "tool" or "any") is incompatible with
|
||||||
|
// thinking. Only auto and none are supported when thinking is enabled.
|
||||||
|
if (payload.thinking) {
|
||||||
|
// Per Anthropic docs: only 'auto' (default) and 'none' work with thinking.
|
||||||
|
if (toolChoice === 'none') {
|
||||||
|
payload.tool_choice = { type: 'none' }
|
||||||
|
}
|
||||||
|
} else if (toolChoice === 'none') {
|
||||||
|
payload.tool_choice = { type: 'none' }
|
||||||
|
} else if (toolChoice !== 'auto') {
|
||||||
payload.tool_choice = toolChoice
|
payload.tool_choice = toolChoice
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -386,12 +439,16 @@ export async function executeAnthropicProviderRequest(
|
|||||||
const providerStartTimeISO = new Date(providerStartTime).toISOString()
|
const providerStartTimeISO = new Date(providerStartTime).toISOString()
|
||||||
|
|
||||||
// Cap intermediate calls at non-streaming limit to avoid SDK timeout errors,
|
// Cap intermediate calls at non-streaming limit to avoid SDK timeout errors,
|
||||||
// but allow users to set lower values if desired
|
// but allow users to set lower values if desired. Use Math.max to preserve
|
||||||
|
// thinking-adjusted max_tokens from payload when it's higher.
|
||||||
const nonStreamingLimit = getMaxOutputTokensForModel(request.model, false)
|
const nonStreamingLimit = getMaxOutputTokensForModel(request.model, false)
|
||||||
const nonStreamingMaxTokens = request.maxTokens
|
const nonStreamingMaxTokens = request.maxTokens
|
||||||
? Math.min(Number.parseInt(String(request.maxTokens)), nonStreamingLimit)
|
? Math.min(Number.parseInt(String(request.maxTokens)), nonStreamingLimit)
|
||||||
: nonStreamingLimit
|
: nonStreamingLimit
|
||||||
const intermediatePayload = { ...payload, max_tokens: nonStreamingMaxTokens }
|
const intermediatePayload = {
|
||||||
|
...payload,
|
||||||
|
max_tokens: Math.max(nonStreamingMaxTokens, payload.max_tokens),
|
||||||
|
}
|
||||||
|
|
||||||
try {
|
try {
|
||||||
const initialCallTime = Date.now()
|
const initialCallTime = Date.now()
|
||||||
@@ -399,7 +456,7 @@ export async function executeAnthropicProviderRequest(
|
|||||||
const forcedTools = preparedTools?.forcedTools || []
|
const forcedTools = preparedTools?.forcedTools || []
|
||||||
let usedForcedTools: string[] = []
|
let usedForcedTools: string[] = []
|
||||||
|
|
||||||
let currentResponse = await anthropic.messages.create(intermediatePayload)
|
let currentResponse = await createMessage(anthropic, intermediatePayload)
|
||||||
const firstResponseTime = Date.now() - initialCallTime
|
const firstResponseTime = Date.now() - initialCallTime
|
||||||
|
|
||||||
let content = ''
|
let content = ''
|
||||||
@@ -583,11 +640,20 @@ export async function executeAnthropicProviderRequest(
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add ONE assistant message with ALL tool_use blocks
|
// Per Anthropic docs: thinking blocks must be preserved in assistant messages
|
||||||
|
// during tool use to maintain reasoning continuity.
|
||||||
|
const thinkingBlocks = currentResponse.content.filter(
|
||||||
|
(item) => item.type === 'thinking' || item.type === 'redacted_thinking'
|
||||||
|
)
|
||||||
|
|
||||||
|
// Add ONE assistant message with thinking + tool_use blocks
|
||||||
if (toolUseBlocks.length > 0) {
|
if (toolUseBlocks.length > 0) {
|
||||||
currentMessages.push({
|
currentMessages.push({
|
||||||
role: 'assistant',
|
role: 'assistant',
|
||||||
content: toolUseBlocks as unknown as Anthropic.Messages.ContentBlock[],
|
content: [
|
||||||
|
...thinkingBlocks,
|
||||||
|
...toolUseBlocks,
|
||||||
|
] as unknown as Anthropic.Messages.ContentBlock[],
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -607,7 +673,11 @@ export async function executeAnthropicProviderRequest(
|
|||||||
messages: currentMessages,
|
messages: currentMessages,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Per Anthropic docs: forced tool_choice is incompatible with thinking.
|
||||||
|
// Only auto and none are supported when thinking is enabled.
|
||||||
|
const thinkingEnabled = !!payload.thinking
|
||||||
if (
|
if (
|
||||||
|
!thinkingEnabled &&
|
||||||
typeof originalToolChoice === 'object' &&
|
typeof originalToolChoice === 'object' &&
|
||||||
hasUsedForcedTool &&
|
hasUsedForcedTool &&
|
||||||
forcedTools.length > 0
|
forcedTools.length > 0
|
||||||
@@ -624,7 +694,11 @@ export async function executeAnthropicProviderRequest(
|
|||||||
nextPayload.tool_choice = undefined
|
nextPayload.tool_choice = undefined
|
||||||
logger.info('All forced tools have been used, removing tool_choice parameter')
|
logger.info('All forced tools have been used, removing tool_choice parameter')
|
||||||
}
|
}
|
||||||
} else if (hasUsedForcedTool && typeof originalToolChoice === 'object') {
|
} else if (
|
||||||
|
!thinkingEnabled &&
|
||||||
|
hasUsedForcedTool &&
|
||||||
|
typeof originalToolChoice === 'object'
|
||||||
|
) {
|
||||||
nextPayload.tool_choice = undefined
|
nextPayload.tool_choice = undefined
|
||||||
logger.info(
|
logger.info(
|
||||||
'Removing tool_choice parameter for subsequent requests after forced tool was used'
|
'Removing tool_choice parameter for subsequent requests after forced tool was used'
|
||||||
@@ -633,7 +707,7 @@ export async function executeAnthropicProviderRequest(
|
|||||||
|
|
||||||
const nextModelStartTime = Date.now()
|
const nextModelStartTime = Date.now()
|
||||||
|
|
||||||
currentResponse = await anthropic.messages.create(nextPayload)
|
currentResponse = await createMessage(anthropic, nextPayload)
|
||||||
|
|
||||||
const nextCheckResult = checkForForcedToolUsage(
|
const nextCheckResult = checkForForcedToolUsage(
|
||||||
currentResponse,
|
currentResponse,
|
||||||
@@ -779,12 +853,16 @@ export async function executeAnthropicProviderRequest(
|
|||||||
const providerStartTimeISO = new Date(providerStartTime).toISOString()
|
const providerStartTimeISO = new Date(providerStartTime).toISOString()
|
||||||
|
|
||||||
// Cap intermediate calls at non-streaming limit to avoid SDK timeout errors,
|
// Cap intermediate calls at non-streaming limit to avoid SDK timeout errors,
|
||||||
// but allow users to set lower values if desired
|
// but allow users to set lower values if desired. Use Math.max to preserve
|
||||||
|
// thinking-adjusted max_tokens from payload when it's higher.
|
||||||
const nonStreamingLimit = getMaxOutputTokensForModel(request.model, false)
|
const nonStreamingLimit = getMaxOutputTokensForModel(request.model, false)
|
||||||
const toolLoopMaxTokens = request.maxTokens
|
const toolLoopMaxTokens = request.maxTokens
|
||||||
? Math.min(Number.parseInt(String(request.maxTokens)), nonStreamingLimit)
|
? Math.min(Number.parseInt(String(request.maxTokens)), nonStreamingLimit)
|
||||||
: nonStreamingLimit
|
: nonStreamingLimit
|
||||||
const toolLoopPayload = { ...payload, max_tokens: toolLoopMaxTokens }
|
const toolLoopPayload = {
|
||||||
|
...payload,
|
||||||
|
max_tokens: Math.max(toolLoopMaxTokens, payload.max_tokens),
|
||||||
|
}
|
||||||
|
|
||||||
try {
|
try {
|
||||||
const initialCallTime = Date.now()
|
const initialCallTime = Date.now()
|
||||||
@@ -792,7 +870,7 @@ export async function executeAnthropicProviderRequest(
|
|||||||
const forcedTools = preparedTools?.forcedTools || []
|
const forcedTools = preparedTools?.forcedTools || []
|
||||||
let usedForcedTools: string[] = []
|
let usedForcedTools: string[] = []
|
||||||
|
|
||||||
let currentResponse = await anthropic.messages.create(toolLoopPayload)
|
let currentResponse = await createMessage(anthropic, toolLoopPayload)
|
||||||
const firstResponseTime = Date.now() - initialCallTime
|
const firstResponseTime = Date.now() - initialCallTime
|
||||||
|
|
||||||
let content = ''
|
let content = ''
|
||||||
@@ -989,11 +1067,20 @@ export async function executeAnthropicProviderRequest(
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add ONE assistant message with ALL tool_use blocks
|
// Per Anthropic docs: thinking blocks must be preserved in assistant messages
|
||||||
|
// during tool use to maintain reasoning continuity.
|
||||||
|
const thinkingBlocks = currentResponse.content.filter(
|
||||||
|
(item) => item.type === 'thinking' || item.type === 'redacted_thinking'
|
||||||
|
)
|
||||||
|
|
||||||
|
// Add ONE assistant message with thinking + tool_use blocks
|
||||||
if (toolUseBlocks.length > 0) {
|
if (toolUseBlocks.length > 0) {
|
||||||
currentMessages.push({
|
currentMessages.push({
|
||||||
role: 'assistant',
|
role: 'assistant',
|
||||||
content: toolUseBlocks as unknown as Anthropic.Messages.ContentBlock[],
|
content: [
|
||||||
|
...thinkingBlocks,
|
||||||
|
...toolUseBlocks,
|
||||||
|
] as unknown as Anthropic.Messages.ContentBlock[],
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1013,7 +1100,15 @@ export async function executeAnthropicProviderRequest(
|
|||||||
messages: currentMessages,
|
messages: currentMessages,
|
||||||
}
|
}
|
||||||
|
|
||||||
if (typeof originalToolChoice === 'object' && hasUsedForcedTool && forcedTools.length > 0) {
|
// Per Anthropic docs: forced tool_choice is incompatible with thinking.
|
||||||
|
// Only auto and none are supported when thinking is enabled.
|
||||||
|
const thinkingEnabled = !!payload.thinking
|
||||||
|
if (
|
||||||
|
!thinkingEnabled &&
|
||||||
|
typeof originalToolChoice === 'object' &&
|
||||||
|
hasUsedForcedTool &&
|
||||||
|
forcedTools.length > 0
|
||||||
|
) {
|
||||||
const remainingTools = forcedTools.filter((tool) => !usedForcedTools.includes(tool))
|
const remainingTools = forcedTools.filter((tool) => !usedForcedTools.includes(tool))
|
||||||
|
|
||||||
if (remainingTools.length > 0) {
|
if (remainingTools.length > 0) {
|
||||||
@@ -1026,7 +1121,11 @@ export async function executeAnthropicProviderRequest(
|
|||||||
nextPayload.tool_choice = undefined
|
nextPayload.tool_choice = undefined
|
||||||
logger.info('All forced tools have been used, removing tool_choice parameter')
|
logger.info('All forced tools have been used, removing tool_choice parameter')
|
||||||
}
|
}
|
||||||
} else if (hasUsedForcedTool && typeof originalToolChoice === 'object') {
|
} else if (
|
||||||
|
!thinkingEnabled &&
|
||||||
|
hasUsedForcedTool &&
|
||||||
|
typeof originalToolChoice === 'object'
|
||||||
|
) {
|
||||||
nextPayload.tool_choice = undefined
|
nextPayload.tool_choice = undefined
|
||||||
logger.info(
|
logger.info(
|
||||||
'Removing tool_choice parameter for subsequent requests after forced tool was used'
|
'Removing tool_choice parameter for subsequent requests after forced tool was used'
|
||||||
@@ -1035,7 +1134,7 @@ export async function executeAnthropicProviderRequest(
|
|||||||
|
|
||||||
const nextModelStartTime = Date.now()
|
const nextModelStartTime = Date.now()
|
||||||
|
|
||||||
currentResponse = await anthropic.messages.create(nextPayload)
|
currentResponse = await createMessage(anthropic, nextPayload)
|
||||||
|
|
||||||
const nextCheckResult = checkForForcedToolUsage(
|
const nextCheckResult = checkForForcedToolUsage(
|
||||||
currentResponse,
|
currentResponse,
|
||||||
|
|||||||
@@ -35,6 +35,8 @@ export const azureAnthropicProvider: ProviderConfig = {
|
|||||||
// The SDK appends /v1/messages automatically
|
// The SDK appends /v1/messages automatically
|
||||||
const baseURL = `${request.azureEndpoint.replace(/\/$/, '')}/anthropic`
|
const baseURL = `${request.azureEndpoint.replace(/\/$/, '')}/anthropic`
|
||||||
|
|
||||||
|
const anthropicVersion = request.azureApiVersion || '2023-06-01'
|
||||||
|
|
||||||
return executeAnthropicProviderRequest(
|
return executeAnthropicProviderRequest(
|
||||||
{
|
{
|
||||||
...request,
|
...request,
|
||||||
@@ -49,7 +51,7 @@ export const azureAnthropicProvider: ProviderConfig = {
|
|||||||
apiKey,
|
apiKey,
|
||||||
defaultHeaders: {
|
defaultHeaders: {
|
||||||
'api-key': apiKey,
|
'api-key': apiKey,
|
||||||
'anthropic-version': '2023-06-01',
|
'anthropic-version': anthropicVersion,
|
||||||
...(useNativeStructuredOutputs
|
...(useNativeStructuredOutputs
|
||||||
? { 'anthropic-beta': 'structured-outputs-2025-11-13' }
|
? { 'anthropic-beta': 'structured-outputs-2025-11-13' }
|
||||||
: {}),
|
: {}),
|
||||||
|
|||||||
@@ -98,8 +98,10 @@ async function executeChatCompletionsRequest(
|
|||||||
if (request.temperature !== undefined) payload.temperature = request.temperature
|
if (request.temperature !== undefined) payload.temperature = request.temperature
|
||||||
if (request.maxTokens != null) payload.max_completion_tokens = request.maxTokens
|
if (request.maxTokens != null) payload.max_completion_tokens = request.maxTokens
|
||||||
|
|
||||||
if (request.reasoningEffort !== undefined) payload.reasoning_effort = request.reasoningEffort
|
if (request.reasoningEffort !== undefined && request.reasoningEffort !== 'auto')
|
||||||
if (request.verbosity !== undefined) payload.verbosity = request.verbosity
|
payload.reasoning_effort = request.reasoningEffort
|
||||||
|
if (request.verbosity !== undefined && request.verbosity !== 'auto')
|
||||||
|
payload.verbosity = request.verbosity
|
||||||
|
|
||||||
if (request.responseFormat) {
|
if (request.responseFormat) {
|
||||||
payload.response_format = {
|
payload.response_format = {
|
||||||
|
|||||||
@@ -197,6 +197,9 @@ export const bedrockProvider: ProviderConfig = {
|
|||||||
} else if (tc.type === 'function' && tc.function?.name) {
|
} else if (tc.type === 'function' && tc.function?.name) {
|
||||||
toolChoice = { tool: { name: tc.function.name } }
|
toolChoice = { tool: { name: tc.function.name } }
|
||||||
logger.info(`Using Bedrock tool_choice format: force tool "${tc.function.name}"`)
|
logger.info(`Using Bedrock tool_choice format: force tool "${tc.function.name}"`)
|
||||||
|
} else if (tc.type === 'any') {
|
||||||
|
toolChoice = { any: {} }
|
||||||
|
logger.info('Using Bedrock tool_choice format: any tool')
|
||||||
} else {
|
} else {
|
||||||
toolChoice = { auto: {} }
|
toolChoice = { auto: {} }
|
||||||
}
|
}
|
||||||
@@ -860,6 +863,11 @@ export const bedrockProvider: ProviderConfig = {
|
|||||||
content,
|
content,
|
||||||
model: request.model,
|
model: request.model,
|
||||||
tokens,
|
tokens,
|
||||||
|
cost: {
|
||||||
|
input: cost.input,
|
||||||
|
output: cost.output,
|
||||||
|
total: cost.total,
|
||||||
|
},
|
||||||
toolCalls:
|
toolCalls:
|
||||||
toolCalls.length > 0
|
toolCalls.length > 0
|
||||||
? toolCalls.map((tc) => ({
|
? toolCalls.map((tc) => ({
|
||||||
|
|||||||
@@ -24,7 +24,6 @@ import {
|
|||||||
extractTextContent,
|
extractTextContent,
|
||||||
mapToThinkingLevel,
|
mapToThinkingLevel,
|
||||||
} from '@/providers/google/utils'
|
} from '@/providers/google/utils'
|
||||||
import { getThinkingCapability } from '@/providers/models'
|
|
||||||
import type { FunctionCallResponse, ProviderRequest, ProviderResponse } from '@/providers/types'
|
import type { FunctionCallResponse, ProviderRequest, ProviderResponse } from '@/providers/types'
|
||||||
import {
|
import {
|
||||||
calculateCost,
|
calculateCost,
|
||||||
@@ -432,13 +431,11 @@ export async function executeGeminiRequest(
|
|||||||
logger.warn('Gemini does not support responseFormat with tools. Structured output ignored.')
|
logger.warn('Gemini does not support responseFormat with tools. Structured output ignored.')
|
||||||
}
|
}
|
||||||
|
|
||||||
// Configure thinking for models that support it
|
// Configure thinking only when the user explicitly selects a thinking level
|
||||||
const thinkingCapability = getThinkingCapability(model)
|
if (request.thinkingLevel && request.thinkingLevel !== 'none') {
|
||||||
if (thinkingCapability) {
|
|
||||||
const level = request.thinkingLevel ?? thinkingCapability.default ?? 'high'
|
|
||||||
const thinkingConfig: ThinkingConfig = {
|
const thinkingConfig: ThinkingConfig = {
|
||||||
includeThoughts: false,
|
includeThoughts: false,
|
||||||
thinkingLevel: mapToThinkingLevel(level),
|
thinkingLevel: mapToThinkingLevel(request.thinkingLevel),
|
||||||
}
|
}
|
||||||
geminiConfig.thinkingConfig = thinkingConfig
|
geminiConfig.thinkingConfig = thinkingConfig
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -40,9 +40,9 @@ export interface ModelCapabilities {
|
|||||||
* This only applies to direct Anthropic API calls, not Bedrock (which uses AWS SDK).
|
* This only applies to direct Anthropic API calls, not Bedrock (which uses AWS SDK).
|
||||||
*/
|
*/
|
||||||
maxOutputTokens?: {
|
maxOutputTokens?: {
|
||||||
/** Maximum tokens for streaming requests */
|
/** Maximum supported output tokens (used for streaming requests) */
|
||||||
max: number
|
max: number
|
||||||
/** Safe default for non-streaming requests (to avoid Anthropic SDK timeout errors) */
|
/** Conservative default when user doesn't specify maxTokens (controls cost/latency) */
|
||||||
default: number
|
default: number
|
||||||
}
|
}
|
||||||
reasoningEffort?: {
|
reasoningEffort?: {
|
||||||
@@ -109,7 +109,7 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
|
|||||||
name: 'OpenAI',
|
name: 'OpenAI',
|
||||||
description: "OpenAI's models",
|
description: "OpenAI's models",
|
||||||
defaultModel: 'gpt-4o',
|
defaultModel: 'gpt-4o',
|
||||||
modelPatterns: [/^gpt/, /^o1/, /^text-embedding/],
|
modelPatterns: [/^gpt/, /^o\d/, /^text-embedding/],
|
||||||
icon: OpenAIIcon,
|
icon: OpenAIIcon,
|
||||||
capabilities: {
|
capabilities: {
|
||||||
toolUsageControl: true,
|
toolUsageControl: true,
|
||||||
@@ -138,7 +138,7 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
|
|||||||
},
|
},
|
||||||
capabilities: {
|
capabilities: {
|
||||||
reasoningEffort: {
|
reasoningEffort: {
|
||||||
values: ['none', 'minimal', 'low', 'medium', 'high', 'xhigh'],
|
values: ['none', 'low', 'medium', 'high', 'xhigh'],
|
||||||
},
|
},
|
||||||
verbosity: {
|
verbosity: {
|
||||||
values: ['low', 'medium', 'high'],
|
values: ['low', 'medium', 'high'],
|
||||||
@@ -164,60 +164,6 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
|
|||||||
},
|
},
|
||||||
contextWindow: 400000,
|
contextWindow: 400000,
|
||||||
},
|
},
|
||||||
// {
|
|
||||||
// id: 'gpt-5.1-mini',
|
|
||||||
// pricing: {
|
|
||||||
// input: 0.25,
|
|
||||||
// cachedInput: 0.025,
|
|
||||||
// output: 2.0,
|
|
||||||
// updatedAt: '2025-11-14',
|
|
||||||
// },
|
|
||||||
// capabilities: {
|
|
||||||
// reasoningEffort: {
|
|
||||||
// values: ['none', 'low', 'medium', 'high'],
|
|
||||||
// },
|
|
||||||
// verbosity: {
|
|
||||||
// values: ['low', 'medium', 'high'],
|
|
||||||
// },
|
|
||||||
// },
|
|
||||||
// contextWindow: 400000,
|
|
||||||
// },
|
|
||||||
// {
|
|
||||||
// id: 'gpt-5.1-nano',
|
|
||||||
// pricing: {
|
|
||||||
// input: 0.05,
|
|
||||||
// cachedInput: 0.005,
|
|
||||||
// output: 0.4,
|
|
||||||
// updatedAt: '2025-11-14',
|
|
||||||
// },
|
|
||||||
// capabilities: {
|
|
||||||
// reasoningEffort: {
|
|
||||||
// values: ['none', 'low', 'medium', 'high'],
|
|
||||||
// },
|
|
||||||
// verbosity: {
|
|
||||||
// values: ['low', 'medium', 'high'],
|
|
||||||
// },
|
|
||||||
// },
|
|
||||||
// contextWindow: 400000,
|
|
||||||
// },
|
|
||||||
// {
|
|
||||||
// id: 'gpt-5.1-codex',
|
|
||||||
// pricing: {
|
|
||||||
// input: 1.25,
|
|
||||||
// cachedInput: 0.125,
|
|
||||||
// output: 10.0,
|
|
||||||
// updatedAt: '2025-11-14',
|
|
||||||
// },
|
|
||||||
// capabilities: {
|
|
||||||
// reasoningEffort: {
|
|
||||||
// values: ['none', 'medium', 'high'],
|
|
||||||
// },
|
|
||||||
// verbosity: {
|
|
||||||
// values: ['low', 'medium', 'high'],
|
|
||||||
// },
|
|
||||||
// },
|
|
||||||
// contextWindow: 400000,
|
|
||||||
// },
|
|
||||||
{
|
{
|
||||||
id: 'gpt-5',
|
id: 'gpt-5',
|
||||||
pricing: {
|
pricing: {
|
||||||
@@ -280,8 +226,10 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
|
|||||||
output: 10.0,
|
output: 10.0,
|
||||||
updatedAt: '2025-08-07',
|
updatedAt: '2025-08-07',
|
||||||
},
|
},
|
||||||
capabilities: {},
|
capabilities: {
|
||||||
contextWindow: 400000,
|
temperature: { min: 0, max: 2 },
|
||||||
|
},
|
||||||
|
contextWindow: 128000,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
id: 'o1',
|
id: 'o1',
|
||||||
@@ -311,7 +259,7 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
|
|||||||
values: ['low', 'medium', 'high'],
|
values: ['low', 'medium', 'high'],
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
contextWindow: 128000,
|
contextWindow: 200000,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
id: 'o4-mini',
|
id: 'o4-mini',
|
||||||
@@ -326,7 +274,7 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
|
|||||||
values: ['low', 'medium', 'high'],
|
values: ['low', 'medium', 'high'],
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
contextWindow: 128000,
|
contextWindow: 200000,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
id: 'gpt-4.1',
|
id: 'gpt-4.1',
|
||||||
@@ -413,7 +361,7 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
|
|||||||
maxOutputTokens: { max: 64000, default: 8192 },
|
maxOutputTokens: { max: 64000, default: 8192 },
|
||||||
thinking: {
|
thinking: {
|
||||||
levels: ['low', 'medium', 'high'],
|
levels: ['low', 'medium', 'high'],
|
||||||
default: 'medium',
|
default: 'high',
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
contextWindow: 200000,
|
contextWindow: 200000,
|
||||||
@@ -429,10 +377,10 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
|
|||||||
capabilities: {
|
capabilities: {
|
||||||
temperature: { min: 0, max: 1 },
|
temperature: { min: 0, max: 1 },
|
||||||
nativeStructuredOutputs: true,
|
nativeStructuredOutputs: true,
|
||||||
maxOutputTokens: { max: 64000, default: 8192 },
|
maxOutputTokens: { max: 32000, default: 8192 },
|
||||||
thinking: {
|
thinking: {
|
||||||
levels: ['low', 'medium', 'high'],
|
levels: ['low', 'medium', 'high'],
|
||||||
default: 'medium',
|
default: 'high',
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
contextWindow: 200000,
|
contextWindow: 200000,
|
||||||
@@ -447,10 +395,10 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
|
|||||||
},
|
},
|
||||||
capabilities: {
|
capabilities: {
|
||||||
temperature: { min: 0, max: 1 },
|
temperature: { min: 0, max: 1 },
|
||||||
maxOutputTokens: { max: 64000, default: 8192 },
|
maxOutputTokens: { max: 32000, default: 8192 },
|
||||||
thinking: {
|
thinking: {
|
||||||
levels: ['low', 'medium', 'high'],
|
levels: ['low', 'medium', 'high'],
|
||||||
default: 'medium',
|
default: 'high',
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
contextWindow: 200000,
|
contextWindow: 200000,
|
||||||
@@ -469,7 +417,7 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
|
|||||||
maxOutputTokens: { max: 64000, default: 8192 },
|
maxOutputTokens: { max: 64000, default: 8192 },
|
||||||
thinking: {
|
thinking: {
|
||||||
levels: ['low', 'medium', 'high'],
|
levels: ['low', 'medium', 'high'],
|
||||||
default: 'medium',
|
default: 'high',
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
contextWindow: 200000,
|
contextWindow: 200000,
|
||||||
@@ -487,7 +435,7 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
|
|||||||
maxOutputTokens: { max: 64000, default: 8192 },
|
maxOutputTokens: { max: 64000, default: 8192 },
|
||||||
thinking: {
|
thinking: {
|
||||||
levels: ['low', 'medium', 'high'],
|
levels: ['low', 'medium', 'high'],
|
||||||
default: 'medium',
|
default: 'high',
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
contextWindow: 200000,
|
contextWindow: 200000,
|
||||||
@@ -506,7 +454,7 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
|
|||||||
maxOutputTokens: { max: 64000, default: 8192 },
|
maxOutputTokens: { max: 64000, default: 8192 },
|
||||||
thinking: {
|
thinking: {
|
||||||
levels: ['low', 'medium', 'high'],
|
levels: ['low', 'medium', 'high'],
|
||||||
default: 'medium',
|
default: 'high',
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
contextWindow: 200000,
|
contextWindow: 200000,
|
||||||
@@ -515,7 +463,7 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
|
|||||||
id: 'claude-3-haiku-20240307',
|
id: 'claude-3-haiku-20240307',
|
||||||
pricing: {
|
pricing: {
|
||||||
input: 0.25,
|
input: 0.25,
|
||||||
cachedInput: 0.025,
|
cachedInput: 0.03,
|
||||||
output: 1.25,
|
output: 1.25,
|
||||||
updatedAt: '2026-02-05',
|
updatedAt: '2026-02-05',
|
||||||
},
|
},
|
||||||
@@ -536,10 +484,10 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
|
|||||||
capabilities: {
|
capabilities: {
|
||||||
temperature: { min: 0, max: 1 },
|
temperature: { min: 0, max: 1 },
|
||||||
computerUse: true,
|
computerUse: true,
|
||||||
maxOutputTokens: { max: 8192, default: 8192 },
|
maxOutputTokens: { max: 64000, default: 8192 },
|
||||||
thinking: {
|
thinking: {
|
||||||
levels: ['low', 'medium', 'high'],
|
levels: ['low', 'medium', 'high'],
|
||||||
default: 'medium',
|
default: 'high',
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
contextWindow: 200000,
|
contextWindow: 200000,
|
||||||
@@ -580,7 +528,7 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
|
|||||||
},
|
},
|
||||||
capabilities: {
|
capabilities: {
|
||||||
reasoningEffort: {
|
reasoningEffort: {
|
||||||
values: ['none', 'minimal', 'low', 'medium', 'high', 'xhigh'],
|
values: ['none', 'low', 'medium', 'high', 'xhigh'],
|
||||||
},
|
},
|
||||||
verbosity: {
|
verbosity: {
|
||||||
values: ['low', 'medium', 'high'],
|
values: ['low', 'medium', 'high'],
|
||||||
@@ -606,42 +554,6 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
|
|||||||
},
|
},
|
||||||
contextWindow: 400000,
|
contextWindow: 400000,
|
||||||
},
|
},
|
||||||
{
|
|
||||||
id: 'azure/gpt-5.1-mini',
|
|
||||||
pricing: {
|
|
||||||
input: 0.25,
|
|
||||||
cachedInput: 0.025,
|
|
||||||
output: 2.0,
|
|
||||||
updatedAt: '2025-11-14',
|
|
||||||
},
|
|
||||||
capabilities: {
|
|
||||||
reasoningEffort: {
|
|
||||||
values: ['none', 'low', 'medium', 'high'],
|
|
||||||
},
|
|
||||||
verbosity: {
|
|
||||||
values: ['low', 'medium', 'high'],
|
|
||||||
},
|
|
||||||
},
|
|
||||||
contextWindow: 400000,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
id: 'azure/gpt-5.1-nano',
|
|
||||||
pricing: {
|
|
||||||
input: 0.05,
|
|
||||||
cachedInput: 0.005,
|
|
||||||
output: 0.4,
|
|
||||||
updatedAt: '2025-11-14',
|
|
||||||
},
|
|
||||||
capabilities: {
|
|
||||||
reasoningEffort: {
|
|
||||||
values: ['none', 'low', 'medium', 'high'],
|
|
||||||
},
|
|
||||||
verbosity: {
|
|
||||||
values: ['low', 'medium', 'high'],
|
|
||||||
},
|
|
||||||
},
|
|
||||||
contextWindow: 400000,
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
id: 'azure/gpt-5.1-codex',
|
id: 'azure/gpt-5.1-codex',
|
||||||
pricing: {
|
pricing: {
|
||||||
@@ -652,7 +564,7 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
|
|||||||
},
|
},
|
||||||
capabilities: {
|
capabilities: {
|
||||||
reasoningEffort: {
|
reasoningEffort: {
|
||||||
values: ['none', 'medium', 'high'],
|
values: ['none', 'low', 'medium', 'high'],
|
||||||
},
|
},
|
||||||
verbosity: {
|
verbosity: {
|
||||||
values: ['low', 'medium', 'high'],
|
values: ['low', 'medium', 'high'],
|
||||||
@@ -722,23 +634,25 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
|
|||||||
output: 10.0,
|
output: 10.0,
|
||||||
updatedAt: '2025-08-07',
|
updatedAt: '2025-08-07',
|
||||||
},
|
},
|
||||||
capabilities: {},
|
capabilities: {
|
||||||
contextWindow: 400000,
|
temperature: { min: 0, max: 2 },
|
||||||
|
},
|
||||||
|
contextWindow: 128000,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
id: 'azure/o3',
|
id: 'azure/o3',
|
||||||
pricing: {
|
pricing: {
|
||||||
input: 10,
|
input: 2,
|
||||||
cachedInput: 2.5,
|
cachedInput: 0.5,
|
||||||
output: 40,
|
output: 8,
|
||||||
updatedAt: '2025-06-15',
|
updatedAt: '2026-02-06',
|
||||||
},
|
},
|
||||||
capabilities: {
|
capabilities: {
|
||||||
reasoningEffort: {
|
reasoningEffort: {
|
||||||
values: ['low', 'medium', 'high'],
|
values: ['low', 'medium', 'high'],
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
contextWindow: 128000,
|
contextWindow: 200000,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
id: 'azure/o4-mini',
|
id: 'azure/o4-mini',
|
||||||
@@ -753,7 +667,7 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
|
|||||||
values: ['low', 'medium', 'high'],
|
values: ['low', 'medium', 'high'],
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
contextWindow: 128000,
|
contextWindow: 200000,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
id: 'azure/gpt-4.1',
|
id: 'azure/gpt-4.1',
|
||||||
@@ -763,7 +677,35 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
|
|||||||
output: 8.0,
|
output: 8.0,
|
||||||
updatedAt: '2025-06-15',
|
updatedAt: '2025-06-15',
|
||||||
},
|
},
|
||||||
capabilities: {},
|
capabilities: {
|
||||||
|
temperature: { min: 0, max: 2 },
|
||||||
|
},
|
||||||
|
contextWindow: 1000000,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: 'azure/gpt-4.1-mini',
|
||||||
|
pricing: {
|
||||||
|
input: 0.4,
|
||||||
|
cachedInput: 0.1,
|
||||||
|
output: 1.6,
|
||||||
|
updatedAt: '2025-06-15',
|
||||||
|
},
|
||||||
|
capabilities: {
|
||||||
|
temperature: { min: 0, max: 2 },
|
||||||
|
},
|
||||||
|
contextWindow: 1000000,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: 'azure/gpt-4.1-nano',
|
||||||
|
pricing: {
|
||||||
|
input: 0.1,
|
||||||
|
cachedInput: 0.025,
|
||||||
|
output: 0.4,
|
||||||
|
updatedAt: '2025-06-15',
|
||||||
|
},
|
||||||
|
capabilities: {
|
||||||
|
temperature: { min: 0, max: 2 },
|
||||||
|
},
|
||||||
contextWindow: 1000000,
|
contextWindow: 1000000,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -775,7 +717,7 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
|
|||||||
updatedAt: '2025-06-15',
|
updatedAt: '2025-06-15',
|
||||||
},
|
},
|
||||||
capabilities: {},
|
capabilities: {},
|
||||||
contextWindow: 1000000,
|
contextWindow: 200000,
|
||||||
},
|
},
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
@@ -823,7 +765,7 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
|
|||||||
maxOutputTokens: { max: 64000, default: 8192 },
|
maxOutputTokens: { max: 64000, default: 8192 },
|
||||||
thinking: {
|
thinking: {
|
||||||
levels: ['low', 'medium', 'high'],
|
levels: ['low', 'medium', 'high'],
|
||||||
default: 'medium',
|
default: 'high',
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
contextWindow: 200000,
|
contextWindow: 200000,
|
||||||
@@ -842,7 +784,7 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
|
|||||||
maxOutputTokens: { max: 64000, default: 8192 },
|
maxOutputTokens: { max: 64000, default: 8192 },
|
||||||
thinking: {
|
thinking: {
|
||||||
levels: ['low', 'medium', 'high'],
|
levels: ['low', 'medium', 'high'],
|
||||||
default: 'medium',
|
default: 'high',
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
contextWindow: 200000,
|
contextWindow: 200000,
|
||||||
@@ -858,10 +800,10 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
|
|||||||
capabilities: {
|
capabilities: {
|
||||||
temperature: { min: 0, max: 1 },
|
temperature: { min: 0, max: 1 },
|
||||||
nativeStructuredOutputs: true,
|
nativeStructuredOutputs: true,
|
||||||
maxOutputTokens: { max: 64000, default: 8192 },
|
maxOutputTokens: { max: 32000, default: 8192 },
|
||||||
thinking: {
|
thinking: {
|
||||||
levels: ['low', 'medium', 'high'],
|
levels: ['low', 'medium', 'high'],
|
||||||
default: 'medium',
|
default: 'high',
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
contextWindow: 200000,
|
contextWindow: 200000,
|
||||||
@@ -880,7 +822,7 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
|
|||||||
maxOutputTokens: { max: 64000, default: 8192 },
|
maxOutputTokens: { max: 64000, default: 8192 },
|
||||||
thinking: {
|
thinking: {
|
||||||
levels: ['low', 'medium', 'high'],
|
levels: ['low', 'medium', 'high'],
|
||||||
default: 'medium',
|
default: 'high',
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
contextWindow: 200000,
|
contextWindow: 200000,
|
||||||
|
|||||||
@@ -130,14 +130,14 @@ export async function executeResponsesProviderRequest(
|
|||||||
if (request.temperature !== undefined) basePayload.temperature = request.temperature
|
if (request.temperature !== undefined) basePayload.temperature = request.temperature
|
||||||
if (request.maxTokens != null) basePayload.max_output_tokens = request.maxTokens
|
if (request.maxTokens != null) basePayload.max_output_tokens = request.maxTokens
|
||||||
|
|
||||||
if (request.reasoningEffort !== undefined) {
|
if (request.reasoningEffort !== undefined && request.reasoningEffort !== 'auto') {
|
||||||
basePayload.reasoning = {
|
basePayload.reasoning = {
|
||||||
effort: request.reasoningEffort,
|
effort: request.reasoningEffort,
|
||||||
summary: 'auto',
|
summary: 'auto',
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (request.verbosity !== undefined) {
|
if (request.verbosity !== undefined && request.verbosity !== 'auto') {
|
||||||
basePayload.text = {
|
basePayload.text = {
|
||||||
...(basePayload.text ?? {}),
|
...(basePayload.text ?? {}),
|
||||||
verbosity: request.verbosity,
|
verbosity: request.verbosity,
|
||||||
@@ -627,13 +627,13 @@ export async function executeResponsesProviderRequest(
|
|||||||
// Copy over non-tool related settings
|
// Copy over non-tool related settings
|
||||||
if (request.temperature !== undefined) finalPayload.temperature = request.temperature
|
if (request.temperature !== undefined) finalPayload.temperature = request.temperature
|
||||||
if (request.maxTokens != null) finalPayload.max_output_tokens = request.maxTokens
|
if (request.maxTokens != null) finalPayload.max_output_tokens = request.maxTokens
|
||||||
if (request.reasoningEffort !== undefined) {
|
if (request.reasoningEffort !== undefined && request.reasoningEffort !== 'auto') {
|
||||||
finalPayload.reasoning = {
|
finalPayload.reasoning = {
|
||||||
effort: request.reasoningEffort,
|
effort: request.reasoningEffort,
|
||||||
summary: 'auto',
|
summary: 'auto',
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if (request.verbosity !== undefined) {
|
if (request.verbosity !== undefined && request.verbosity !== 'auto') {
|
||||||
finalPayload.text = {
|
finalPayload.text = {
|
||||||
...finalPayload.text,
|
...finalPayload.text,
|
||||||
verbosity: request.verbosity,
|
verbosity: request.verbosity,
|
||||||
|
|||||||
@@ -12,16 +12,22 @@ import {
|
|||||||
getApiKey,
|
getApiKey,
|
||||||
getBaseModelProviders,
|
getBaseModelProviders,
|
||||||
getHostedModels,
|
getHostedModels,
|
||||||
|
getMaxOutputTokensForModel,
|
||||||
getMaxTemperature,
|
getMaxTemperature,
|
||||||
|
getModelPricing,
|
||||||
getProvider,
|
getProvider,
|
||||||
getProviderConfigFromModel,
|
getProviderConfigFromModel,
|
||||||
getProviderFromModel,
|
getProviderFromModel,
|
||||||
getProviderModels,
|
getProviderModels,
|
||||||
|
getReasoningEffortValuesForModel,
|
||||||
|
getThinkingLevelsForModel,
|
||||||
|
getVerbosityValuesForModel,
|
||||||
isProviderBlacklisted,
|
isProviderBlacklisted,
|
||||||
MODELS_TEMP_RANGE_0_1,
|
MODELS_TEMP_RANGE_0_1,
|
||||||
MODELS_TEMP_RANGE_0_2,
|
MODELS_TEMP_RANGE_0_2,
|
||||||
MODELS_WITH_REASONING_EFFORT,
|
MODELS_WITH_REASONING_EFFORT,
|
||||||
MODELS_WITH_TEMPERATURE_SUPPORT,
|
MODELS_WITH_TEMPERATURE_SUPPORT,
|
||||||
|
MODELS_WITH_THINKING,
|
||||||
MODELS_WITH_VERBOSITY,
|
MODELS_WITH_VERBOSITY,
|
||||||
PROVIDERS_WITH_TOOL_USAGE_CONTROL,
|
PROVIDERS_WITH_TOOL_USAGE_CONTROL,
|
||||||
prepareToolExecution,
|
prepareToolExecution,
|
||||||
@@ -169,6 +175,8 @@ describe('Model Capabilities', () => {
|
|||||||
'gpt-4.1',
|
'gpt-4.1',
|
||||||
'gpt-4.1-mini',
|
'gpt-4.1-mini',
|
||||||
'gpt-4.1-nano',
|
'gpt-4.1-nano',
|
||||||
|
'gpt-5-chat-latest',
|
||||||
|
'azure/gpt-5-chat-latest',
|
||||||
'gemini-2.5-flash',
|
'gemini-2.5-flash',
|
||||||
'claude-sonnet-4-0',
|
'claude-sonnet-4-0',
|
||||||
'claude-opus-4-0',
|
'claude-opus-4-0',
|
||||||
@@ -186,34 +194,27 @@ describe('Model Capabilities', () => {
|
|||||||
it.concurrent('should return false for models that do not support temperature', () => {
|
it.concurrent('should return false for models that do not support temperature', () => {
|
||||||
const unsupportedModels = [
|
const unsupportedModels = [
|
||||||
'unsupported-model',
|
'unsupported-model',
|
||||||
'cerebras/llama-3.3-70b', // Cerebras models don't have temperature defined
|
'cerebras/llama-3.3-70b',
|
||||||
'groq/meta-llama/llama-4-scout-17b-16e-instruct', // Groq models don't have temperature defined
|
'groq/meta-llama/llama-4-scout-17b-16e-instruct',
|
||||||
// Reasoning models that don't support temperature
|
|
||||||
'o1',
|
'o1',
|
||||||
'o3',
|
'o3',
|
||||||
'o4-mini',
|
'o4-mini',
|
||||||
'azure/o3',
|
'azure/o3',
|
||||||
'azure/o4-mini',
|
'azure/o4-mini',
|
||||||
'deepseek-r1',
|
'deepseek-r1',
|
||||||
// Chat models that don't support temperature
|
|
||||||
'deepseek-chat',
|
'deepseek-chat',
|
||||||
'azure/gpt-4.1',
|
|
||||||
'azure/model-router',
|
'azure/model-router',
|
||||||
// GPT-5.1 models don't support temperature (removed in our implementation)
|
|
||||||
'gpt-5.1',
|
'gpt-5.1',
|
||||||
'azure/gpt-5.1',
|
'azure/gpt-5.1',
|
||||||
'azure/gpt-5.1-mini',
|
'azure/gpt-5.1-mini',
|
||||||
'azure/gpt-5.1-nano',
|
'azure/gpt-5.1-nano',
|
||||||
'azure/gpt-5.1-codex',
|
'azure/gpt-5.1-codex',
|
||||||
// GPT-5 models don't support temperature (removed in our implementation)
|
|
||||||
'gpt-5',
|
'gpt-5',
|
||||||
'gpt-5-mini',
|
'gpt-5-mini',
|
||||||
'gpt-5-nano',
|
'gpt-5-nano',
|
||||||
'gpt-5-chat-latest',
|
|
||||||
'azure/gpt-5',
|
'azure/gpt-5',
|
||||||
'azure/gpt-5-mini',
|
'azure/gpt-5-mini',
|
||||||
'azure/gpt-5-nano',
|
'azure/gpt-5-nano',
|
||||||
'azure/gpt-5-chat-latest',
|
|
||||||
]
|
]
|
||||||
|
|
||||||
for (const model of unsupportedModels) {
|
for (const model of unsupportedModels) {
|
||||||
@@ -240,6 +241,8 @@ describe('Model Capabilities', () => {
|
|||||||
const modelsRange02 = [
|
const modelsRange02 = [
|
||||||
'gpt-4o',
|
'gpt-4o',
|
||||||
'azure/gpt-4o',
|
'azure/gpt-4o',
|
||||||
|
'gpt-5-chat-latest',
|
||||||
|
'azure/gpt-5-chat-latest',
|
||||||
'gemini-2.5-pro',
|
'gemini-2.5-pro',
|
||||||
'gemini-2.5-flash',
|
'gemini-2.5-flash',
|
||||||
'deepseek-v3',
|
'deepseek-v3',
|
||||||
@@ -268,28 +271,23 @@ describe('Model Capabilities', () => {
|
|||||||
expect(getMaxTemperature('unsupported-model')).toBeUndefined()
|
expect(getMaxTemperature('unsupported-model')).toBeUndefined()
|
||||||
expect(getMaxTemperature('cerebras/llama-3.3-70b')).toBeUndefined()
|
expect(getMaxTemperature('cerebras/llama-3.3-70b')).toBeUndefined()
|
||||||
expect(getMaxTemperature('groq/meta-llama/llama-4-scout-17b-16e-instruct')).toBeUndefined()
|
expect(getMaxTemperature('groq/meta-llama/llama-4-scout-17b-16e-instruct')).toBeUndefined()
|
||||||
// Reasoning models that don't support temperature
|
|
||||||
expect(getMaxTemperature('o1')).toBeUndefined()
|
expect(getMaxTemperature('o1')).toBeUndefined()
|
||||||
expect(getMaxTemperature('o3')).toBeUndefined()
|
expect(getMaxTemperature('o3')).toBeUndefined()
|
||||||
expect(getMaxTemperature('o4-mini')).toBeUndefined()
|
expect(getMaxTemperature('o4-mini')).toBeUndefined()
|
||||||
expect(getMaxTemperature('azure/o3')).toBeUndefined()
|
expect(getMaxTemperature('azure/o3')).toBeUndefined()
|
||||||
expect(getMaxTemperature('azure/o4-mini')).toBeUndefined()
|
expect(getMaxTemperature('azure/o4-mini')).toBeUndefined()
|
||||||
expect(getMaxTemperature('deepseek-r1')).toBeUndefined()
|
expect(getMaxTemperature('deepseek-r1')).toBeUndefined()
|
||||||
// GPT-5.1 models don't support temperature
|
|
||||||
expect(getMaxTemperature('gpt-5.1')).toBeUndefined()
|
expect(getMaxTemperature('gpt-5.1')).toBeUndefined()
|
||||||
expect(getMaxTemperature('azure/gpt-5.1')).toBeUndefined()
|
expect(getMaxTemperature('azure/gpt-5.1')).toBeUndefined()
|
||||||
expect(getMaxTemperature('azure/gpt-5.1-mini')).toBeUndefined()
|
expect(getMaxTemperature('azure/gpt-5.1-mini')).toBeUndefined()
|
||||||
expect(getMaxTemperature('azure/gpt-5.1-nano')).toBeUndefined()
|
expect(getMaxTemperature('azure/gpt-5.1-nano')).toBeUndefined()
|
||||||
expect(getMaxTemperature('azure/gpt-5.1-codex')).toBeUndefined()
|
expect(getMaxTemperature('azure/gpt-5.1-codex')).toBeUndefined()
|
||||||
// GPT-5 models don't support temperature
|
|
||||||
expect(getMaxTemperature('gpt-5')).toBeUndefined()
|
expect(getMaxTemperature('gpt-5')).toBeUndefined()
|
||||||
expect(getMaxTemperature('gpt-5-mini')).toBeUndefined()
|
expect(getMaxTemperature('gpt-5-mini')).toBeUndefined()
|
||||||
expect(getMaxTemperature('gpt-5-nano')).toBeUndefined()
|
expect(getMaxTemperature('gpt-5-nano')).toBeUndefined()
|
||||||
expect(getMaxTemperature('gpt-5-chat-latest')).toBeUndefined()
|
|
||||||
expect(getMaxTemperature('azure/gpt-5')).toBeUndefined()
|
expect(getMaxTemperature('azure/gpt-5')).toBeUndefined()
|
||||||
expect(getMaxTemperature('azure/gpt-5-mini')).toBeUndefined()
|
expect(getMaxTemperature('azure/gpt-5-mini')).toBeUndefined()
|
||||||
expect(getMaxTemperature('azure/gpt-5-nano')).toBeUndefined()
|
expect(getMaxTemperature('azure/gpt-5-nano')).toBeUndefined()
|
||||||
expect(getMaxTemperature('azure/gpt-5-chat-latest')).toBeUndefined()
|
|
||||||
})
|
})
|
||||||
|
|
||||||
it.concurrent('should be case insensitive', () => {
|
it.concurrent('should be case insensitive', () => {
|
||||||
@@ -340,13 +338,13 @@ describe('Model Capabilities', () => {
|
|||||||
expect(MODELS_TEMP_RANGE_0_2).toContain('gpt-4o')
|
expect(MODELS_TEMP_RANGE_0_2).toContain('gpt-4o')
|
||||||
expect(MODELS_TEMP_RANGE_0_2).toContain('gemini-2.5-flash')
|
expect(MODELS_TEMP_RANGE_0_2).toContain('gemini-2.5-flash')
|
||||||
expect(MODELS_TEMP_RANGE_0_2).toContain('deepseek-v3')
|
expect(MODELS_TEMP_RANGE_0_2).toContain('deepseek-v3')
|
||||||
expect(MODELS_TEMP_RANGE_0_2).not.toContain('claude-sonnet-4-0') // Should be in 0-1 range
|
expect(MODELS_TEMP_RANGE_0_2).not.toContain('claude-sonnet-4-0')
|
||||||
})
|
})
|
||||||
|
|
||||||
it.concurrent('should have correct models in MODELS_TEMP_RANGE_0_1', () => {
|
it.concurrent('should have correct models in MODELS_TEMP_RANGE_0_1', () => {
|
||||||
expect(MODELS_TEMP_RANGE_0_1).toContain('claude-sonnet-4-0')
|
expect(MODELS_TEMP_RANGE_0_1).toContain('claude-sonnet-4-0')
|
||||||
expect(MODELS_TEMP_RANGE_0_1).toContain('grok-3-latest')
|
expect(MODELS_TEMP_RANGE_0_1).toContain('grok-3-latest')
|
||||||
expect(MODELS_TEMP_RANGE_0_1).not.toContain('gpt-4o') // Should be in 0-2 range
|
expect(MODELS_TEMP_RANGE_0_1).not.toContain('gpt-4o')
|
||||||
})
|
})
|
||||||
|
|
||||||
it.concurrent('should have correct providers in PROVIDERS_WITH_TOOL_USAGE_CONTROL', () => {
|
it.concurrent('should have correct providers in PROVIDERS_WITH_TOOL_USAGE_CONTROL', () => {
|
||||||
@@ -363,20 +361,19 @@ describe('Model Capabilities', () => {
|
|||||||
expect(MODELS_WITH_TEMPERATURE_SUPPORT.length).toBe(
|
expect(MODELS_WITH_TEMPERATURE_SUPPORT.length).toBe(
|
||||||
MODELS_TEMP_RANGE_0_2.length + MODELS_TEMP_RANGE_0_1.length
|
MODELS_TEMP_RANGE_0_2.length + MODELS_TEMP_RANGE_0_1.length
|
||||||
)
|
)
|
||||||
expect(MODELS_WITH_TEMPERATURE_SUPPORT).toContain('gpt-4o') // From 0-2 range
|
expect(MODELS_WITH_TEMPERATURE_SUPPORT).toContain('gpt-4o')
|
||||||
expect(MODELS_WITH_TEMPERATURE_SUPPORT).toContain('claude-sonnet-4-0') // From 0-1 range
|
expect(MODELS_WITH_TEMPERATURE_SUPPORT).toContain('claude-sonnet-4-0')
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
it.concurrent('should have correct models in MODELS_WITH_REASONING_EFFORT', () => {
|
it.concurrent('should have correct models in MODELS_WITH_REASONING_EFFORT', () => {
|
||||||
// Should contain GPT-5.1 models that support reasoning effort
|
|
||||||
expect(MODELS_WITH_REASONING_EFFORT).toContain('gpt-5.1')
|
expect(MODELS_WITH_REASONING_EFFORT).toContain('gpt-5.1')
|
||||||
expect(MODELS_WITH_REASONING_EFFORT).toContain('azure/gpt-5.1')
|
expect(MODELS_WITH_REASONING_EFFORT).toContain('azure/gpt-5.1')
|
||||||
expect(MODELS_WITH_REASONING_EFFORT).toContain('azure/gpt-5.1-mini')
|
|
||||||
expect(MODELS_WITH_REASONING_EFFORT).toContain('azure/gpt-5.1-nano')
|
|
||||||
expect(MODELS_WITH_REASONING_EFFORT).toContain('azure/gpt-5.1-codex')
|
expect(MODELS_WITH_REASONING_EFFORT).toContain('azure/gpt-5.1-codex')
|
||||||
|
|
||||||
// Should contain GPT-5 models that support reasoning effort
|
expect(MODELS_WITH_REASONING_EFFORT).not.toContain('azure/gpt-5.1-mini')
|
||||||
|
expect(MODELS_WITH_REASONING_EFFORT).not.toContain('azure/gpt-5.1-nano')
|
||||||
|
|
||||||
expect(MODELS_WITH_REASONING_EFFORT).toContain('gpt-5')
|
expect(MODELS_WITH_REASONING_EFFORT).toContain('gpt-5')
|
||||||
expect(MODELS_WITH_REASONING_EFFORT).toContain('gpt-5-mini')
|
expect(MODELS_WITH_REASONING_EFFORT).toContain('gpt-5-mini')
|
||||||
expect(MODELS_WITH_REASONING_EFFORT).toContain('gpt-5-nano')
|
expect(MODELS_WITH_REASONING_EFFORT).toContain('gpt-5-nano')
|
||||||
@@ -384,35 +381,30 @@ describe('Model Capabilities', () => {
|
|||||||
expect(MODELS_WITH_REASONING_EFFORT).toContain('azure/gpt-5-mini')
|
expect(MODELS_WITH_REASONING_EFFORT).toContain('azure/gpt-5-mini')
|
||||||
expect(MODELS_WITH_REASONING_EFFORT).toContain('azure/gpt-5-nano')
|
expect(MODELS_WITH_REASONING_EFFORT).toContain('azure/gpt-5-nano')
|
||||||
|
|
||||||
// Should contain gpt-5.2 models
|
|
||||||
expect(MODELS_WITH_REASONING_EFFORT).toContain('gpt-5.2')
|
expect(MODELS_WITH_REASONING_EFFORT).toContain('gpt-5.2')
|
||||||
expect(MODELS_WITH_REASONING_EFFORT).toContain('azure/gpt-5.2')
|
expect(MODELS_WITH_REASONING_EFFORT).toContain('azure/gpt-5.2')
|
||||||
|
|
||||||
// Should contain o-series reasoning models (reasoning_effort added Dec 17, 2024)
|
|
||||||
expect(MODELS_WITH_REASONING_EFFORT).toContain('o1')
|
expect(MODELS_WITH_REASONING_EFFORT).toContain('o1')
|
||||||
expect(MODELS_WITH_REASONING_EFFORT).toContain('o3')
|
expect(MODELS_WITH_REASONING_EFFORT).toContain('o3')
|
||||||
expect(MODELS_WITH_REASONING_EFFORT).toContain('o4-mini')
|
expect(MODELS_WITH_REASONING_EFFORT).toContain('o4-mini')
|
||||||
expect(MODELS_WITH_REASONING_EFFORT).toContain('azure/o3')
|
expect(MODELS_WITH_REASONING_EFFORT).toContain('azure/o3')
|
||||||
expect(MODELS_WITH_REASONING_EFFORT).toContain('azure/o4-mini')
|
expect(MODELS_WITH_REASONING_EFFORT).toContain('azure/o4-mini')
|
||||||
|
|
||||||
// Should NOT contain non-reasoning GPT-5 models
|
|
||||||
expect(MODELS_WITH_REASONING_EFFORT).not.toContain('gpt-5-chat-latest')
|
expect(MODELS_WITH_REASONING_EFFORT).not.toContain('gpt-5-chat-latest')
|
||||||
expect(MODELS_WITH_REASONING_EFFORT).not.toContain('azure/gpt-5-chat-latest')
|
expect(MODELS_WITH_REASONING_EFFORT).not.toContain('azure/gpt-5-chat-latest')
|
||||||
|
|
||||||
// Should NOT contain other models
|
|
||||||
expect(MODELS_WITH_REASONING_EFFORT).not.toContain('gpt-4o')
|
expect(MODELS_WITH_REASONING_EFFORT).not.toContain('gpt-4o')
|
||||||
expect(MODELS_WITH_REASONING_EFFORT).not.toContain('claude-sonnet-4-0')
|
expect(MODELS_WITH_REASONING_EFFORT).not.toContain('claude-sonnet-4-0')
|
||||||
})
|
})
|
||||||
|
|
||||||
it.concurrent('should have correct models in MODELS_WITH_VERBOSITY', () => {
|
it.concurrent('should have correct models in MODELS_WITH_VERBOSITY', () => {
|
||||||
// Should contain GPT-5.1 models that support verbosity
|
|
||||||
expect(MODELS_WITH_VERBOSITY).toContain('gpt-5.1')
|
expect(MODELS_WITH_VERBOSITY).toContain('gpt-5.1')
|
||||||
expect(MODELS_WITH_VERBOSITY).toContain('azure/gpt-5.1')
|
expect(MODELS_WITH_VERBOSITY).toContain('azure/gpt-5.1')
|
||||||
expect(MODELS_WITH_VERBOSITY).toContain('azure/gpt-5.1-mini')
|
|
||||||
expect(MODELS_WITH_VERBOSITY).toContain('azure/gpt-5.1-nano')
|
|
||||||
expect(MODELS_WITH_VERBOSITY).toContain('azure/gpt-5.1-codex')
|
expect(MODELS_WITH_VERBOSITY).toContain('azure/gpt-5.1-codex')
|
||||||
|
|
||||||
// Should contain GPT-5 models that support verbosity
|
expect(MODELS_WITH_VERBOSITY).not.toContain('azure/gpt-5.1-mini')
|
||||||
|
expect(MODELS_WITH_VERBOSITY).not.toContain('azure/gpt-5.1-nano')
|
||||||
|
|
||||||
expect(MODELS_WITH_VERBOSITY).toContain('gpt-5')
|
expect(MODELS_WITH_VERBOSITY).toContain('gpt-5')
|
||||||
expect(MODELS_WITH_VERBOSITY).toContain('gpt-5-mini')
|
expect(MODELS_WITH_VERBOSITY).toContain('gpt-5-mini')
|
||||||
expect(MODELS_WITH_VERBOSITY).toContain('gpt-5-nano')
|
expect(MODELS_WITH_VERBOSITY).toContain('gpt-5-nano')
|
||||||
@@ -420,26 +412,39 @@ describe('Model Capabilities', () => {
|
|||||||
expect(MODELS_WITH_VERBOSITY).toContain('azure/gpt-5-mini')
|
expect(MODELS_WITH_VERBOSITY).toContain('azure/gpt-5-mini')
|
||||||
expect(MODELS_WITH_VERBOSITY).toContain('azure/gpt-5-nano')
|
expect(MODELS_WITH_VERBOSITY).toContain('azure/gpt-5-nano')
|
||||||
|
|
||||||
// Should contain gpt-5.2 models
|
|
||||||
expect(MODELS_WITH_VERBOSITY).toContain('gpt-5.2')
|
expect(MODELS_WITH_VERBOSITY).toContain('gpt-5.2')
|
||||||
expect(MODELS_WITH_VERBOSITY).toContain('azure/gpt-5.2')
|
expect(MODELS_WITH_VERBOSITY).toContain('azure/gpt-5.2')
|
||||||
|
|
||||||
// Should NOT contain non-reasoning GPT-5 models
|
|
||||||
expect(MODELS_WITH_VERBOSITY).not.toContain('gpt-5-chat-latest')
|
expect(MODELS_WITH_VERBOSITY).not.toContain('gpt-5-chat-latest')
|
||||||
expect(MODELS_WITH_VERBOSITY).not.toContain('azure/gpt-5-chat-latest')
|
expect(MODELS_WITH_VERBOSITY).not.toContain('azure/gpt-5-chat-latest')
|
||||||
|
|
||||||
// Should NOT contain o-series models (they support reasoning_effort but not verbosity)
|
|
||||||
expect(MODELS_WITH_VERBOSITY).not.toContain('o1')
|
expect(MODELS_WITH_VERBOSITY).not.toContain('o1')
|
||||||
expect(MODELS_WITH_VERBOSITY).not.toContain('o3')
|
expect(MODELS_WITH_VERBOSITY).not.toContain('o3')
|
||||||
expect(MODELS_WITH_VERBOSITY).not.toContain('o4-mini')
|
expect(MODELS_WITH_VERBOSITY).not.toContain('o4-mini')
|
||||||
|
|
||||||
// Should NOT contain other models
|
|
||||||
expect(MODELS_WITH_VERBOSITY).not.toContain('gpt-4o')
|
expect(MODELS_WITH_VERBOSITY).not.toContain('gpt-4o')
|
||||||
expect(MODELS_WITH_VERBOSITY).not.toContain('claude-sonnet-4-0')
|
expect(MODELS_WITH_VERBOSITY).not.toContain('claude-sonnet-4-0')
|
||||||
})
|
})
|
||||||
|
|
||||||
|
it.concurrent('should have correct models in MODELS_WITH_THINKING', () => {
|
||||||
|
expect(MODELS_WITH_THINKING).toContain('claude-opus-4-6')
|
||||||
|
expect(MODELS_WITH_THINKING).toContain('claude-opus-4-5')
|
||||||
|
expect(MODELS_WITH_THINKING).toContain('claude-opus-4-1')
|
||||||
|
expect(MODELS_WITH_THINKING).toContain('claude-opus-4-0')
|
||||||
|
expect(MODELS_WITH_THINKING).toContain('claude-sonnet-4-5')
|
||||||
|
expect(MODELS_WITH_THINKING).toContain('claude-sonnet-4-0')
|
||||||
|
|
||||||
|
expect(MODELS_WITH_THINKING).toContain('gemini-3-pro-preview')
|
||||||
|
expect(MODELS_WITH_THINKING).toContain('gemini-3-flash-preview')
|
||||||
|
|
||||||
|
expect(MODELS_WITH_THINKING).toContain('claude-haiku-4-5')
|
||||||
|
|
||||||
|
expect(MODELS_WITH_THINKING).not.toContain('gpt-4o')
|
||||||
|
expect(MODELS_WITH_THINKING).not.toContain('gpt-5')
|
||||||
|
expect(MODELS_WITH_THINKING).not.toContain('o3')
|
||||||
|
})
|
||||||
|
|
||||||
it.concurrent('should have GPT-5 models in both reasoning effort and verbosity arrays', () => {
|
it.concurrent('should have GPT-5 models in both reasoning effort and verbosity arrays', () => {
|
||||||
// GPT-5 series models support both reasoning effort and verbosity
|
|
||||||
const gpt5ModelsWithReasoningEffort = MODELS_WITH_REASONING_EFFORT.filter(
|
const gpt5ModelsWithReasoningEffort = MODELS_WITH_REASONING_EFFORT.filter(
|
||||||
(m) => m.includes('gpt-5') && !m.includes('chat-latest')
|
(m) => m.includes('gpt-5') && !m.includes('chat-latest')
|
||||||
)
|
)
|
||||||
@@ -448,11 +453,229 @@ describe('Model Capabilities', () => {
|
|||||||
)
|
)
|
||||||
expect(gpt5ModelsWithReasoningEffort.sort()).toEqual(gpt5ModelsWithVerbosity.sort())
|
expect(gpt5ModelsWithReasoningEffort.sort()).toEqual(gpt5ModelsWithVerbosity.sort())
|
||||||
|
|
||||||
// o-series models have reasoning effort but NOT verbosity
|
|
||||||
expect(MODELS_WITH_REASONING_EFFORT).toContain('o1')
|
expect(MODELS_WITH_REASONING_EFFORT).toContain('o1')
|
||||||
expect(MODELS_WITH_VERBOSITY).not.toContain('o1')
|
expect(MODELS_WITH_VERBOSITY).not.toContain('o1')
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
describe('Reasoning Effort Values Per Model', () => {
|
||||||
|
it.concurrent('should return correct values for GPT-5.2', () => {
|
||||||
|
const values = getReasoningEffortValuesForModel('gpt-5.2')
|
||||||
|
expect(values).toBeDefined()
|
||||||
|
expect(values).toContain('none')
|
||||||
|
expect(values).toContain('low')
|
||||||
|
expect(values).toContain('medium')
|
||||||
|
expect(values).toContain('high')
|
||||||
|
expect(values).toContain('xhigh')
|
||||||
|
expect(values).not.toContain('minimal')
|
||||||
|
})
|
||||||
|
|
||||||
|
it.concurrent('should return correct values for GPT-5', () => {
|
||||||
|
const values = getReasoningEffortValuesForModel('gpt-5')
|
||||||
|
expect(values).toBeDefined()
|
||||||
|
expect(values).toContain('minimal')
|
||||||
|
expect(values).toContain('low')
|
||||||
|
expect(values).toContain('medium')
|
||||||
|
expect(values).toContain('high')
|
||||||
|
})
|
||||||
|
|
||||||
|
it.concurrent('should return correct values for o-series models', () => {
|
||||||
|
for (const model of ['o1', 'o3', 'o4-mini']) {
|
||||||
|
const values = getReasoningEffortValuesForModel(model)
|
||||||
|
expect(values).toBeDefined()
|
||||||
|
expect(values).toContain('low')
|
||||||
|
expect(values).toContain('medium')
|
||||||
|
expect(values).toContain('high')
|
||||||
|
expect(values).not.toContain('none')
|
||||||
|
expect(values).not.toContain('minimal')
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
it.concurrent('should return null for non-reasoning models', () => {
|
||||||
|
expect(getReasoningEffortValuesForModel('gpt-4o')).toBeNull()
|
||||||
|
expect(getReasoningEffortValuesForModel('claude-sonnet-4-5')).toBeNull()
|
||||||
|
expect(getReasoningEffortValuesForModel('gemini-2.5-flash')).toBeNull()
|
||||||
|
})
|
||||||
|
|
||||||
|
it.concurrent('should return correct values for Azure GPT-5.2', () => {
|
||||||
|
const values = getReasoningEffortValuesForModel('azure/gpt-5.2')
|
||||||
|
expect(values).toBeDefined()
|
||||||
|
expect(values).not.toContain('minimal')
|
||||||
|
expect(values).toContain('xhigh')
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
describe('Verbosity Values Per Model', () => {
|
||||||
|
it.concurrent('should return correct values for GPT-5 family', () => {
|
||||||
|
for (const model of ['gpt-5.2', 'gpt-5.1', 'gpt-5', 'gpt-5-mini', 'gpt-5-nano']) {
|
||||||
|
const values = getVerbosityValuesForModel(model)
|
||||||
|
expect(values).toBeDefined()
|
||||||
|
expect(values).toContain('low')
|
||||||
|
expect(values).toContain('medium')
|
||||||
|
expect(values).toContain('high')
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
it.concurrent('should return null for o-series models', () => {
|
||||||
|
expect(getVerbosityValuesForModel('o1')).toBeNull()
|
||||||
|
expect(getVerbosityValuesForModel('o3')).toBeNull()
|
||||||
|
expect(getVerbosityValuesForModel('o4-mini')).toBeNull()
|
||||||
|
})
|
||||||
|
|
||||||
|
it.concurrent('should return null for non-reasoning models', () => {
|
||||||
|
expect(getVerbosityValuesForModel('gpt-4o')).toBeNull()
|
||||||
|
expect(getVerbosityValuesForModel('claude-sonnet-4-5')).toBeNull()
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
describe('Thinking Levels Per Model', () => {
|
||||||
|
it.concurrent('should return correct levels for Claude Opus 4.6 (adaptive)', () => {
|
||||||
|
const levels = getThinkingLevelsForModel('claude-opus-4-6')
|
||||||
|
expect(levels).toBeDefined()
|
||||||
|
expect(levels).toContain('low')
|
||||||
|
expect(levels).toContain('medium')
|
||||||
|
expect(levels).toContain('high')
|
||||||
|
expect(levels).toContain('max')
|
||||||
|
})
|
||||||
|
|
||||||
|
it.concurrent('should return correct levels for other Claude models (budget_tokens)', () => {
|
||||||
|
for (const model of ['claude-opus-4-5', 'claude-sonnet-4-5', 'claude-sonnet-4-0']) {
|
||||||
|
const levels = getThinkingLevelsForModel(model)
|
||||||
|
expect(levels).toBeDefined()
|
||||||
|
expect(levels).toContain('low')
|
||||||
|
expect(levels).toContain('medium')
|
||||||
|
expect(levels).toContain('high')
|
||||||
|
expect(levels).not.toContain('max')
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
it.concurrent('should return correct levels for Gemini 3 models', () => {
|
||||||
|
const proLevels = getThinkingLevelsForModel('gemini-3-pro-preview')
|
||||||
|
expect(proLevels).toBeDefined()
|
||||||
|
expect(proLevels).toContain('low')
|
||||||
|
expect(proLevels).toContain('high')
|
||||||
|
|
||||||
|
const flashLevels = getThinkingLevelsForModel('gemini-3-flash-preview')
|
||||||
|
expect(flashLevels).toBeDefined()
|
||||||
|
expect(flashLevels).toContain('minimal')
|
||||||
|
expect(flashLevels).toContain('low')
|
||||||
|
expect(flashLevels).toContain('medium')
|
||||||
|
expect(flashLevels).toContain('high')
|
||||||
|
})
|
||||||
|
|
||||||
|
it.concurrent('should return correct levels for Claude Haiku 4.5', () => {
|
||||||
|
const levels = getThinkingLevelsForModel('claude-haiku-4-5')
|
||||||
|
expect(levels).toBeDefined()
|
||||||
|
expect(levels).toContain('low')
|
||||||
|
expect(levels).toContain('medium')
|
||||||
|
expect(levels).toContain('high')
|
||||||
|
})
|
||||||
|
|
||||||
|
it.concurrent('should return null for non-thinking models', () => {
|
||||||
|
expect(getThinkingLevelsForModel('gpt-4o')).toBeNull()
|
||||||
|
expect(getThinkingLevelsForModel('gpt-5')).toBeNull()
|
||||||
|
expect(getThinkingLevelsForModel('o3')).toBeNull()
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
describe('Max Output Tokens', () => {
|
||||||
|
describe('getMaxOutputTokensForModel', () => {
|
||||||
|
it.concurrent('should return higher value for streaming than non-streaming (Anthropic)', () => {
|
||||||
|
const streamingTokens = getMaxOutputTokensForModel('claude-opus-4-6', true)
|
||||||
|
const nonStreamingTokens = getMaxOutputTokensForModel('claude-opus-4-6', false)
|
||||||
|
expect(streamingTokens).toBeGreaterThan(nonStreamingTokens)
|
||||||
|
expect(streamingTokens).toBe(128000)
|
||||||
|
expect(nonStreamingTokens).toBe(8192)
|
||||||
|
})
|
||||||
|
|
||||||
|
it.concurrent('should return correct values for Claude Sonnet 4.5', () => {
|
||||||
|
expect(getMaxOutputTokensForModel('claude-sonnet-4-5', true)).toBe(64000)
|
||||||
|
expect(getMaxOutputTokensForModel('claude-sonnet-4-5', false)).toBe(8192)
|
||||||
|
})
|
||||||
|
|
||||||
|
it.concurrent('should return correct values for Claude Opus 4.1', () => {
|
||||||
|
expect(getMaxOutputTokensForModel('claude-opus-4-1', true)).toBe(32000)
|
||||||
|
expect(getMaxOutputTokensForModel('claude-opus-4-1', false)).toBe(8192)
|
||||||
|
})
|
||||||
|
|
||||||
|
it.concurrent('should return standard default for models without maxOutputTokens', () => {
|
||||||
|
expect(getMaxOutputTokensForModel('gpt-4o', false)).toBe(4096)
|
||||||
|
expect(getMaxOutputTokensForModel('gpt-4o', true)).toBe(4096)
|
||||||
|
})
|
||||||
|
|
||||||
|
it.concurrent('should return standard default for unknown models', () => {
|
||||||
|
expect(getMaxOutputTokensForModel('unknown-model', false)).toBe(4096)
|
||||||
|
expect(getMaxOutputTokensForModel('unknown-model', true)).toBe(4096)
|
||||||
|
})
|
||||||
|
|
||||||
|
it.concurrent(
|
||||||
|
'non-streaming default should be within Anthropic SDK non-streaming threshold',
|
||||||
|
() => {
|
||||||
|
const SDK_NON_STREAMING_THRESHOLD = 21333
|
||||||
|
const models = [
|
||||||
|
'claude-opus-4-6',
|
||||||
|
'claude-opus-4-5',
|
||||||
|
'claude-opus-4-1',
|
||||||
|
'claude-sonnet-4-5',
|
||||||
|
'claude-sonnet-4-0',
|
||||||
|
'claude-haiku-4-5',
|
||||||
|
]
|
||||||
|
|
||||||
|
for (const model of models) {
|
||||||
|
const nonStreamingDefault = getMaxOutputTokensForModel(model, false)
|
||||||
|
expect(nonStreamingDefault).toBeLessThan(SDK_NON_STREAMING_THRESHOLD)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
describe('Model Pricing Validation', () => {
|
||||||
|
it.concurrent('should have correct pricing for key Anthropic models', () => {
|
||||||
|
const opus46 = getModelPricing('claude-opus-4-6')
|
||||||
|
expect(opus46).toBeDefined()
|
||||||
|
expect(opus46.input).toBe(5.0)
|
||||||
|
expect(opus46.output).toBe(25.0)
|
||||||
|
|
||||||
|
const sonnet45 = getModelPricing('claude-sonnet-4-5')
|
||||||
|
expect(sonnet45).toBeDefined()
|
||||||
|
expect(sonnet45.input).toBe(3.0)
|
||||||
|
expect(sonnet45.output).toBe(15.0)
|
||||||
|
})
|
||||||
|
|
||||||
|
it.concurrent('should have correct pricing for key OpenAI models', () => {
|
||||||
|
const gpt4o = getModelPricing('gpt-4o')
|
||||||
|
expect(gpt4o).toBeDefined()
|
||||||
|
expect(gpt4o.input).toBe(2.5)
|
||||||
|
expect(gpt4o.output).toBe(10.0)
|
||||||
|
|
||||||
|
const o3 = getModelPricing('o3')
|
||||||
|
expect(o3).toBeDefined()
|
||||||
|
expect(o3.input).toBe(2.0)
|
||||||
|
expect(o3.output).toBe(8.0)
|
||||||
|
})
|
||||||
|
|
||||||
|
it.concurrent('should have correct pricing for Azure OpenAI o3', () => {
|
||||||
|
const azureO3 = getModelPricing('azure/o3')
|
||||||
|
expect(azureO3).toBeDefined()
|
||||||
|
expect(azureO3.input).toBe(2.0)
|
||||||
|
expect(azureO3.output).toBe(8.0)
|
||||||
|
})
|
||||||
|
|
||||||
|
it.concurrent('should return null for unknown models', () => {
|
||||||
|
expect(getModelPricing('unknown-model')).toBeNull()
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
describe('Context Window Validation', () => {
|
||||||
|
it.concurrent('should have correct context windows for key models', () => {
|
||||||
|
const allModels = getAllModels()
|
||||||
|
|
||||||
|
expect(allModels).toContain('gpt-5-chat-latest')
|
||||||
|
|
||||||
|
expect(allModels).toContain('o3')
|
||||||
|
expect(allModels).toContain('o4-mini')
|
||||||
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
describe('Cost Calculation', () => {
|
describe('Cost Calculation', () => {
|
||||||
@@ -464,7 +687,7 @@ describe('Cost Calculation', () => {
|
|||||||
expect(result.output).toBeGreaterThan(0)
|
expect(result.output).toBeGreaterThan(0)
|
||||||
expect(result.total).toBeCloseTo(result.input + result.output, 6)
|
expect(result.total).toBeCloseTo(result.input + result.output, 6)
|
||||||
expect(result.pricing).toBeDefined()
|
expect(result.pricing).toBeDefined()
|
||||||
expect(result.pricing.input).toBe(2.5) // GPT-4o pricing
|
expect(result.pricing.input).toBe(2.5)
|
||||||
})
|
})
|
||||||
|
|
||||||
it.concurrent('should handle cached input pricing when enabled', () => {
|
it.concurrent('should handle cached input pricing when enabled', () => {
|
||||||
@@ -472,7 +695,7 @@ describe('Cost Calculation', () => {
|
|||||||
const cachedCost = calculateCost('gpt-4o', 1000, 500, true)
|
const cachedCost = calculateCost('gpt-4o', 1000, 500, true)
|
||||||
|
|
||||||
expect(cachedCost.input).toBeLessThan(regularCost.input)
|
expect(cachedCost.input).toBeLessThan(regularCost.input)
|
||||||
expect(cachedCost.output).toBe(regularCost.output) // Output cost should be same
|
expect(cachedCost.output).toBe(regularCost.output)
|
||||||
})
|
})
|
||||||
|
|
||||||
it.concurrent('should return default pricing for unknown models', () => {
|
it.concurrent('should return default pricing for unknown models', () => {
|
||||||
@@ -481,7 +704,7 @@ describe('Cost Calculation', () => {
|
|||||||
expect(result.input).toBe(0)
|
expect(result.input).toBe(0)
|
||||||
expect(result.output).toBe(0)
|
expect(result.output).toBe(0)
|
||||||
expect(result.total).toBe(0)
|
expect(result.total).toBe(0)
|
||||||
expect(result.pricing.input).toBe(1.0) // Default pricing
|
expect(result.pricing.input).toBe(1.0)
|
||||||
})
|
})
|
||||||
|
|
||||||
it.concurrent('should handle zero tokens', () => {
|
it.concurrent('should handle zero tokens', () => {
|
||||||
@@ -528,19 +751,15 @@ describe('getHostedModels', () => {
|
|||||||
it.concurrent('should return OpenAI, Anthropic, and Google models as hosted', () => {
|
it.concurrent('should return OpenAI, Anthropic, and Google models as hosted', () => {
|
||||||
const hostedModels = getHostedModels()
|
const hostedModels = getHostedModels()
|
||||||
|
|
||||||
// OpenAI models
|
|
||||||
expect(hostedModels).toContain('gpt-4o')
|
expect(hostedModels).toContain('gpt-4o')
|
||||||
expect(hostedModels).toContain('o1')
|
expect(hostedModels).toContain('o1')
|
||||||
|
|
||||||
// Anthropic models
|
|
||||||
expect(hostedModels).toContain('claude-sonnet-4-0')
|
expect(hostedModels).toContain('claude-sonnet-4-0')
|
||||||
expect(hostedModels).toContain('claude-opus-4-0')
|
expect(hostedModels).toContain('claude-opus-4-0')
|
||||||
|
|
||||||
// Google models
|
|
||||||
expect(hostedModels).toContain('gemini-2.5-pro')
|
expect(hostedModels).toContain('gemini-2.5-pro')
|
||||||
expect(hostedModels).toContain('gemini-2.5-flash')
|
expect(hostedModels).toContain('gemini-2.5-flash')
|
||||||
|
|
||||||
// Should not contain models from other providers
|
|
||||||
expect(hostedModels).not.toContain('deepseek-v3')
|
expect(hostedModels).not.toContain('deepseek-v3')
|
||||||
expect(hostedModels).not.toContain('grok-4-latest')
|
expect(hostedModels).not.toContain('grok-4-latest')
|
||||||
})
|
})
|
||||||
@@ -558,31 +777,24 @@ describe('getHostedModels', () => {
|
|||||||
|
|
||||||
describe('shouldBillModelUsage', () => {
|
describe('shouldBillModelUsage', () => {
|
||||||
it.concurrent('should return true for exact matches of hosted models', () => {
|
it.concurrent('should return true for exact matches of hosted models', () => {
|
||||||
// OpenAI models
|
|
||||||
expect(shouldBillModelUsage('gpt-4o')).toBe(true)
|
expect(shouldBillModelUsage('gpt-4o')).toBe(true)
|
||||||
expect(shouldBillModelUsage('o1')).toBe(true)
|
expect(shouldBillModelUsage('o1')).toBe(true)
|
||||||
|
|
||||||
// Anthropic models
|
|
||||||
expect(shouldBillModelUsage('claude-sonnet-4-0')).toBe(true)
|
expect(shouldBillModelUsage('claude-sonnet-4-0')).toBe(true)
|
||||||
expect(shouldBillModelUsage('claude-opus-4-0')).toBe(true)
|
expect(shouldBillModelUsage('claude-opus-4-0')).toBe(true)
|
||||||
|
|
||||||
// Google models
|
|
||||||
expect(shouldBillModelUsage('gemini-2.5-pro')).toBe(true)
|
expect(shouldBillModelUsage('gemini-2.5-pro')).toBe(true)
|
||||||
expect(shouldBillModelUsage('gemini-2.5-flash')).toBe(true)
|
expect(shouldBillModelUsage('gemini-2.5-flash')).toBe(true)
|
||||||
})
|
})
|
||||||
|
|
||||||
it.concurrent('should return false for non-hosted models', () => {
|
it.concurrent('should return false for non-hosted models', () => {
|
||||||
// Other providers
|
|
||||||
expect(shouldBillModelUsage('deepseek-v3')).toBe(false)
|
expect(shouldBillModelUsage('deepseek-v3')).toBe(false)
|
||||||
expect(shouldBillModelUsage('grok-4-latest')).toBe(false)
|
expect(shouldBillModelUsage('grok-4-latest')).toBe(false)
|
||||||
|
|
||||||
// Unknown models
|
|
||||||
expect(shouldBillModelUsage('unknown-model')).toBe(false)
|
expect(shouldBillModelUsage('unknown-model')).toBe(false)
|
||||||
})
|
})
|
||||||
|
|
||||||
it.concurrent('should return false for versioned model names not in hosted list', () => {
|
it.concurrent('should return false for versioned model names not in hosted list', () => {
|
||||||
// Versioned model names that are NOT in the hosted list
|
|
||||||
// These should NOT be billed (user provides own API key)
|
|
||||||
expect(shouldBillModelUsage('claude-sonnet-4-20250514')).toBe(false)
|
expect(shouldBillModelUsage('claude-sonnet-4-20250514')).toBe(false)
|
||||||
expect(shouldBillModelUsage('gpt-4o-2024-08-06')).toBe(false)
|
expect(shouldBillModelUsage('gpt-4o-2024-08-06')).toBe(false)
|
||||||
expect(shouldBillModelUsage('claude-3-5-sonnet-20241022')).toBe(false)
|
expect(shouldBillModelUsage('claude-3-5-sonnet-20241022')).toBe(false)
|
||||||
@@ -595,8 +807,7 @@ describe('shouldBillModelUsage', () => {
|
|||||||
})
|
})
|
||||||
|
|
||||||
it.concurrent('should not match partial model names', () => {
|
it.concurrent('should not match partial model names', () => {
|
||||||
// Should not match partial/prefix models
|
expect(shouldBillModelUsage('gpt-4')).toBe(false)
|
||||||
expect(shouldBillModelUsage('gpt-4')).toBe(false) // gpt-4o is hosted, not gpt-4
|
|
||||||
expect(shouldBillModelUsage('claude-sonnet')).toBe(false)
|
expect(shouldBillModelUsage('claude-sonnet')).toBe(false)
|
||||||
expect(shouldBillModelUsage('gemini')).toBe(false)
|
expect(shouldBillModelUsage('gemini')).toBe(false)
|
||||||
})
|
})
|
||||||
@@ -612,8 +823,8 @@ describe('Provider Management', () => {
|
|||||||
})
|
})
|
||||||
|
|
||||||
it.concurrent('should use model patterns for pattern matching', () => {
|
it.concurrent('should use model patterns for pattern matching', () => {
|
||||||
expect(getProviderFromModel('gpt-5-custom')).toBe('openai') // Matches /^gpt/ pattern
|
expect(getProviderFromModel('gpt-5-custom')).toBe('openai')
|
||||||
expect(getProviderFromModel('claude-custom-model')).toBe('anthropic') // Matches /^claude/ pattern
|
expect(getProviderFromModel('claude-custom-model')).toBe('anthropic')
|
||||||
})
|
})
|
||||||
|
|
||||||
it.concurrent('should default to ollama for unknown models', () => {
|
it.concurrent('should default to ollama for unknown models', () => {
|
||||||
@@ -667,7 +878,6 @@ describe('Provider Management', () => {
|
|||||||
expect(Array.isArray(allModels)).toBe(true)
|
expect(Array.isArray(allModels)).toBe(true)
|
||||||
expect(allModels.length).toBeGreaterThan(0)
|
expect(allModels.length).toBeGreaterThan(0)
|
||||||
|
|
||||||
// Should contain models from different providers
|
|
||||||
expect(allModels).toContain('gpt-4o')
|
expect(allModels).toContain('gpt-4o')
|
||||||
expect(allModels).toContain('claude-sonnet-4-0')
|
expect(allModels).toContain('claude-sonnet-4-0')
|
||||||
expect(allModels).toContain('gemini-2.5-pro')
|
expect(allModels).toContain('gemini-2.5-pro')
|
||||||
@@ -712,7 +922,6 @@ describe('Provider Management', () => {
|
|||||||
|
|
||||||
const baseProviders = getBaseModelProviders()
|
const baseProviders = getBaseModelProviders()
|
||||||
expect(typeof baseProviders).toBe('object')
|
expect(typeof baseProviders).toBe('object')
|
||||||
// Should exclude ollama models
|
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
@@ -720,10 +929,8 @@ describe('Provider Management', () => {
|
|||||||
it.concurrent('should update ollama models', () => {
|
it.concurrent('should update ollama models', () => {
|
||||||
const mockModels = ['llama2', 'codellama', 'mistral']
|
const mockModels = ['llama2', 'codellama', 'mistral']
|
||||||
|
|
||||||
// This should not throw
|
|
||||||
expect(() => updateOllamaProviderModels(mockModels)).not.toThrow()
|
expect(() => updateOllamaProviderModels(mockModels)).not.toThrow()
|
||||||
|
|
||||||
// Verify the models were updated
|
|
||||||
const ollamaModels = getProviderModels('ollama')
|
const ollamaModels = getProviderModels('ollama')
|
||||||
expect(ollamaModels).toEqual(mockModels)
|
expect(ollamaModels).toEqual(mockModels)
|
||||||
})
|
})
|
||||||
@@ -754,7 +961,7 @@ describe('JSON and Structured Output', () => {
|
|||||||
})
|
})
|
||||||
|
|
||||||
it.concurrent('should clean up common JSON issues', () => {
|
it.concurrent('should clean up common JSON issues', () => {
|
||||||
const content = '{\n "key": "value",\n "number": 42,\n}' // Trailing comma
|
const content = '{\n "key": "value",\n "number": 42,\n}'
|
||||||
const result = extractAndParseJSON(content)
|
const result = extractAndParseJSON(content)
|
||||||
expect(result).toEqual({ key: 'value', number: 42 })
|
expect(result).toEqual({ key: 'value', number: 42 })
|
||||||
})
|
})
|
||||||
@@ -945,13 +1152,13 @@ describe('prepareToolExecution', () => {
|
|||||||
const { toolParams } = prepareToolExecution(tool, llmArgs, request)
|
const { toolParams } = prepareToolExecution(tool, llmArgs, request)
|
||||||
|
|
||||||
expect(toolParams.apiKey).toBe('user-key')
|
expect(toolParams.apiKey).toBe('user-key')
|
||||||
expect(toolParams.channel).toBe('#general') // User value wins
|
expect(toolParams.channel).toBe('#general')
|
||||||
expect(toolParams.message).toBe('Hello world')
|
expect(toolParams.message).toBe('Hello world')
|
||||||
})
|
})
|
||||||
|
|
||||||
it.concurrent('should filter out empty string user params', () => {
|
it.concurrent('should filter out empty string user params', () => {
|
||||||
const tool = {
|
const tool = {
|
||||||
params: { apiKey: 'user-key', channel: '' }, // Empty channel
|
params: { apiKey: 'user-key', channel: '' },
|
||||||
}
|
}
|
||||||
const llmArgs = { message: 'Hello', channel: '#llm-channel' }
|
const llmArgs = { message: 'Hello', channel: '#llm-channel' }
|
||||||
const request = {}
|
const request = {}
|
||||||
@@ -959,7 +1166,7 @@ describe('prepareToolExecution', () => {
|
|||||||
const { toolParams } = prepareToolExecution(tool, llmArgs, request)
|
const { toolParams } = prepareToolExecution(tool, llmArgs, request)
|
||||||
|
|
||||||
expect(toolParams.apiKey).toBe('user-key')
|
expect(toolParams.apiKey).toBe('user-key')
|
||||||
expect(toolParams.channel).toBe('#llm-channel') // LLM value used since user is empty
|
expect(toolParams.channel).toBe('#llm-channel')
|
||||||
expect(toolParams.message).toBe('Hello')
|
expect(toolParams.message).toBe('Hello')
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
@@ -969,7 +1176,7 @@ describe('prepareToolExecution', () => {
|
|||||||
const tool = {
|
const tool = {
|
||||||
params: {
|
params: {
|
||||||
workflowId: 'child-workflow-123',
|
workflowId: 'child-workflow-123',
|
||||||
inputMapping: '{}', // Empty JSON string from UI
|
inputMapping: '{}',
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
const llmArgs = {
|
const llmArgs = {
|
||||||
@@ -979,7 +1186,6 @@ describe('prepareToolExecution', () => {
|
|||||||
|
|
||||||
const { toolParams } = prepareToolExecution(tool, llmArgs, request)
|
const { toolParams } = prepareToolExecution(tool, llmArgs, request)
|
||||||
|
|
||||||
// LLM values should be used since user object is empty
|
|
||||||
expect(toolParams.inputMapping).toEqual({ query: 'search term', limit: 10 })
|
expect(toolParams.inputMapping).toEqual({ query: 'search term', limit: 10 })
|
||||||
expect(toolParams.workflowId).toBe('child-workflow-123')
|
expect(toolParams.workflowId).toBe('child-workflow-123')
|
||||||
})
|
})
|
||||||
@@ -988,7 +1194,7 @@ describe('prepareToolExecution', () => {
|
|||||||
const tool = {
|
const tool = {
|
||||||
params: {
|
params: {
|
||||||
workflowId: 'child-workflow',
|
workflowId: 'child-workflow',
|
||||||
inputMapping: '{"query": "", "customField": "user-value"}', // Partial values
|
inputMapping: '{"query": "", "customField": "user-value"}',
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
const llmArgs = {
|
const llmArgs = {
|
||||||
@@ -998,7 +1204,6 @@ describe('prepareToolExecution', () => {
|
|||||||
|
|
||||||
const { toolParams } = prepareToolExecution(tool, llmArgs, request)
|
const { toolParams } = prepareToolExecution(tool, llmArgs, request)
|
||||||
|
|
||||||
// LLM fills empty query, user's customField preserved, LLM's limit included
|
|
||||||
expect(toolParams.inputMapping).toEqual({
|
expect(toolParams.inputMapping).toEqual({
|
||||||
query: 'llm-search',
|
query: 'llm-search',
|
||||||
limit: 10,
|
limit: 10,
|
||||||
@@ -1020,7 +1225,6 @@ describe('prepareToolExecution', () => {
|
|||||||
|
|
||||||
const { toolParams } = prepareToolExecution(tool, llmArgs, request)
|
const { toolParams } = prepareToolExecution(tool, llmArgs, request)
|
||||||
|
|
||||||
// User values win, but LLM's extra field is included
|
|
||||||
expect(toolParams.inputMapping).toEqual({
|
expect(toolParams.inputMapping).toEqual({
|
||||||
query: 'user-search',
|
query: 'user-search',
|
||||||
limit: 5,
|
limit: 5,
|
||||||
@@ -1032,7 +1236,7 @@ describe('prepareToolExecution', () => {
|
|||||||
const tool = {
|
const tool = {
|
||||||
params: {
|
params: {
|
||||||
workflowId: 'child-workflow',
|
workflowId: 'child-workflow',
|
||||||
inputMapping: { query: '', customField: 'user-value' }, // Object, not string
|
inputMapping: { query: '', customField: 'user-value' },
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
const llmArgs = {
|
const llmArgs = {
|
||||||
@@ -1051,7 +1255,7 @@ describe('prepareToolExecution', () => {
|
|||||||
|
|
||||||
it.concurrent('should use LLM inputMapping when user does not provide it', () => {
|
it.concurrent('should use LLM inputMapping when user does not provide it', () => {
|
||||||
const tool = {
|
const tool = {
|
||||||
params: { workflowId: 'child-workflow' }, // No inputMapping
|
params: { workflowId: 'child-workflow' },
|
||||||
}
|
}
|
||||||
const llmArgs = {
|
const llmArgs = {
|
||||||
inputMapping: { query: 'llm-search', limit: 10 },
|
inputMapping: { query: 'llm-search', limit: 10 },
|
||||||
@@ -1070,7 +1274,7 @@ describe('prepareToolExecution', () => {
|
|||||||
inputMapping: '{"query": "user-search"}',
|
inputMapping: '{"query": "user-search"}',
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
const llmArgs = {} // No inputMapping from LLM
|
const llmArgs = {}
|
||||||
const request = {}
|
const request = {}
|
||||||
|
|
||||||
const { toolParams } = prepareToolExecution(tool, llmArgs, request)
|
const { toolParams } = prepareToolExecution(tool, llmArgs, request)
|
||||||
@@ -1092,7 +1296,6 @@ describe('prepareToolExecution', () => {
|
|||||||
|
|
||||||
const { toolParams } = prepareToolExecution(tool, llmArgs, request)
|
const { toolParams } = prepareToolExecution(tool, llmArgs, request)
|
||||||
|
|
||||||
// Should use LLM values since user JSON is invalid
|
|
||||||
expect(toolParams.inputMapping).toEqual({ query: 'llm-search' })
|
expect(toolParams.inputMapping).toEqual({ query: 'llm-search' })
|
||||||
})
|
})
|
||||||
|
|
||||||
@@ -1105,9 +1308,8 @@ describe('prepareToolExecution', () => {
|
|||||||
|
|
||||||
const { toolParams } = prepareToolExecution(tool, llmArgs, request)
|
const { toolParams } = prepareToolExecution(tool, llmArgs, request)
|
||||||
|
|
||||||
// Normal behavior: user values override LLM values
|
|
||||||
expect(toolParams.apiKey).toBe('user-key')
|
expect(toolParams.apiKey).toBe('user-key')
|
||||||
expect(toolParams.channel).toBe('#general') // User value wins
|
expect(toolParams.channel).toBe('#general')
|
||||||
expect(toolParams.message).toBe('Hello')
|
expect(toolParams.message).toBe('Hello')
|
||||||
})
|
})
|
||||||
|
|
||||||
@@ -1125,8 +1327,6 @@ describe('prepareToolExecution', () => {
|
|||||||
|
|
||||||
const { toolParams } = prepareToolExecution(tool, llmArgs, request)
|
const { toolParams } = prepareToolExecution(tool, llmArgs, request)
|
||||||
|
|
||||||
// 0 and false should be preserved (they're valid values)
|
|
||||||
// empty string should be filled by LLM
|
|
||||||
expect(toolParams.inputMapping).toEqual({
|
expect(toolParams.inputMapping).toEqual({
|
||||||
limit: 0,
|
limit: 0,
|
||||||
enabled: false,
|
enabled: false,
|
||||||
|
|||||||
1
apps/sim/tools/airweave/index.ts
Normal file
1
apps/sim/tools/airweave/index.ts
Normal file
@@ -0,0 +1 @@
|
|||||||
|
export { airweaveSearchTool } from './search'
|
||||||
130
apps/sim/tools/airweave/search.ts
Normal file
130
apps/sim/tools/airweave/search.ts
Normal file
@@ -0,0 +1,130 @@
|
|||||||
|
import type { AirweaveSearchParams, AirweaveSearchResponse } from '@/tools/airweave/types'
|
||||||
|
import { AIRWEAVE_SEARCH_RESULT_OUTPUT_PROPERTIES } from '@/tools/airweave/types'
|
||||||
|
import type { ToolConfig } from '@/tools/types'
|
||||||
|
|
||||||
|
export const airweaveSearchTool: ToolConfig<AirweaveSearchParams, AirweaveSearchResponse> = {
|
||||||
|
id: 'airweave_search',
|
||||||
|
name: 'Airweave Search',
|
||||||
|
description:
|
||||||
|
'Search your synced data collections using Airweave. Supports semantic search with hybrid, neural, or keyword retrieval strategies. Optionally generate AI-powered answers from search results.',
|
||||||
|
version: '1.0.0',
|
||||||
|
|
||||||
|
params: {
|
||||||
|
apiKey: {
|
||||||
|
type: 'string',
|
||||||
|
required: true,
|
||||||
|
visibility: 'user-only',
|
||||||
|
description: 'Airweave API Key for authentication',
|
||||||
|
},
|
||||||
|
collectionId: {
|
||||||
|
type: 'string',
|
||||||
|
required: true,
|
||||||
|
visibility: 'user-or-llm',
|
||||||
|
description: 'The readable ID of the collection to search',
|
||||||
|
},
|
||||||
|
query: {
|
||||||
|
type: 'string',
|
||||||
|
required: true,
|
||||||
|
visibility: 'user-or-llm',
|
||||||
|
description: 'The search query text',
|
||||||
|
},
|
||||||
|
limit: {
|
||||||
|
type: 'number',
|
||||||
|
required: false,
|
||||||
|
visibility: 'user-only',
|
||||||
|
description: 'Maximum number of results to return (default: 100)',
|
||||||
|
},
|
||||||
|
retrievalStrategy: {
|
||||||
|
type: 'string',
|
||||||
|
required: false,
|
||||||
|
visibility: 'user-or-llm',
|
||||||
|
description: 'Retrieval strategy: hybrid (default), neural, or keyword',
|
||||||
|
},
|
||||||
|
expandQuery: {
|
||||||
|
type: 'boolean',
|
||||||
|
required: false,
|
||||||
|
visibility: 'user-or-llm',
|
||||||
|
description: 'Generate query variations to improve recall',
|
||||||
|
},
|
||||||
|
rerank: {
|
||||||
|
type: 'boolean',
|
||||||
|
required: false,
|
||||||
|
visibility: 'user-or-llm',
|
||||||
|
description: 'Reorder results for improved relevance using LLM',
|
||||||
|
},
|
||||||
|
generateAnswer: {
|
||||||
|
type: 'boolean',
|
||||||
|
required: false,
|
||||||
|
visibility: 'user-or-llm',
|
||||||
|
description: 'Generate a natural-language answer to the query',
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
request: {
|
||||||
|
url: (params) => `https://api.airweave.ai/collections/${params.collectionId}/search`,
|
||||||
|
method: 'POST',
|
||||||
|
headers: (params) => ({
|
||||||
|
'X-API-Key': params.apiKey,
|
||||||
|
'Content-Type': 'application/json',
|
||||||
|
}),
|
||||||
|
body: (params) => {
|
||||||
|
const body: Record<string, any> = {
|
||||||
|
query: params.query,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Only include optional parameters if explicitly set
|
||||||
|
if (params.limit !== undefined) body.limit = Number(params.limit)
|
||||||
|
if (params.retrievalStrategy) body.retrieval_strategy = params.retrievalStrategy
|
||||||
|
if (params.expandQuery !== undefined) body.expand_query = params.expandQuery
|
||||||
|
if (params.rerank !== undefined) body.rerank = params.rerank
|
||||||
|
if (params.generateAnswer !== undefined) body.generate_answer = params.generateAnswer
|
||||||
|
|
||||||
|
return body
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
transformResponse: async (response: Response) => {
|
||||||
|
const data = await response.json()
|
||||||
|
|
||||||
|
// Handle error responses
|
||||||
|
if (!response.ok) {
|
||||||
|
return {
|
||||||
|
success: false,
|
||||||
|
output: { results: [] },
|
||||||
|
error: data.detail ?? data.message ?? 'Search request failed',
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return {
|
||||||
|
success: true,
|
||||||
|
output: {
|
||||||
|
results: (data.results ?? []).map((result: any) => ({
|
||||||
|
entity_id: result.entity_id ?? result.id ?? '',
|
||||||
|
source_name: result.source_name ?? '',
|
||||||
|
md_content: result.md_content ?? null,
|
||||||
|
score: result.score ?? 0,
|
||||||
|
metadata: result.metadata ?? null,
|
||||||
|
breadcrumbs: result.breadcrumbs ?? null,
|
||||||
|
url: result.url ?? null,
|
||||||
|
})),
|
||||||
|
...(data.completion && { completion: data.completion }),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
},
|
||||||
|
|
||||||
|
outputs: {
|
||||||
|
results: {
|
||||||
|
type: 'array',
|
||||||
|
description: 'Search results with content, scores, and metadata from your synced data',
|
||||||
|
items: {
|
||||||
|
type: 'object',
|
||||||
|
properties: AIRWEAVE_SEARCH_RESULT_OUTPUT_PROPERTIES,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
completion: {
|
||||||
|
type: 'string',
|
||||||
|
description: 'AI-generated answer to the query (when generateAnswer is enabled)',
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
84
apps/sim/tools/airweave/types.ts
Normal file
84
apps/sim/tools/airweave/types.ts
Normal file
@@ -0,0 +1,84 @@
|
|||||||
|
import type { OutputProperty, ToolResponse } from '@/tools/types'
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Output definition for Airweave search result items.
|
||||||
|
* Based on Airweave Search API response format.
|
||||||
|
*/
|
||||||
|
export const AIRWEAVE_SEARCH_RESULT_OUTPUT_PROPERTIES = {
|
||||||
|
entity_id: { type: 'string', description: 'Unique identifier for the search result entity' },
|
||||||
|
source_name: { type: 'string', description: 'Name of the data source (e.g., "GitHub", "Slack")' },
|
||||||
|
md_content: {
|
||||||
|
type: 'string',
|
||||||
|
description: 'Markdown-formatted content of the result',
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
score: { type: 'number', description: 'Relevance score from the search' },
|
||||||
|
metadata: {
|
||||||
|
type: 'object',
|
||||||
|
description: 'Additional metadata associated with the result',
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
breadcrumbs: {
|
||||||
|
type: 'array',
|
||||||
|
description: 'Navigation path to the result within its source',
|
||||||
|
optional: true,
|
||||||
|
items: { type: 'string', description: 'Breadcrumb segment' },
|
||||||
|
},
|
||||||
|
url: { type: 'string', description: 'URL to the original content', optional: true },
|
||||||
|
} as const satisfies Record<string, OutputProperty>
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Complete search result output definition.
|
||||||
|
*/
|
||||||
|
export const AIRWEAVE_SEARCH_RESULT_OUTPUT: OutputProperty = {
|
||||||
|
type: 'object',
|
||||||
|
description: 'Search result item with content and metadata',
|
||||||
|
properties: AIRWEAVE_SEARCH_RESULT_OUTPUT_PROPERTIES,
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Parameters for Airweave search requests.
|
||||||
|
*/
|
||||||
|
export interface AirweaveSearchParams {
|
||||||
|
/** Airweave API Key for authentication */
|
||||||
|
apiKey: string
|
||||||
|
/** The readable ID of the collection to search */
|
||||||
|
collectionId: string
|
||||||
|
/** The search query text */
|
||||||
|
query: string
|
||||||
|
/** Maximum number of results to return */
|
||||||
|
limit?: number
|
||||||
|
/** Retrieval strategy: hybrid, neural, or keyword */
|
||||||
|
retrievalStrategy?: 'hybrid' | 'neural' | 'keyword'
|
||||||
|
/** Generate query variations to improve recall */
|
||||||
|
expandQuery?: boolean
|
||||||
|
/** Reorder results for improved relevance using LLM */
|
||||||
|
rerank?: boolean
|
||||||
|
/** Generate a natural-language answer to the query */
|
||||||
|
generateAnswer?: boolean
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Individual search result from Airweave.
|
||||||
|
*/
|
||||||
|
export interface AirweaveSearchResult {
|
||||||
|
entity_id: string
|
||||||
|
source_name: string
|
||||||
|
md_content?: string
|
||||||
|
score: number
|
||||||
|
metadata?: Record<string, any>
|
||||||
|
breadcrumbs?: string[]
|
||||||
|
url?: string
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Response from Airweave search API.
|
||||||
|
*/
|
||||||
|
export interface AirweaveSearchResponse extends ToolResponse {
|
||||||
|
output: {
|
||||||
|
/** Array of search results */
|
||||||
|
results: AirweaveSearchResult[]
|
||||||
|
/** AI-generated answer to the query (when generateAnswer is true) */
|
||||||
|
completion?: string
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -9,6 +9,14 @@ export interface GuardrailsValidateInput {
|
|||||||
topK?: string
|
topK?: string
|
||||||
model?: string
|
model?: string
|
||||||
apiKey?: string
|
apiKey?: string
|
||||||
|
azureEndpoint?: string
|
||||||
|
azureApiVersion?: string
|
||||||
|
vertexProject?: string
|
||||||
|
vertexLocation?: string
|
||||||
|
vertexCredential?: string
|
||||||
|
bedrockAccessKeyId?: string
|
||||||
|
bedrockSecretKey?: string
|
||||||
|
bedrockRegion?: string
|
||||||
piiEntityTypes?: string[]
|
piiEntityTypes?: string[]
|
||||||
piiMode?: string
|
piiMode?: string
|
||||||
piiLanguage?: string
|
piiLanguage?: string
|
||||||
@@ -166,6 +174,14 @@ export const guardrailsValidateTool: ToolConfig<GuardrailsValidateInput, Guardra
|
|||||||
topK: params.topK,
|
topK: params.topK,
|
||||||
model: params.model,
|
model: params.model,
|
||||||
apiKey: params.apiKey,
|
apiKey: params.apiKey,
|
||||||
|
azureEndpoint: params.azureEndpoint,
|
||||||
|
azureApiVersion: params.azureApiVersion,
|
||||||
|
vertexProject: params.vertexProject,
|
||||||
|
vertexLocation: params.vertexLocation,
|
||||||
|
vertexCredential: params.vertexCredential,
|
||||||
|
bedrockAccessKeyId: params.bedrockAccessKeyId,
|
||||||
|
bedrockSecretKey: params.bedrockSecretKey,
|
||||||
|
bedrockRegion: params.bedrockRegion,
|
||||||
piiEntityTypes: params.piiEntityTypes,
|
piiEntityTypes: params.piiEntityTypes,
|
||||||
piiMode: params.piiMode,
|
piiMode: params.piiMode,
|
||||||
piiLanguage: params.piiLanguage,
|
piiLanguage: params.piiLanguage,
|
||||||
|
|||||||
@@ -9,6 +9,7 @@ import { generateRequestId } from '@/lib/core/utils/request'
|
|||||||
import { getBaseUrl } from '@/lib/core/utils/urls'
|
import { getBaseUrl } from '@/lib/core/utils/urls'
|
||||||
import { parseMcpToolId } from '@/lib/mcp/utils'
|
import { parseMcpToolId } from '@/lib/mcp/utils'
|
||||||
import { isCustomTool, isMcpTool } from '@/executor/constants'
|
import { isCustomTool, isMcpTool } from '@/executor/constants'
|
||||||
|
import { resolveSkillContent } from '@/executor/handlers/agent/skills-resolver'
|
||||||
import type { ExecutionContext } from '@/executor/types'
|
import type { ExecutionContext } from '@/executor/types'
|
||||||
import type { ErrorInfo } from '@/tools/error-extractors'
|
import type { ErrorInfo } from '@/tools/error-extractors'
|
||||||
import { extractErrorMessage } from '@/tools/error-extractors'
|
import { extractErrorMessage } from '@/tools/error-extractors'
|
||||||
@@ -218,6 +219,31 @@ export async function executeTool(
|
|||||||
// Normalize tool ID to strip resource suffixes (e.g., workflow_executor_<uuid> -> workflow_executor)
|
// Normalize tool ID to strip resource suffixes (e.g., workflow_executor_<uuid> -> workflow_executor)
|
||||||
const normalizedToolId = normalizeToolId(toolId)
|
const normalizedToolId = normalizeToolId(toolId)
|
||||||
|
|
||||||
|
// Handle load_skill tool for agent skills progressive disclosure
|
||||||
|
if (normalizedToolId === 'load_skill') {
|
||||||
|
const skillName = params.skill_name
|
||||||
|
const workspaceId = params._context?.workspaceId
|
||||||
|
if (!skillName || !workspaceId) {
|
||||||
|
return {
|
||||||
|
success: false,
|
||||||
|
output: { error: 'Missing skill_name or workspace context' },
|
||||||
|
error: 'Missing skill_name or workspace context',
|
||||||
|
}
|
||||||
|
}
|
||||||
|
const content = await resolveSkillContent(skillName, workspaceId)
|
||||||
|
if (!content) {
|
||||||
|
return {
|
||||||
|
success: false,
|
||||||
|
output: { error: `Skill "${skillName}" not found` },
|
||||||
|
error: `Skill "${skillName}" not found`,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return {
|
||||||
|
success: true,
|
||||||
|
output: { content },
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// If it's a custom tool, use the async version with workflowId
|
// If it's a custom tool, use the async version with workflowId
|
||||||
if (isCustomTool(normalizedToolId)) {
|
if (isCustomTool(normalizedToolId)) {
|
||||||
const workflowId = params._context?.workflowId
|
const workflowId = params._context?.workflowId
|
||||||
|
|||||||
@@ -24,6 +24,7 @@ import {
|
|||||||
airtableListRecordsTool,
|
airtableListRecordsTool,
|
||||||
airtableUpdateRecordTool,
|
airtableUpdateRecordTool,
|
||||||
} from '@/tools/airtable'
|
} from '@/tools/airtable'
|
||||||
|
import { airweaveSearchTool } from '@/tools/airweave'
|
||||||
import { apifyRunActorAsyncTool, apifyRunActorSyncTool } from '@/tools/apify'
|
import { apifyRunActorAsyncTool, apifyRunActorSyncTool } from '@/tools/apify'
|
||||||
import {
|
import {
|
||||||
apolloAccountBulkCreateTool,
|
apolloAccountBulkCreateTool,
|
||||||
@@ -1809,6 +1810,7 @@ export const tools: Record<string, ToolConfig> = {
|
|||||||
a2a_resubscribe: a2aResubscribeTool,
|
a2a_resubscribe: a2aResubscribeTool,
|
||||||
a2a_send_message: a2aSendMessageTool,
|
a2a_send_message: a2aSendMessageTool,
|
||||||
a2a_set_push_notification: a2aSetPushNotificationTool,
|
a2a_set_push_notification: a2aSetPushNotificationTool,
|
||||||
|
airweave_search: airweaveSearchTool,
|
||||||
arxiv_search: arxivSearchTool,
|
arxiv_search: arxivSearchTool,
|
||||||
arxiv_get_paper: arxivGetPaperTool,
|
arxiv_get_paper: arxivGetPaperTool,
|
||||||
arxiv_get_author_papers: arxivGetAuthorPapersTool,
|
arxiv_get_author_papers: arxivGetAuthorPapersTool,
|
||||||
|
|||||||
15
packages/db/migrations/0152_parallel_frog_thor.sql
Normal file
15
packages/db/migrations/0152_parallel_frog_thor.sql
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
CREATE TABLE "skill" (
|
||||||
|
"id" text PRIMARY KEY NOT NULL,
|
||||||
|
"workspace_id" text,
|
||||||
|
"user_id" text,
|
||||||
|
"name" text NOT NULL,
|
||||||
|
"description" text NOT NULL,
|
||||||
|
"content" text NOT NULL,
|
||||||
|
"created_at" timestamp DEFAULT now() NOT NULL,
|
||||||
|
"updated_at" timestamp DEFAULT now() NOT NULL
|
||||||
|
);
|
||||||
|
--> statement-breakpoint
|
||||||
|
ALTER TABLE "skill" ADD CONSTRAINT "skill_workspace_id_workspace_id_fk" FOREIGN KEY ("workspace_id") REFERENCES "public"."workspace"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
|
||||||
|
ALTER TABLE "skill" ADD CONSTRAINT "skill_user_id_user_id_fk" FOREIGN KEY ("user_id") REFERENCES "public"."user"("id") ON DELETE set null ON UPDATE no action;--> statement-breakpoint
|
||||||
|
CREATE INDEX "skill_workspace_id_idx" ON "skill" USING btree ("workspace_id");--> statement-breakpoint
|
||||||
|
CREATE UNIQUE INDEX "skill_workspace_name_unique" ON "skill" USING btree ("workspace_id","name");
|
||||||
10619
packages/db/migrations/meta/0152_snapshot.json
Normal file
10619
packages/db/migrations/meta/0152_snapshot.json
Normal file
File diff suppressed because it is too large
Load Diff
@@ -1058,6 +1058,13 @@
|
|||||||
"when": 1770239332381,
|
"when": 1770239332381,
|
||||||
"tag": "0151_stale_screwball",
|
"tag": "0151_stale_screwball",
|
||||||
"breakpoints": true
|
"breakpoints": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"idx": 152,
|
||||||
|
"version": "7",
|
||||||
|
"when": 1770336289511,
|
||||||
|
"tag": "0152_parallel_frog_thor",
|
||||||
|
"breakpoints": true
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -743,6 +743,27 @@ export const customTools = pgTable(
|
|||||||
})
|
})
|
||||||
)
|
)
|
||||||
|
|
||||||
|
export const skill = pgTable(
|
||||||
|
'skill',
|
||||||
|
{
|
||||||
|
id: text('id').primaryKey(),
|
||||||
|
workspaceId: text('workspace_id').references(() => workspace.id, { onDelete: 'cascade' }),
|
||||||
|
userId: text('user_id').references(() => user.id, { onDelete: 'set null' }),
|
||||||
|
name: text('name').notNull(),
|
||||||
|
description: text('description').notNull(),
|
||||||
|
content: text('content').notNull(),
|
||||||
|
createdAt: timestamp('created_at').notNull().defaultNow(),
|
||||||
|
updatedAt: timestamp('updated_at').notNull().defaultNow(),
|
||||||
|
},
|
||||||
|
(table) => ({
|
||||||
|
workspaceIdIdx: index('skill_workspace_id_idx').on(table.workspaceId),
|
||||||
|
workspaceNameUnique: uniqueIndex('skill_workspace_name_unique').on(
|
||||||
|
table.workspaceId,
|
||||||
|
table.name
|
||||||
|
),
|
||||||
|
})
|
||||||
|
)
|
||||||
|
|
||||||
export const subscription = pgTable(
|
export const subscription = pgTable(
|
||||||
'subscription',
|
'subscription',
|
||||||
{
|
{
|
||||||
|
|||||||
Reference in New Issue
Block a user