fix: resolve merge conflict with dev in baseline/service.py

This commit is contained in:
Zamil Majdy
2026-04-03 20:11:42 +02:00
37 changed files with 4089 additions and 240 deletions

36
.gitleaks.toml Normal file
View File

@@ -0,0 +1,36 @@
title = "AutoGPT Gitleaks Config"
[extend]
useDefault = true
[allowlist]
description = "Global allowlist"
paths = [
# Template/example env files (no real secrets)
'''\.env\.(default|example|template)$''',
# Lock files
'''pnpm-lock\.yaml$''',
'''poetry\.lock$''',
# Secrets baseline
'''\.secrets\.baseline$''',
# Build artifacts and caches (should not be committed)
'''__pycache__/''',
'''classic/frontend/build/''',
# Docker dev setup (local dev JWTs/keys only)
'''autogpt_platform/db/docker/''',
# Load test configs (dev JWTs)
'''load-tests/configs/''',
# Test files with fake/fixture keys (_test.py, test_*.py, conftest.py)
'''(_test|test_.*|conftest)\.py$''',
# Documentation (only contains placeholder keys in curl/API examples)
'''docs/.*\.md$''',
# Firebase config (public API keys by design)
'''google-services\.json$''',
'''classic/frontend/(lib|web)/''',
]
# CI test-only encryption key (marked DO NOT USE IN PRODUCTION)
regexes = [
'''dvziYgz0KSK8FENhju0ZYi8''',
# LLM model name enum values falsely flagged as API keys
'''Llama-\d.*Instruct''',
]

View File

@@ -23,9 +23,15 @@ repos:
- id: detect-secrets
name: Detect secrets
description: Detects high entropy strings that are likely to be passwords.
args: ["--baseline", ".secrets.baseline"]
files: ^autogpt_platform/
exclude: pnpm-lock\.yaml$
stages: [pre-push]
exclude: (pnpm-lock\.yaml|\.env\.(default|example|template))$
- repo: https://github.com/gitleaks/gitleaks
rev: v8.24.3
hooks:
- id: gitleaks
name: Detect secrets (gitleaks)
- repo: local
# For proper type checking, all dependencies need to be up-to-date.

467
.secrets.baseline Normal file
View File

@@ -0,0 +1,467 @@
{
"version": "1.5.0",
"plugins_used": [
{
"name": "ArtifactoryDetector"
},
{
"name": "AWSKeyDetector"
},
{
"name": "AzureStorageKeyDetector"
},
{
"name": "Base64HighEntropyString",
"limit": 4.5
},
{
"name": "BasicAuthDetector"
},
{
"name": "CloudantDetector"
},
{
"name": "DiscordBotTokenDetector"
},
{
"name": "GitHubTokenDetector"
},
{
"name": "GitLabTokenDetector"
},
{
"name": "HexHighEntropyString",
"limit": 3.0
},
{
"name": "IbmCloudIamDetector"
},
{
"name": "IbmCosHmacDetector"
},
{
"name": "IPPublicDetector"
},
{
"name": "JwtTokenDetector"
},
{
"name": "KeywordDetector",
"keyword_exclude": ""
},
{
"name": "MailchimpDetector"
},
{
"name": "NpmDetector"
},
{
"name": "OpenAIDetector"
},
{
"name": "PrivateKeyDetector"
},
{
"name": "PypiTokenDetector"
},
{
"name": "SendGridDetector"
},
{
"name": "SlackDetector"
},
{
"name": "SoftlayerDetector"
},
{
"name": "SquareOAuthDetector"
},
{
"name": "StripeDetector"
},
{
"name": "TelegramBotTokenDetector"
},
{
"name": "TwilioKeyDetector"
}
],
"filters_used": [
{
"path": "detect_secrets.filters.allowlist.is_line_allowlisted"
},
{
"path": "detect_secrets.filters.common.is_ignored_due_to_verification_policies",
"min_level": 2
},
{
"path": "detect_secrets.filters.heuristic.is_indirect_reference"
},
{
"path": "detect_secrets.filters.heuristic.is_likely_id_string"
},
{
"path": "detect_secrets.filters.heuristic.is_lock_file"
},
{
"path": "detect_secrets.filters.heuristic.is_not_alphanumeric_string"
},
{
"path": "detect_secrets.filters.heuristic.is_potential_uuid"
},
{
"path": "detect_secrets.filters.heuristic.is_prefixed_with_dollar_sign"
},
{
"path": "detect_secrets.filters.heuristic.is_sequential_string"
},
{
"path": "detect_secrets.filters.heuristic.is_swagger_file"
},
{
"path": "detect_secrets.filters.heuristic.is_templated_secret"
},
{
"path": "detect_secrets.filters.regex.should_exclude_file",
"pattern": [
"\\.env$",
"pnpm-lock\\.yaml$",
"\\.env\\.(default|example|template)$",
"__pycache__",
"_test\\.py$",
"test_.*\\.py$",
"conftest\\.py$",
"poetry\\.lock$",
"node_modules"
]
}
],
"results": {
"autogpt_platform/backend/backend/api/external/v1/integrations.py": [
{
"type": "Secret Keyword",
"filename": "autogpt_platform/backend/backend/api/external/v1/integrations.py",
"hashed_secret": "665b1e3851eefefa3fb878654292f16597d25155",
"is_verified": false,
"line_number": 289
}
],
"autogpt_platform/backend/backend/blocks/airtable/_config.py": [
{
"type": "Secret Keyword",
"filename": "autogpt_platform/backend/backend/blocks/airtable/_config.py",
"hashed_secret": "57e168b03afb7c1ee3cdc4ee3db2fe1cc6e0df26",
"is_verified": false,
"line_number": 29
}
],
"autogpt_platform/backend/backend/blocks/dataforseo/_config.py": [
{
"type": "Secret Keyword",
"filename": "autogpt_platform/backend/backend/blocks/dataforseo/_config.py",
"hashed_secret": "32ce93887331fa5d192f2876ea15ec000c7d58b8",
"is_verified": false,
"line_number": 12
}
],
"autogpt_platform/backend/backend/blocks/github/checks.py": [
{
"type": "Hex High Entropy String",
"filename": "autogpt_platform/backend/backend/blocks/github/checks.py",
"hashed_secret": "8ac6f92737d8586790519c5d7bfb4d2eb172c238",
"is_verified": false,
"line_number": 108
}
],
"autogpt_platform/backend/backend/blocks/github/ci.py": [
{
"type": "Hex High Entropy String",
"filename": "autogpt_platform/backend/backend/blocks/github/ci.py",
"hashed_secret": "90bd1b48e958257948487b90bee080ba5ed00caa",
"is_verified": false,
"line_number": 123
}
],
"autogpt_platform/backend/backend/blocks/github/example_payloads/pull_request.synchronize.json": [
{
"type": "Hex High Entropy String",
"filename": "autogpt_platform/backend/backend/blocks/github/example_payloads/pull_request.synchronize.json",
"hashed_secret": "f96896dafced7387dcd22343b8ea29d3d2c65663",
"is_verified": false,
"line_number": 42
},
{
"type": "Hex High Entropy String",
"filename": "autogpt_platform/backend/backend/blocks/github/example_payloads/pull_request.synchronize.json",
"hashed_secret": "b80a94d5e70bedf4f5f89d2f5a5255cc9492d12e",
"is_verified": false,
"line_number": 193
},
{
"type": "Hex High Entropy String",
"filename": "autogpt_platform/backend/backend/blocks/github/example_payloads/pull_request.synchronize.json",
"hashed_secret": "75b17e517fe1b3136394f6bec80c4f892da75e42",
"is_verified": false,
"line_number": 344
},
{
"type": "Hex High Entropy String",
"filename": "autogpt_platform/backend/backend/blocks/github/example_payloads/pull_request.synchronize.json",
"hashed_secret": "b0bfb5e4e2394e7f8906e5ed1dffd88b2bc89dd5",
"is_verified": false,
"line_number": 534
}
],
"autogpt_platform/backend/backend/blocks/github/statuses.py": [
{
"type": "Hex High Entropy String",
"filename": "autogpt_platform/backend/backend/blocks/github/statuses.py",
"hashed_secret": "8ac6f92737d8586790519c5d7bfb4d2eb172c238",
"is_verified": false,
"line_number": 85
}
],
"autogpt_platform/backend/backend/blocks/google/docs.py": [
{
"type": "Hex High Entropy String",
"filename": "autogpt_platform/backend/backend/blocks/google/docs.py",
"hashed_secret": "c95da0c6696342c867ef0c8258d2f74d20fd94d4",
"is_verified": false,
"line_number": 203
}
],
"autogpt_platform/backend/backend/blocks/google/sheets.py": [
{
"type": "Base64 High Entropy String",
"filename": "autogpt_platform/backend/backend/blocks/google/sheets.py",
"hashed_secret": "bd5a04fa3667e693edc13239b6d310c5c7a8564b",
"is_verified": false,
"line_number": 57
}
],
"autogpt_platform/backend/backend/blocks/linear/_config.py": [
{
"type": "Secret Keyword",
"filename": "autogpt_platform/backend/backend/blocks/linear/_config.py",
"hashed_secret": "b37f020f42d6d613b6ce30103e4d408c4499b3bb",
"is_verified": false,
"line_number": 53
}
],
"autogpt_platform/backend/backend/blocks/medium.py": [
{
"type": "Hex High Entropy String",
"filename": "autogpt_platform/backend/backend/blocks/medium.py",
"hashed_secret": "ff998abc1ce6d8f01a675fa197368e44c8916e9c",
"is_verified": false,
"line_number": 131
}
],
"autogpt_platform/backend/backend/blocks/replicate/replicate_block.py": [
{
"type": "Hex High Entropy String",
"filename": "autogpt_platform/backend/backend/blocks/replicate/replicate_block.py",
"hashed_secret": "8bbdd6f26368f58ea4011d13d7f763cb662e66f0",
"is_verified": false,
"line_number": 55
}
],
"autogpt_platform/backend/backend/blocks/slant3d/webhook.py": [
{
"type": "Hex High Entropy String",
"filename": "autogpt_platform/backend/backend/blocks/slant3d/webhook.py",
"hashed_secret": "36263c76947443b2f6e6b78153967ac4a7da99f9",
"is_verified": false,
"line_number": 100
}
],
"autogpt_platform/backend/backend/blocks/talking_head.py": [
{
"type": "Base64 High Entropy String",
"filename": "autogpt_platform/backend/backend/blocks/talking_head.py",
"hashed_secret": "44ce2d66222529eea4a32932823466fc0601c799",
"is_verified": false,
"line_number": 113
}
],
"autogpt_platform/backend/backend/blocks/wordpress/_config.py": [
{
"type": "Secret Keyword",
"filename": "autogpt_platform/backend/backend/blocks/wordpress/_config.py",
"hashed_secret": "e62679512436161b78e8a8d68c8829c2a1031ccb",
"is_verified": false,
"line_number": 17
}
],
"autogpt_platform/backend/backend/util/cache.py": [
{
"type": "Secret Keyword",
"filename": "autogpt_platform/backend/backend/util/cache.py",
"hashed_secret": "37f0c918c3fa47ca4a70e42037f9f123fdfbc75b",
"is_verified": false,
"line_number": 449
}
],
"autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/helpers.ts": [
{
"type": "Secret Keyword",
"filename": "autogpt_platform/frontend/src/app/(platform)/build/components/FlowEditor/nodes/helpers.ts",
"hashed_secret": "5baa61e4c9b93f3f0682250b6cf8331b7ee68fd8",
"is_verified": false,
"line_number": 6
}
],
"autogpt_platform/frontend/src/app/(platform)/dictionaries/en.json": [
{
"type": "Secret Keyword",
"filename": "autogpt_platform/frontend/src/app/(platform)/dictionaries/en.json",
"hashed_secret": "8be3c943b1609fffbfc51aad666d0a04adf83c9d",
"is_verified": false,
"line_number": 5
}
],
"autogpt_platform/frontend/src/app/(platform)/dictionaries/es.json": [
{
"type": "Secret Keyword",
"filename": "autogpt_platform/frontend/src/app/(platform)/dictionaries/es.json",
"hashed_secret": "5a6d1c612954979ea99ee33dbb2d231b00f6ac0a",
"is_verified": false,
"line_number": 5
}
],
"autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/AgentInputsReadOnly/helpers.ts": [
{
"type": "Secret Keyword",
"filename": "autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/AgentInputsReadOnly/helpers.ts",
"hashed_secret": "cf678cab87dc1f7d1b95b964f15375e088461679",
"is_verified": false,
"line_number": 6
},
{
"type": "Secret Keyword",
"filename": "autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/AgentInputsReadOnly/helpers.ts",
"hashed_secret": "f72cbb45464d487064610c5411c576ca4019d380",
"is_verified": false,
"line_number": 8
}
],
"autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/ModalRunSection/helpers.ts": [
{
"type": "Secret Keyword",
"filename": "autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/ModalRunSection/helpers.ts",
"hashed_secret": "cf678cab87dc1f7d1b95b964f15375e088461679",
"is_verified": false,
"line_number": 5
},
{
"type": "Secret Keyword",
"filename": "autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/components/ModalRunSection/helpers.ts",
"hashed_secret": "f72cbb45464d487064610c5411c576ca4019d380",
"is_verified": false,
"line_number": 7
}
],
"autogpt_platform/frontend/src/app/(platform)/profile/(user)/integrations/page.tsx": [
{
"type": "Secret Keyword",
"filename": "autogpt_platform/frontend/src/app/(platform)/profile/(user)/integrations/page.tsx",
"hashed_secret": "cf678cab87dc1f7d1b95b964f15375e088461679",
"is_verified": false,
"line_number": 192
},
{
"type": "Secret Keyword",
"filename": "autogpt_platform/frontend/src/app/(platform)/profile/(user)/integrations/page.tsx",
"hashed_secret": "86275db852204937bbdbdebe5fabe8536e030ab6",
"is_verified": false,
"line_number": 193
}
],
"autogpt_platform/frontend/src/components/contextual/CredentialsInput/helpers.ts": [
{
"type": "Secret Keyword",
"filename": "autogpt_platform/frontend/src/components/contextual/CredentialsInput/helpers.ts",
"hashed_secret": "47acd2028cf81b5da88ddeedb2aea4eca4b71fbd",
"is_verified": false,
"line_number": 102
},
{
"type": "Secret Keyword",
"filename": "autogpt_platform/frontend/src/components/contextual/CredentialsInput/helpers.ts",
"hashed_secret": "8be3c943b1609fffbfc51aad666d0a04adf83c9d",
"is_verified": false,
"line_number": 103
}
],
"autogpt_platform/frontend/src/lib/autogpt-server-api/utils.ts": [
{
"type": "Base64 High Entropy String",
"filename": "autogpt_platform/frontend/src/lib/autogpt-server-api/utils.ts",
"hashed_secret": "9c486c92f1a7420e1045c7ad963fbb7ba3621025",
"is_verified": false,
"line_number": 73
},
{
"type": "Base64 High Entropy String",
"filename": "autogpt_platform/frontend/src/lib/autogpt-server-api/utils.ts",
"hashed_secret": "9277508c7a6effc8fb59163efbfada189e35425c",
"is_verified": false,
"line_number": 75
},
{
"type": "Base64 High Entropy String",
"filename": "autogpt_platform/frontend/src/lib/autogpt-server-api/utils.ts",
"hashed_secret": "8dc7e2cb1d0935897d541bf5facab389b8a50340",
"is_verified": false,
"line_number": 77
},
{
"type": "Base64 High Entropy String",
"filename": "autogpt_platform/frontend/src/lib/autogpt-server-api/utils.ts",
"hashed_secret": "79a26ad48775944299be6aaf9fb1d5302c1ed75b",
"is_verified": false,
"line_number": 79
},
{
"type": "Base64 High Entropy String",
"filename": "autogpt_platform/frontend/src/lib/autogpt-server-api/utils.ts",
"hashed_secret": "a3b62b44500a1612e48d4cab8294df81561b3b1a",
"is_verified": false,
"line_number": 81
},
{
"type": "Base64 High Entropy String",
"filename": "autogpt_platform/frontend/src/lib/autogpt-server-api/utils.ts",
"hashed_secret": "a58979bd0b21ef4f50417d001008e60dd7a85c64",
"is_verified": false,
"line_number": 83
},
{
"type": "Base64 High Entropy String",
"filename": "autogpt_platform/frontend/src/lib/autogpt-server-api/utils.ts",
"hashed_secret": "6cb6e075f8e8c7c850f9d128d6608e5dbe209a79",
"is_verified": false,
"line_number": 85
}
],
"autogpt_platform/frontend/src/lib/constants.ts": [
{
"type": "Secret Keyword",
"filename": "autogpt_platform/frontend/src/lib/constants.ts",
"hashed_secret": "27b924db06a28cc755fb07c54f0fddc30659fe4d",
"is_verified": false,
"line_number": 10
}
],
"autogpt_platform/frontend/src/tests/credentials/index.ts": [
{
"type": "Secret Keyword",
"filename": "autogpt_platform/frontend/src/tests/credentials/index.ts",
"hashed_secret": "c18006fc138809314751cd1991f1e0b820fabd37",
"is_verified": false,
"line_number": 4
}
]
},
"generated_at": "2026-04-02T13:10:54Z"
}

View File

@@ -205,6 +205,19 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta):
KIMI_K2 = "moonshotai/kimi-k2"
QWEN3_235B_A22B_THINKING = "qwen/qwen3-235b-a22b-thinking-2507"
QWEN3_CODER = "qwen/qwen3-coder"
# Z.ai (Zhipu) models
ZAI_GLM_4_32B = "z-ai/glm-4-32b"
ZAI_GLM_4_5 = "z-ai/glm-4.5"
ZAI_GLM_4_5_AIR = "z-ai/glm-4.5-air"
ZAI_GLM_4_5_AIR_FREE = "z-ai/glm-4.5-air:free"
ZAI_GLM_4_5V = "z-ai/glm-4.5v"
ZAI_GLM_4_6 = "z-ai/glm-4.6"
ZAI_GLM_4_6V = "z-ai/glm-4.6v"
ZAI_GLM_4_7 = "z-ai/glm-4.7"
ZAI_GLM_4_7_FLASH = "z-ai/glm-4.7-flash"
ZAI_GLM_5 = "z-ai/glm-5"
ZAI_GLM_5_TURBO = "z-ai/glm-5-turbo"
ZAI_GLM_5V_TURBO = "z-ai/glm-5v-turbo"
# Llama API models
LLAMA_API_LLAMA_4_SCOUT = "Llama-4-Scout-17B-16E-Instruct-FP8"
LLAMA_API_LLAMA4_MAVERICK = "Llama-4-Maverick-17B-128E-Instruct-FP8"
@@ -630,6 +643,43 @@ MODEL_METADATA = {
LlmModel.QWEN3_CODER: ModelMetadata(
"open_router", 262144, 262144, "Qwen 3 Coder", "OpenRouter", "Qwen", 3
),
# https://openrouter.ai/models?q=z-ai
LlmModel.ZAI_GLM_4_32B: ModelMetadata(
"open_router", 128000, 128000, "GLM 4 32B", "OpenRouter", "Z.ai", 1
),
LlmModel.ZAI_GLM_4_5: ModelMetadata(
"open_router", 131072, 98304, "GLM 4.5", "OpenRouter", "Z.ai", 2
),
LlmModel.ZAI_GLM_4_5_AIR: ModelMetadata(
"open_router", 131072, 98304, "GLM 4.5 Air", "OpenRouter", "Z.ai", 1
),
LlmModel.ZAI_GLM_4_5_AIR_FREE: ModelMetadata(
"open_router", 131072, 96000, "GLM 4.5 Air (Free)", "OpenRouter", "Z.ai", 1
),
LlmModel.ZAI_GLM_4_5V: ModelMetadata(
"open_router", 65536, 16384, "GLM 4.5V", "OpenRouter", "Z.ai", 2
),
LlmModel.ZAI_GLM_4_6: ModelMetadata(
"open_router", 204800, 204800, "GLM 4.6", "OpenRouter", "Z.ai", 1
),
LlmModel.ZAI_GLM_4_6V: ModelMetadata(
"open_router", 131072, 131072, "GLM 4.6V", "OpenRouter", "Z.ai", 1
),
LlmModel.ZAI_GLM_4_7: ModelMetadata(
"open_router", 202752, 65535, "GLM 4.7", "OpenRouter", "Z.ai", 1
),
LlmModel.ZAI_GLM_4_7_FLASH: ModelMetadata(
"open_router", 202752, 202752, "GLM 4.7 Flash", "OpenRouter", "Z.ai", 1
),
LlmModel.ZAI_GLM_5: ModelMetadata(
"open_router", 80000, 80000, "GLM 5", "OpenRouter", "Z.ai", 2
),
LlmModel.ZAI_GLM_5_TURBO: ModelMetadata(
"open_router", 202752, 131072, "GLM 5 Turbo", "OpenRouter", "Z.ai", 3
),
LlmModel.ZAI_GLM_5V_TURBO: ModelMetadata(
"open_router", 202752, 131072, "GLM 5V Turbo", "OpenRouter", "Z.ai", 3
),
# Llama API models
LlmModel.LLAMA_API_LLAMA_4_SCOUT: ModelMetadata(
"llama_api",

View File

@@ -23,6 +23,7 @@ from backend.copilot.model import (
ChatMessage,
ChatSession,
get_chat_session,
maybe_append_user_message,
update_session_title,
upsert_chat_session,
)
@@ -451,22 +452,12 @@ async def stream_chat_completion_baseline(
f"Session {session_id} not found. Please create a new session first."
)
# Append user message (skip if it's an exact duplicate of the last message,
# e.g. from a network retry)
new_role = "user" if is_user_message else "assistant"
if message and (
len(session.messages) == 0
or not (
session.messages[-1].role == new_role
and session.messages[-1].content == message
)
):
session.messages.append(ChatMessage(role=new_role, content=message))
if maybe_append_user_message(session, message, is_user_message):
if is_user_message:
track_user_message(
user_id=user_id,
session_id=session_id,
message_length=len(message),
message_length=len(message or ""),
)
session = await upsert_chat_session(session)

View File

@@ -23,8 +23,9 @@ from .model import (
ChatSession,
ChatSessionInfo,
ChatSessionMetadata,
invalidate_session_cache,
cache_chat_session,
)
from .model import get_chat_session as get_chat_session_cached
logger = logging.getLogger(__name__)
@@ -380,8 +381,11 @@ async def update_tool_message_content(
async def set_turn_duration(session_id: str, duration_ms: int) -> None:
"""Set durationMs on the last assistant message in a session.
Also invalidates the Redis session cache so the next GET returns
the updated duration.
Updates the Redis cache in-place instead of invalidating it.
Invalidation would delete the key, creating a window where concurrent
``get_chat_session`` calls re-populate the cache from DB — potentially
with stale data if the DB write from the previous turn hasn't propagated.
This race caused duplicate user messages on the next turn.
"""
last_msg = await PrismaChatMessage.prisma().find_first(
where={"sessionId": session_id, "role": "assistant"},
@@ -392,5 +396,13 @@ async def set_turn_duration(session_id: str, duration_ms: int) -> None:
where={"id": last_msg.id},
data={"durationMs": duration_ms},
)
# Invalidate cache so the session is re-fetched from DB with durationMs
await invalidate_session_cache(session_id)
# Update cache in-place rather than invalidating to avoid a
# race window where the empty cache gets re-populated with
# stale data by a concurrent get_chat_session call.
session = await get_chat_session_cached(session_id)
if session and session.messages:
for msg in reversed(session.messages):
if msg.role == "assistant":
msg.duration_ms = duration_ms
break
await cache_chat_session(session)

View File

@@ -0,0 +1,54 @@
import pytest
from .db import set_turn_duration
from .model import ChatMessage, ChatSession, get_chat_session, upsert_chat_session
@pytest.mark.asyncio(loop_scope="session")
async def test_set_turn_duration_updates_cache_in_place(setup_test_user, test_user_id):
    """set_turn_duration patches the cached session without invalidation.

    After set_turn_duration runs, the Redis-cached session must already
    carry the new durationMs on its last assistant message — i.e. the
    cache was updated in-place rather than deleted and lazily
    re-populated (which could race with concurrent get_chat_session
    calls).
    """
    chat = ChatSession.new(user_id=test_user_id, dry_run=False)
    chat.messages = [
        ChatMessage(role="user", content="hello"),
        ChatMessage(role="assistant", content="hi there"),
    ]
    chat = await upsert_chat_session(chat)

    # Warm the cache and confirm no duration has been recorded yet.
    before = await get_chat_session(chat.session_id, test_user_id)
    assert before is not None
    assert before.messages[-1].duration_ms is None

    # Patch the turn duration — the cache should be updated in-place.
    await set_turn_duration(chat.session_id, 1234)

    # Read back (served from cache) — the update must already be visible.
    after = await get_chat_session(chat.session_id, test_user_id)
    assert after is not None
    assistant_messages = [m for m in after.messages if m.role == "assistant"]
    assert len(assistant_messages) == 1
    assert assistant_messages[0].duration_ms == 1234
@pytest.mark.asyncio(loop_scope="session")
async def test_set_turn_duration_no_assistant_message(setup_test_user, test_user_id):
    """set_turn_duration is a no-op when there are no assistant messages."""
    chat = ChatSession.new(user_id=test_user_id, dry_run=False)
    chat.messages = [ChatMessage(role="user", content="hello")]
    chat = await upsert_chat_session(chat)

    # Must not raise even though there is nothing to update.
    await set_turn_duration(chat.session_id, 5678)

    cached = await get_chat_session(chat.session_id, test_user_id)
    assert cached is not None
    # The lone user message must remain untouched.
    assert cached.messages[0].duration_ms is None

View File

@@ -81,6 +81,49 @@ class ChatMessage(BaseModel):
)
def is_message_duplicate(
    messages: list[ChatMessage],
    role: str,
    content: str,
) -> bool:
    """Return True if *content* already appears in the current pending turn.

    Only the trailing run of messages sharing *role* is inspected; the scan
    stops at the first message with a different role. This way a message
    legitimately repeated across different turns is never suppressed, while
    same-turn duplicates (e.g. from a stale cache or a network retry) are
    still caught.
    """
    for candidate in reversed(messages):
        if candidate.role != role:
            # Left the trailing same-role run — earlier turns don't count.
            break
        if candidate.content == content:
            return True
    return False
def maybe_append_user_message(
    session: "ChatSession",
    message: str | None,
    is_user_message: bool,
) -> bool:
    """Append a user/assistant message to *session* if not already present.

    The route handler already persists the user message before enqueueing,
    so the trailing same-role messages are checked first to avoid
    re-appending when the session cache is slightly stale.

    Returns True if the message was appended, False if skipped.
    """
    if not message:
        # Nothing to append for None/empty content.
        return False

    role = "user" if is_user_message else "assistant"
    already_present = is_message_duplicate(session.messages, role, message)
    if not already_present:
        session.messages.append(ChatMessage(role=role, content=message))
    return not already_present
class Usage(BaseModel):
prompt_tokens: int
completion_tokens: int

View File

@@ -17,6 +17,8 @@ from .model import (
ChatSession,
Usage,
get_chat_session,
is_message_duplicate,
maybe_append_user_message,
upsert_chat_session,
)
@@ -424,3 +426,151 @@ async def test_concurrent_saves_collision_detection(setup_test_user, test_user_i
assert "Streaming message 1" in contents
assert "Streaming message 2" in contents
assert "Callback result" in contents
# --------------------------------------------------------------------------- #
# is_message_duplicate #
# --------------------------------------------------------------------------- #
def test_duplicate_detected_in_trailing_same_role():
    """A repeated user message at the tail of the history is flagged."""
    history = [
        ChatMessage(role="user", content="hello"),
        ChatMessage(role="assistant", content="hi there"),
        ChatMessage(role="user", content="yes"),
    ]
    assert is_message_duplicate(history, "user", "yes") is True
def test_duplicate_not_detected_across_turns():
    """Identical text from a previous turn (behind an assistant reply) is allowed."""
    history = [
        ChatMessage(role="user", content="yes"),
        ChatMessage(role="assistant", content="ok"),
    ]
    assert is_message_duplicate(history, "user", "yes") is False
def test_no_duplicate_on_empty_messages():
    """An empty history can never contain a duplicate."""
    assert is_message_duplicate([], "user", "hello") is False
def test_no_duplicate_when_content_differs():
    """Different text in the trailing same-role block is not a duplicate."""
    history = [
        ChatMessage(role="assistant", content="response"),
        ChatMessage(role="user", content="first message"),
    ]
    assert is_message_duplicate(history, "user", "second message") is False
def test_duplicate_with_multiple_trailing_same_role():
    """Every message in the consecutive trailing same-role run is checked."""
    history = [
        ChatMessage(role="assistant", content="response"),
        ChatMessage(role="user", content="msg1"),
        ChatMessage(role="user", content="msg2"),
    ]
    for text, expected in (("msg1", True), ("msg2", True), ("msg3", False)):
        assert is_message_duplicate(history, "user", text) is expected
def test_duplicate_check_for_assistant_role():
    """Assistant-role duplicates are detected the same way as user ones."""
    history = [
        ChatMessage(role="user", content="hi"),
        ChatMessage(role="assistant", content="hello"),
        ChatMessage(role="assistant", content="how can I help?"),
    ]
    assert is_message_duplicate(history, "assistant", "hello") is True
    assert is_message_duplicate(history, "assistant", "new response") is False
def test_no_false_positive_when_content_is_none():
    """content=None entries in the trailing block never match a string."""
    history = [
        ChatMessage(role="user", content=None),
        ChatMessage(role="user", content="hello"),
    ]
    assert is_message_duplicate(history, "user", "hello") is True

    # A lone None-content message must not match any string.
    only_none = [ChatMessage(role="user", content=None)]
    assert is_message_duplicate(only_none, "user", "hello") is False
def test_all_same_role_messages():
    """With a single-role history the whole list is scanned."""
    history = [
        ChatMessage(role="user", content=text)
        for text in ("first", "second", "third")
    ]
    assert is_message_duplicate(history, "user", "first") is True
    assert is_message_duplicate(history, "user", "new") is False
# --------------------------------------------------------------------------- #
# maybe_append_user_message #
# --------------------------------------------------------------------------- #
def test_maybe_append_user_message_appends_new():
    """A fresh user message is appended and True is returned."""
    chat = ChatSession.new(user_id="u", dry_run=False)
    chat.messages = [ChatMessage(role="assistant", content="hello")]

    appended = maybe_append_user_message(chat, "new msg", is_user_message=True)

    assert appended is True
    assert len(chat.messages) == 2
    tail = chat.messages[-1]
    assert tail.role == "user"
    assert tail.content == "new msg"
def test_maybe_append_user_message_skips_duplicate():
    """A same-turn duplicate user message is dropped and False is returned."""
    chat = ChatSession.new(user_id="u", dry_run=False)
    chat.messages = [
        ChatMessage(role="assistant", content="hello"),
        ChatMessage(role="user", content="dup"),
    ]

    assert maybe_append_user_message(chat, "dup", is_user_message=True) is False
    assert len(chat.messages) == 2
def test_maybe_append_user_message_none_message():
    """None or empty messages are rejected without touching the session."""
    chat = ChatSession.new(user_id="u", dry_run=False)
    for empty in (None, ""):
        assert maybe_append_user_message(chat, empty, is_user_message=True) is False
    assert len(chat.messages) == 0
def test_maybe_append_assistant_message():
    """With is_user_message=False the message is stored with assistant role."""
    chat = ChatSession.new(user_id="u", dry_run=False)
    chat.messages = [ChatMessage(role="user", content="hi")]

    assert maybe_append_user_message(chat, "response", is_user_message=False) is True
    assert chat.messages[-1].role == "assistant"
    assert chat.messages[-1].content == "response"
def test_maybe_append_assistant_skips_duplicate():
    """A duplicate assistant message is skipped as well."""
    chat = ChatSession.new(user_id="u", dry_run=False)
    chat.messages = [
        ChatMessage(role="user", content="hi"),
        ChatMessage(role="assistant", content="dup"),
    ]

    assert maybe_append_user_message(chat, "dup", is_user_message=False) is False
    assert len(chat.messages) == 2

View File

@@ -126,6 +126,21 @@ After building the file, reference it with `@@agptfile:` in other tools:
- When spawning sub-agents for research, ensure each has a distinct
non-overlapping scope to avoid redundant searches.
### Tool Discovery Priority
When the user asks to interact with a service or API, follow this order:
1. **find_block first** — Search platform blocks with `find_block`. The platform has hundreds of built-in blocks (Google Sheets, Docs, Calendar, Gmail, Slack, GitHub, etc.) that work without extra setup.
2. **run_mcp_tool** — If no matching block exists, check if a hosted MCP server is available for the service. Only use known MCP server URLs from the registry.
3. **SendAuthenticatedWebRequestBlock** — If no block or MCP server exists, use `SendAuthenticatedWebRequestBlock` with existing host-scoped credentials. Check available credentials via `connect_integration`.
4. **Manual API call** — As a last resort, guide the user to set up credentials and use `SendAuthenticatedWebRequestBlock` with direct API calls.
**Never skip step 1.** Built-in blocks are more reliable, tested, and user-friendly than MCP or raw API calls.
### Sub-agent tasks
- When using the Task tool, NEVER set `run_in_background` to true.
All tasks must run in the foreground.

View File

@@ -53,6 +53,12 @@ Steps:
or fix manually based on the error descriptions. Iterate until valid.
8. **Save**: Call `create_agent` (new) or `edit_agent` (existing) with
the final `agent_json`
9. **Dry-run**: ALWAYS call `run_agent` with `dry_run=True` and
`wait_for_result=120` to verify the agent works end-to-end.
10. **Inspect & fix**: Check the dry-run output for errors. If issues are
found, call `edit_agent` to fix and dry-run again. Repeat until the
simulation passes or the problems are clearly unfixable.
See "REQUIRED: Dry-Run Verification Loop" section below for details.
### Agent JSON Structure
@@ -246,19 +252,51 @@ call in a loop until the task is complete:
Regular blocks work exactly like sub-agents as tools — wire each input
field from `source_name: "tools"` on the Orchestrator side.
### Testing with Dry Run
### REQUIRED: Dry-Run Verification Loop (create -> dry-run -> fix)
After saving an agent, suggest a dry run to validate wiring without consuming
real API calls, credentials, or credits:
After creating or editing an agent, you MUST dry-run it before telling the
user the agent is ready. NEVER skip this step.
1. **Run**: Call `run_agent` or `run_block` with `dry_run=True` and provide
sample inputs. This executes the graph with mock outputs, verifying that
links resolve correctly and required inputs are satisfied.
2. **Check results**: Call `view_agent_output` with `show_execution_details=True`
to inspect the full node-by-node execution trace. This shows what each node
received as input and produced as output, making it easy to spot wiring issues.
3. **Iterate**: If the dry run reveals wiring issues or missing inputs, fix
the agent JSON and re-save before suggesting a real execution.
#### Step-by-step workflow
1. **Create/Edit**: Call `create_agent` or `edit_agent` to save the agent.
2. **Dry-run**: Call `run_agent` with `dry_run=True`, `wait_for_result=120`,
and realistic sample inputs that exercise every path in the agent. This
simulates execution using an LLM for each block — no real API calls,
credentials, or credits are consumed.
3. **Inspect output**: Examine the dry-run result for problems. If
`wait_for_result` returns only a summary, call
`view_agent_output(execution_id=..., show_execution_details=True)` to
see the full node-by-node execution trace. Look for:
- **Errors / failed nodes** — a node raised an exception or returned an
error status. Common causes: wrong `source_name`/`sink_name` in links,
missing `input_default` values, or referencing a nonexistent block output.
- **Null / empty outputs** — data did not flow through a link. Verify that
`source_name` and `sink_name` match the block schemas exactly (case-
sensitive, including nested `_#_` notation).
- **Nodes that never executed** — the node was not reached. Likely a
missing or broken link from an upstream node.
- **Unexpected values** — data arrived but in the wrong type or
structure. Check type compatibility between linked ports.
4. **Fix**: If any issues are found, call `edit_agent` with the corrected
agent JSON, then go back to step 2.
5. **Repeat**: Continue the dry-run -> fix cycle until the simulation passes
or the problems are clearly unfixable. If you stop making progress,
report the remaining issues to the user and ask for guidance.
#### Good vs bad dry-run output
**Good output** (agent is ready):
- All nodes executed successfully (no errors in the execution trace)
- Data flows through every link with non-null, correctly-typed values
- The final `AgentOutputBlock` contains a meaningful result
- Status is `COMPLETED`
**Bad output** (needs fixing):
- Status is `FAILED` — check the error message for the failing node
- An output node received `null` — trace back to find the broken link
- A node received data in the wrong format (e.g. string where list expected)
- Nodes downstream of a failing node were skipped entirely
**Special block behaviour in dry-run mode:**
- **OrchestratorBlock** and **AgentExecutorBlock** execute for real so the

View File

@@ -28,13 +28,12 @@ Each result includes a `remotes` array with the exact server URL to use.
### Important: Check blocks first
Before using `run_mcp_tool`, always check if the platform already has blocks for the service
using `find_block`. The platform has hundreds of built-in blocks (Google Sheets, Google Docs,
Google Calendar, Gmail, etc.) that work without MCP setup.
Always follow the **Tool Discovery Priority** described in the tool notes:
call `find_block` before resorting to `run_mcp_tool`.
Only use `run_mcp_tool` when:
- The service is in the known hosted MCP servers list above, OR
- You searched `find_block` first and found no matching blocks
- You searched `find_block` first and found no matching blocks, AND
- The service is in the known hosted MCP servers list above or found via the registry API
**Never guess or construct MCP server URLs.** Only use URLs from the known servers list above
or from the `remotes[].url` field in MCP registry search results.

View File

@@ -63,6 +63,7 @@ from ..model import (
ChatMessage,
ChatSession,
get_chat_session,
maybe_append_user_message,
update_session_title,
upsert_chat_session,
)
@@ -130,6 +131,11 @@ _CIRCUIT_BREAKER_ERROR_MSG = (
"Try breaking your request into smaller parts."
)
# Idle timeout: abort the stream if no meaningful SDK message (only heartbeats)
# arrives for this many seconds. This catches hung tool calls (e.g. WebSearch
# hanging on a search provider that never responds).
_IDLE_TIMEOUT_SECONDS = 10 * 60 # 10 minutes
# Patterns that indicate the prompt/request exceeds the model's context limit.
# Matched case-insensitively against the full exception chain.
_PROMPT_TOO_LONG_PATTERNS: tuple[str, ...] = (
@@ -1272,6 +1278,8 @@ async def _run_stream_attempt(
await client.query(state.query_message, session_id=ctx.session_id)
state.transcript_builder.append_user(content=ctx.current_message)
_last_real_msg_time = time.monotonic()
async for sdk_msg in _iter_sdk_messages(client):
# Heartbeat sentinel — refresh lock and keep SSE alive
if sdk_msg is None:
@@ -1279,8 +1287,34 @@ async def _run_stream_attempt(
for ev in ctx.compaction.emit_start_if_ready():
yield ev
yield StreamHeartbeat()
# Idle timeout: if no real SDK message for too long, a tool
# call is likely hung (e.g. WebSearch provider not responding).
idle_seconds = time.monotonic() - _last_real_msg_time
if idle_seconds >= _IDLE_TIMEOUT_SECONDS:
logger.error(
"%s Idle timeout after %.0fs with no SDK message — "
"aborting stream (likely hung tool call)",
ctx.log_prefix,
idle_seconds,
)
stream_error_msg = (
"A tool call appears to be stuck "
"(no response for 10 minutes). "
"Please try again."
)
stream_error_code = "idle_timeout"
_append_error_marker(ctx.session, stream_error_msg, retryable=True)
yield StreamError(
errorText=stream_error_msg,
code=stream_error_code,
)
ended_with_stream_error = True
break
continue
_last_real_msg_time = time.monotonic()
logger.info(
"%s Received: %s %s (unresolved=%d, current=%d, resolved=%d)",
ctx.log_prefix,
@@ -1529,9 +1563,21 @@ async def _run_stream_attempt(
# --- Intermediate persistence ---
# Flush session messages to DB periodically so page reloads
# show progress during long-running turns.
#
# IMPORTANT: Skip the flush while tool calls are pending
# (tool_calls set on assistant but results not yet received).
# The DB save is append-only (uses start_sequence), so if we
# flush the assistant message before tool_calls are set on it
# (text and tool_use arrive as separate SDK events), the
# tool_calls update is lost — the next flush starts past it.
_msgs_since_flush += 1
now = time.monotonic()
if (
has_pending_tools = (
acc.has_appended_assistant
and acc.accumulated_tool_calls
and not acc.has_tool_results
)
if not has_pending_tools and (
_msgs_since_flush >= _FLUSH_MESSAGE_THRESHOLD
or (now - _last_flush_time) >= _FLUSH_INTERVAL_SECONDS
):
@@ -1670,19 +1716,12 @@ async def stream_chat_completion_sdk(
)
session.messages.pop()
# Append the new message to the session if it's not already there
new_message_role = "user" if is_user_message else "assistant"
if message and (
len(session.messages) == 0
or not (
session.messages[-1].role == new_message_role
and session.messages[-1].content == message
)
):
session.messages.append(ChatMessage(role=new_message_role, content=message))
if maybe_append_user_message(session, message, is_user_message):
if is_user_message:
track_user_message(
user_id=user_id, session_id=session_id, message_length=len(message)
user_id=user_id,
session_id=session_id,
message_length=len(message or ""),
)
# Structured log prefix: [SDK][<session>][T<turn>]

View File

@@ -42,7 +42,10 @@ class GetAgentBuildingGuideTool(BaseTool):
@property
def description(self) -> str:
return "Get the agent JSON building guide (nodes, links, AgentExecutorBlock, MCPToolBlock usage). Call before generating agent JSON."
return (
"Get the agent JSON building guide (nodes, links, AgentExecutorBlock, MCPToolBlock usage, "
"and the create->dry-run->fix iterative workflow). Call before generating agent JSON."
)
@property
def parameters(self) -> dict[str, Any]:

View File

@@ -0,0 +1,15 @@
"""Tests for GetAgentBuildingGuideTool."""
from backend.copilot.tools.get_agent_building_guide import _load_guide
def test_load_guide_returns_string():
    """The loader returns a non-trivial string (the markdown guide text)."""
    content = _load_guide()
    # A real guide is far longer than 100 characters; this guards against
    # an empty or stubbed-out guide file being shipped.
    assert isinstance(content, str) and len(content) > 100
def test_load_guide_caches():
    """Repeated calls return the exact same object (result is cached)."""
    first, second = _load_guide(), _load_guide()
    # Identity (not just equality) proves the cache returned the same object.
    assert first is second

View File

@@ -48,27 +48,41 @@ logger = logging.getLogger(__name__)
def get_inputs_from_schema(
input_schema: dict[str, Any],
exclude_fields: set[str] | None = None,
input_data: dict[str, Any] | None = None,
) -> list[dict[str, Any]]:
"""Extract input field info from JSON schema."""
"""Extract input field info from JSON schema.
When *input_data* is provided, each field's ``value`` key is populated
with the value the CoPilot already supplied — so the frontend can
prefill the form instead of showing empty inputs. Fields marked
``advanced`` in the schema are flagged so the frontend can hide them
by default (matching the builder behaviour).
"""
if not isinstance(input_schema, dict):
return []
exclude = exclude_fields or set()
properties = input_schema.get("properties", {})
required = set(input_schema.get("required", []))
provided = input_data or {}
return [
{
results: list[dict[str, Any]] = []
for name, schema in properties.items():
if name in exclude:
continue
entry: dict[str, Any] = {
"name": name,
"title": schema.get("title", name),
"type": schema.get("type", "string"),
"description": schema.get("description", ""),
"required": name in required,
"default": schema.get("default"),
"advanced": schema.get("advanced", False),
}
for name, schema in properties.items()
if name not in exclude
]
if name in provided:
entry["value"] = provided[name]
results.append(entry)
return results
async def execute_block(
@@ -446,7 +460,9 @@ async def prepare_block_for_execution(
requirements={
"credentials": missing_creds_list,
"inputs": get_inputs_from_schema(
input_schema, exclude_fields=credentials_fields
input_schema,
exclude_fields=credentials_fields,
input_data=input_data,
),
"execution_modes": ["immediate"],
},

View File

@@ -153,7 +153,11 @@ class RunAgentTool(BaseTool):
},
"dry_run": {
"type": "boolean",
"description": "Execute in preview mode.",
"description": (
"When true, simulates execution using an LLM for each block "
"— no real API calls, credentials, or credits. "
"See agent_generation_guide for the full workflow."
),
},
},
"required": ["dry_run"],

View File

@@ -147,6 +147,19 @@ MODEL_COST: dict[LlmModel, int] = {
LlmModel.KIMI_K2: 1,
LlmModel.QWEN3_235B_A22B_THINKING: 1,
LlmModel.QWEN3_CODER: 9,
# Z.ai (Zhipu) models
LlmModel.ZAI_GLM_4_32B: 1,
LlmModel.ZAI_GLM_4_5: 2,
LlmModel.ZAI_GLM_4_5_AIR: 1,
LlmModel.ZAI_GLM_4_5_AIR_FREE: 1,
LlmModel.ZAI_GLM_4_5V: 2,
LlmModel.ZAI_GLM_4_6: 1,
LlmModel.ZAI_GLM_4_6V: 1,
LlmModel.ZAI_GLM_4_7: 1,
LlmModel.ZAI_GLM_4_7_FLASH: 1,
LlmModel.ZAI_GLM_5: 2,
LlmModel.ZAI_GLM_5_TURBO: 4,
LlmModel.ZAI_GLM_5V_TURBO: 4,
# v0 by Vercel models
LlmModel.V0_1_5_MD: 1,
LlmModel.V0_1_5_LG: 2,

View File

@@ -0,0 +1,394 @@
"""Prompt regression tests AND functional tests for the dry-run verification loop.
NOTE: This file lives in test/copilot/ rather than being colocated with a
single source module because it is a cross-cutting test spanning multiple
modules: prompting.py, service.py, agent_generation_guide.md, and run_agent.py.
These tests verify that the create -> dry-run -> fix iterative workflow is
properly communicated through tool descriptions, the prompting supplement,
and the agent building guide.
After deduplication, the full dry-run workflow lives in the
agent_generation_guide.md only. The system prompt and individual tool
descriptions no longer repeat it — they keep a minimal footprint.
**Intentionally brittle**: the assertions check for specific substrings so
that accidental removal or rewording of key instructions is caught. If you
deliberately reword a prompt, update the corresponding assertion here.
--- Functional tests (added separately) ---
The dry-run loop is primarily a *prompt/guide* feature — the copilot reads
the guide and follows its instructions. There are no standalone Python
functions that implement "loop until passing" logic; the loop is driven by
the LLM. However, several pieces of real Python infrastructure make the
loop possible:
1. The ``run_agent`` and ``run_block`` OpenAI tool schemas expose a
``dry_run`` boolean parameter that the LLM must be able to set.
2. The ``RunAgentInput`` Pydantic model validates ``dry_run`` as a required
bool, so the executor can branch on it.
3. The ``_check_prerequisites`` method in ``RunAgentTool`` bypasses
credential and missing-input gates when ``dry_run=True``.
4. The guide documents the workflow steps in a specific order that the LLM
must follow: create/edit -> dry-run -> inspect -> fix -> repeat.
The functional test classes below exercise items 1-4 directly.
"""
import re
from pathlib import Path
from typing import Any, cast
import pytest
from openai.types.chat import ChatCompletionToolParam
from pydantic import ValidationError
from backend.copilot.prompting import get_sdk_supplement
from backend.copilot.service import DEFAULT_SYSTEM_PROMPT
from backend.copilot.tools import TOOL_REGISTRY
from backend.copilot.tools.run_agent import RunAgentInput
# Resolved once for the whole module so individual tests stay fast.
_SDK_SUPPLEMENT = get_sdk_supplement(use_e2b=False, cwd="/tmp/test")
# ---------------------------------------------------------------------------
# Prompt regression tests (original)
# ---------------------------------------------------------------------------
class TestSystemPromptBasics:
    """Baseline content checks on the system prompt.

    After deduplication the dry-run workflow lives only in the guide, so the
    system prompt is expected to carry tone and personality content only.
    """

    def test_mentions_automations(self):
        lowered = DEFAULT_SYSTEM_PROMPT.lower()
        assert "automations" in lowered

    def test_mentions_action_oriented(self):
        lowered = DEFAULT_SYSTEM_PROMPT.lower()
        assert "action-oriented" in lowered
class TestToolDescriptionsDryRunLoop:
    """Verify tool descriptions and parameters related to the dry-run loop."""

    @staticmethod
    def _params(tool_name: str) -> dict[str, Any]:
        # Shared accessor: pull the OpenAI JSON-schema "parameters" object
        # for a registered tool.
        tool_schema = TOOL_REGISTRY[tool_name].as_openai_tool()
        return cast(dict[str, Any], tool_schema["function"].get("parameters", {}))

    def test_get_agent_building_guide_mentions_workflow(self):
        description = TOOL_REGISTRY["get_agent_building_guide"].description
        assert "dry-run" in description.lower()

    def test_run_agent_dry_run_param_exists_and_is_boolean(self):
        properties = self._params("run_agent")["properties"]
        assert "dry_run" in properties
        assert properties["dry_run"]["type"] == "boolean"

    def test_run_agent_dry_run_param_mentions_simulation(self):
        """After deduplication the dry_run param description mentions simulation."""
        description = self._params("run_agent")["properties"]["dry_run"]["description"]
        assert "simulat" in description.lower()
class TestPromptingSupplementContent:
    """Verify the prompting supplement (via get_sdk_supplement) includes
    essential shared tool notes. After deduplication, the dry-run workflow
    lives only in the guide; the supplement carries storage, file-handling,
    and tool-discovery notes.
    """

    def test_includes_tool_discovery_priority(self):
        assert "Tool Discovery Priority" in _SDK_SUPPLEMENT

    def test_includes_find_block_first(self):
        # The previous check (`"find_block first" in X or "find_block" in X`)
        # was logically equivalent to only the second disjunct, since the
        # first substring implies the second. Assert the equivalent simple
        # form so the test's real strength is visible.
        assert "find_block" in _SDK_SUPPLEMENT

    def test_includes_send_authenticated_web_request(self):
        assert "SendAuthenticatedWebRequestBlock" in _SDK_SUPPLEMENT
class TestAgentBuildingGuideDryRunLoop:
    """Verify the agent building guide includes the dry-run loop."""

    @pytest.fixture
    def guide_content(self):
        # test/copilot/ -> repo root, then down into the SDK package.
        base_dir = Path(__file__).resolve().parents[2]
        guide_path = (
            base_dir / "backend" / "copilot" / "sdk" / "agent_generation_guide.md"
        )
        return guide_path.read_text(encoding="utf-8")

    def test_has_dry_run_verification_section(self, guide_content):
        assert "REQUIRED: Dry-Run Verification Loop" in guide_content

    def test_workflow_includes_dry_run_step(self, guide_content):
        assert "dry_run=True" in guide_content

    def test_mentions_good_vs_bad_output(self, guide_content):
        for marker in ("**Good output**", "**Bad output**"):
            assert marker in guide_content

    def test_mentions_repeat_until_pass(self, guide_content):
        lowered = guide_content.lower()
        assert "repeat" in lowered
        assert "clearly unfixable" in lowered

    def test_mentions_wait_for_result(self, guide_content):
        assert "wait_for_result=120" in guide_content

    def test_mentions_view_agent_output(self, guide_content):
        assert "view_agent_output" in guide_content

    def test_workflow_has_dry_run_and_inspect_steps(self, guide_content):
        for marker in ("**Dry-run**", "**Inspect & fix**"):
            assert marker in guide_content
# ---------------------------------------------------------------------------
# Functional tests: tool schema validation
# ---------------------------------------------------------------------------
class TestRunAgentToolSchema:
    """Validate the run_agent OpenAI tool schema exposes dry_run correctly.

    These go beyond substring checks — they verify the full schema structure
    that the LLM receives, ensuring the parameter is well-formed and will be
    parsed correctly by OpenAI function-calling.
    """

    @pytest.fixture
    def schema(self) -> ChatCompletionToolParam:
        return TOOL_REGISTRY["run_agent"].as_openai_tool()

    @staticmethod
    def _params(schema: ChatCompletionToolParam) -> dict[str, Any]:
        # Convenience accessor for the JSON-schema "parameters" object.
        return cast(dict[str, Any], schema["function"].get("parameters", {}))

    def test_schema_is_valid_openai_tool(self, schema: ChatCompletionToolParam):
        """The schema has the required top-level OpenAI structure."""
        assert schema["type"] == "function"
        assert "function" in schema
        function_def = schema["function"]
        for key in ("name", "description", "parameters"):
            assert key in function_def
        assert function_def["name"] == "run_agent"

    def test_dry_run_is_required(self, schema: ChatCompletionToolParam):
        """dry_run must be in 'required' so the LLM always provides it explicitly."""
        assert "dry_run" in self._params(schema).get("required", [])

    def test_dry_run_is_boolean_type(self, schema: ChatCompletionToolParam):
        """dry_run must be typed as boolean so the LLM generates true/false."""
        assert self._params(schema)["properties"]["dry_run"]["type"] == "boolean"

    def test_dry_run_description_is_nonempty(self, schema: ChatCompletionToolParam):
        """The description must be present and substantive for LLM guidance."""
        description = self._params(schema)["properties"]["dry_run"]["description"]
        assert isinstance(description, str)
        assert len(description) > 10, "Description too short to guide the LLM"

    def test_wait_for_result_coexists_with_dry_run(
        self, schema: ChatCompletionToolParam
    ):
        """wait_for_result must also be present — the guide instructs the LLM
        to pass both dry_run=True and wait_for_result=120 together."""
        properties = self._params(schema)["properties"]
        assert "wait_for_result" in properties
        assert properties["wait_for_result"]["type"] == "integer"
class TestRunBlockToolSchema:
    """Validate the run_block OpenAI tool schema exposes dry_run correctly."""

    @pytest.fixture
    def schema(self) -> ChatCompletionToolParam:
        return TOOL_REGISTRY["run_block"].as_openai_tool()

    @staticmethod
    def _params(schema: ChatCompletionToolParam) -> dict[str, Any]:
        # Convenience accessor for the JSON-schema "parameters" object.
        return cast(dict[str, Any], schema["function"].get("parameters", {}))

    def test_schema_is_valid_openai_tool(self, schema: ChatCompletionToolParam):
        assert schema["type"] == "function"
        function_def = schema["function"]
        assert function_def["name"] == "run_block"
        assert "parameters" in function_def

    def test_dry_run_exists_and_is_boolean(self, schema: ChatCompletionToolParam):
        properties = self._params(schema)["properties"]
        assert "dry_run" in properties
        assert properties["dry_run"]["type"] == "boolean"

    def test_dry_run_is_required(self, schema: ChatCompletionToolParam):
        """dry_run must be required — along with block_id and input_data."""
        required = self._params(schema).get("required", [])
        for field in ("dry_run", "block_id", "input_data"):
            assert field in required

    def test_dry_run_description_mentions_preview(
        self, schema: ChatCompletionToolParam
    ):
        description = self._params(schema)["properties"]["dry_run"]["description"]
        assert isinstance(description, str)
        assert (
            "preview mode" in description.lower()
        ), "run_block dry_run description should mention preview mode"
# ---------------------------------------------------------------------------
# Functional tests: RunAgentInput Pydantic model
# ---------------------------------------------------------------------------
class TestRunAgentInputModel:
    """Validate RunAgentInput Pydantic model handles dry_run correctly.

    The executor reads dry_run from this model, so it must parse, default,
    and validate properly.
    """

    @staticmethod
    def _build(**kwargs: Any) -> RunAgentInput:
        # Helper: construct the model with a standard slug unless overridden.
        kwargs.setdefault("username_agent_slug", "user/agent")
        return RunAgentInput(**kwargs)

    def test_dry_run_accepts_true(self):
        assert self._build(dry_run=True).dry_run is True

    def test_dry_run_accepts_false(self):
        """dry_run=False must be accepted when provided explicitly."""
        assert self._build(dry_run=False).dry_run is False

    def test_dry_run_coerces_truthy_int(self):
        """Pydantic bool fields coerce int 1 to True."""
        assert self._build(dry_run=1).dry_run is True

    def test_dry_run_coerces_falsy_int(self):
        """Pydantic bool fields coerce int 0 to False."""
        assert self._build(dry_run=0).dry_run is False

    def test_dry_run_with_wait_for_result(self):
        """The guide instructs passing both dry_run=True and wait_for_result=120.
        The model must accept this combination."""
        model = self._build(dry_run=True, wait_for_result=120)
        assert model.dry_run is True
        assert model.wait_for_result == 120

    def test_wait_for_result_upper_bound(self):
        """wait_for_result is bounded at 300 seconds (ge=0, le=300)."""
        with pytest.raises(ValidationError):
            self._build(dry_run=True, wait_for_result=301)

    def test_string_fields_are_stripped(self):
        """The strip_strings validator should strip whitespace from string fields."""
        model = self._build(username_agent_slug="  user/agent  ", dry_run=True)
        assert model.username_agent_slug == "user/agent"
# ---------------------------------------------------------------------------
# Functional tests: guide documents the correct workflow ordering
# ---------------------------------------------------------------------------
class TestGuideWorkflowOrdering:
    """Verify the guide documents workflow steps in the correct order.

    The LLM must see: create/edit -> dry-run -> inspect -> fix -> repeat.
    If these steps are reordered, the copilot would follow the wrong sequence.
    These tests verify *ordering*, not just presence.
    """

    @pytest.fixture
    def guide_content(self) -> str:
        # test/copilot/ -> repo root, then down into the SDK package.
        base_dir = Path(__file__).resolve().parents[2]
        guide_path = (
            base_dir / "backend" / "copilot" / "sdk" / "agent_generation_guide.md"
        )
        return guide_path.read_text(encoding="utf-8")

    @staticmethod
    def _verification_section(guide_content: str) -> str:
        # Everything from the verification-loop heading onward.
        start = guide_content.index("REQUIRED: Dry-Run Verification Loop")
        return guide_content[start:]

    def test_create_before_dry_run_in_workflow(self, guide_content: str):
        """Step 7 (Save/create_agent) must appear before step 8 (Dry-run)."""
        assert guide_content.index("create_agent") < guide_content.index(
            "dry_run=True"
        ), "create_agent must appear before dry_run=True in the workflow"

    def test_dry_run_before_inspect_in_verification_section(self, guide_content: str):
        """In the verification loop section, Dry-run step must come before
        Inspect & fix step."""
        section = self._verification_section(guide_content)
        assert section.index("**Dry-run**") < section.index(
            "**Inspect"
        ), "Dry-run step must come before Inspect & fix in the verification loop"

    def test_fix_before_repeat_in_verification_section(self, guide_content: str):
        """The Fix step must come before the Repeat step."""
        section = self._verification_section(guide_content)
        assert section.index("**Fix**") < section.index("**Repeat**")

    def test_good_output_before_bad_output(self, guide_content: str):
        """Good output examples should be listed before bad output examples,
        so the LLM sees the success pattern first."""
        assert guide_content.index("**Good output**") < guide_content.index(
            "**Bad output**"
        )

    def test_numbered_steps_in_verification_section(self, guide_content: str):
        """The step-by-step workflow should have numbered steps 1-5."""
        section = guide_content[guide_content.index("Step-by-step workflow") :]
        for step_num in range(1, 6):
            assert (
                f"{step_num}. " in section
            ), f"Missing numbered step {step_num} in verification workflow"

    def test_workflow_steps_are_in_numbered_order(self, guide_content: str):
        """The main workflow steps (1-9) must appear in ascending order."""
        # Restrict the search to the top-level workflow section, ending at
        # the next ### heading.
        start = guide_content.index("### Workflow for Creating/Editing Agents")
        end = guide_content.index("### Agent JSON Structure")
        workflow_section = guide_content[start:end]

        step_positions = [
            (num, match.start())
            for num in range(1, 10)
            if (match := re.search(rf"^{num}\.\s", workflow_section, re.MULTILINE))
        ]

        # All nine steps must be present ...
        assert (
            len(step_positions) >= 9
        ), f"Expected 9 workflow steps, found {len(step_positions)}"
        # ... and each step must appear after the previous one.
        for (prev_num, prev_pos), (curr_num, curr_pos) in zip(
            step_positions, step_positions[1:]
        ):
            assert prev_pos < curr_pos, (
                f"Step {prev_num} (pos {prev_pos}) should appear before "
                f"step {curr_num} (pos {curr_pos})"
            )

View File

@@ -98,6 +98,7 @@ services:
- CLAMD_CONF_MaxScanSize=100M
- CLAMD_CONF_MaxThreads=12
- CLAMD_CONF_ReadTimeout=300
- CLAMD_CONF_TCPAddr=0.0.0.0
healthcheck:
test: ["CMD-SHELL", "clamdscan --version || exit 1"]
interval: 30s

View File

@@ -2,7 +2,6 @@ import { MessageResponse } from "@/components/ai-elements/message";
import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard";
import { ExclamationMarkIcon } from "@phosphor-icons/react";
import { ToolUIPart, UIDataTypes, UIMessage, UITools } from "ai";
import { useState } from "react";
import { AskQuestionTool } from "../../../tools/AskQuestion/AskQuestion";
import { ConnectIntegrationTool } from "../../../tools/ConnectIntegrationTool/ConnectIntegrationTool";
import { CreateAgentTool } from "../../../tools/CreateAgent/CreateAgent";
@@ -29,12 +28,10 @@ import { parseSpecialMarkers, resolveWorkspaceUrls } from "../helpers";
*/
function WorkspaceMediaImage(props: React.JSX.IntrinsicElements["img"]) {
const { src, alt, ...rest } = props;
const [imgFailed, setImgFailed] = useState(false);
const isWorkspace = src?.includes("/workspace/files/") ?? false;
if (!src) return null;
if (alt?.startsWith("video:") || (imgFailed && isWorkspace)) {
if (alt?.startsWith("video:")) {
return (
<span className="my-2 inline-block">
<video
@@ -56,9 +53,6 @@ function WorkspaceMediaImage(props: React.JSX.IntrinsicElements["img"]) {
alt={alt || "Image"}
className="h-auto max-w-full rounded-md border border-zinc-200"
loading="lazy"
onError={() => {
if (isWorkspace) setImgFailed(true);
}}
{...rest}
/>
);

View File

@@ -334,4 +334,57 @@ describe("getAnimationText", () => {
});
expect(getAnimationText(part, "agent")).toBe("Agent still running\u2026");
});
it("shows agent completed with summary for sync agent", () => {
const part = makePart({
type: `tool-${TOOL_AGENT}`,
state: "output-available",
input: { description: "analyze code" },
output: { status: "completed" },
});
expect(getAnimationText(part, "agent")).toBe(
"Agent completed: analyze code",
);
});
it("shows agent completed without summary", () => {
const part = makePart({
type: `tool-${TOOL_AGENT}`,
state: "output-available",
output: {},
});
expect(getAnimationText(part, "agent")).toBe("Agent completed");
});
it("shows error text for web search failure", () => {
const part = makePart({
type: "tool-WebSearch",
state: "output-error",
});
expect(getAnimationText(part, "web")).toBe("Search failed");
});
it("shows error text for web fetch failure", () => {
const part = makePart({
type: "tool-web_fetch",
state: "output-error",
});
expect(getAnimationText(part, "web")).toBe("Fetch failed");
});
it("shows error text for browser failure", () => {
const part = makePart({
type: "tool-browser_navigate",
state: "output-error",
});
expect(getAnimationText(part, "browser")).toBe("Browser action failed");
});
it("shows fallback text for unknown state", () => {
const part = makePart({
type: "tool-custom_tool",
state: "unknown-state" as any,
});
expect(getAnimationText(part, "other")).toBe("Running Custom tool\u2026");
});
});

View File

@@ -6,25 +6,26 @@ import { Text } from "@/components/atoms/Text/Text";
import { CredentialsGroupedView } from "@/components/contextual/CredentialsInput/components/CredentialsGroupedView/CredentialsGroupedView";
import { FormRenderer } from "@/components/renderers/InputRenderer/FormRenderer";
import type { CredentialsMetaInput } from "@/lib/autogpt-server-api/types";
import { useState } from "react";
import { useEffect, useMemo, useState } from "react";
import { useCopilotChatActions } from "../../../../components/CopilotChatActionsProvider/useCopilotChatActions";
import { ContentMessage } from "../../../../components/ToolAccordion/AccordionContent";
import {
buildExpectedInputsSchema,
buildRunMessage,
buildSiblingInputsFromCredentials,
checkAllCredentialsComplete,
checkAllInputsComplete,
checkCanRun,
coerceCredentialFields,
coerceExpectedInputs,
extractInitialValues,
mergeInputValues,
} from "./helpers";
interface Props {
output: SetupRequirementsResponse;
/** Override the message sent to the chat when the user clicks Proceed after connecting credentials.
* Defaults to "Please re-run this step now." */
retryInstruction?: string;
/** Override the label shown above the credentials section.
* Defaults to "Credentials". */
credentialsLabel?: string;
/** Called after Proceed is clicked so the parent can persist the dismissed state
* across remounts (avoids re-enabling the Proceed button on remount). */
onComplete?: () => void;
}
@@ -39,8 +40,8 @@ export function SetupRequirementsCard({
const [inputCredentials, setInputCredentials] = useState<
Record<string, CredentialsMetaInput | undefined>
>({});
const [inputValues, setInputValues] = useState<Record<string, unknown>>({});
const [hasSent, setHasSent] = useState(false);
const [showAdvanced, setShowAdvanced] = useState(false);
const { credentialFields, requiredCredentials } = coerceCredentialFields(
output.setup_info.user_readiness?.missing_credentials,
@@ -50,57 +51,69 @@ export function SetupRequirementsCard({
(output.setup_info.requirements as Record<string, unknown>)?.inputs,
);
const inputSchema = buildExpectedInputsSchema(expectedInputs);
const initialValues = useMemo(
() => extractInitialValues(expectedInputs),
// eslint-disable-next-line react-hooks/exhaustive-deps -- stabilise on the raw prop
[output.setup_info.requirements],
);
const [inputValues, setInputValues] =
useState<Record<string, unknown>>(initialValues);
const initialValuesKey = JSON.stringify(initialValues);
useEffect(() => {
setInputValues((prev) => mergeInputValues(initialValues, prev));
// eslint-disable-next-line react-hooks/exhaustive-deps -- sync when serialised values change
}, [initialValuesKey]);
const hasAdvancedFields = expectedInputs.some((i) => i.advanced);
const inputSchema = buildExpectedInputsSchema(expectedInputs, showAdvanced);
// Build siblingInputs for credential modal host prefill.
// Prefer discriminator_values from the credential response, but also
// include values from input_data (e.g. url field) so the host pattern
// can be extracted even when discriminator_values is empty.
const siblingInputs = useMemo(() => {
const fromCreds = buildSiblingInputsFromCredentials(
output.setup_info.user_readiness?.missing_credentials,
);
return { ...inputValues, ...fromCreds };
}, [output.setup_info.user_readiness?.missing_credentials, inputValues]);
function handleCredentialChange(key: string, value?: CredentialsMetaInput) {
setInputCredentials((prev) => ({ ...prev, [key]: value }));
}
const needsCredentials = credentialFields.length > 0;
const isAllCredentialsComplete =
needsCredentials &&
[...requiredCredentials].every((key) => !!inputCredentials[key]);
const isAllCredsComplete = checkAllCredentialsComplete(
requiredCredentials,
inputCredentials,
);
const needsInputs = inputSchema !== null;
const requiredInputNames = expectedInputs
.filter((i) => i.required)
.map((i) => i.name);
const isAllInputsComplete =
needsInputs &&
requiredInputNames.every((name) => {
const v = inputValues[name];
return v !== undefined && v !== null && v !== "";
});
const needsInputs = expectedInputs.length > 0;
const isAllInputsDone = checkAllInputsComplete(expectedInputs, inputValues);
if (hasSent) {
return <ContentMessage>Connected. Continuing</ContentMessage>;
}
const canRun =
(!needsCredentials || isAllCredentialsComplete) &&
(!needsInputs || isAllInputsComplete);
const canRun = checkCanRun(
needsCredentials,
isAllCredsComplete,
isAllInputsDone,
);
function handleRun() {
setHasSent(true);
onComplete?.();
const parts: string[] = [];
if (needsCredentials) {
parts.push("I've configured the required credentials.");
}
if (needsInputs) {
const nonEmpty = Object.fromEntries(
Object.entries(inputValues).filter(
([, v]) => v !== undefined && v !== null && v !== "",
),
);
parts.push(`Run with these inputs: ${JSON.stringify(nonEmpty, null, 2)}`);
} else {
parts.push(retryInstruction ?? "Please re-run this step now.");
}
onSend(parts.join(" "));
onSend(
buildRunMessage(
needsCredentials,
needsInputs,
inputValues,
retryInstruction,
),
);
setInputValues({});
}
@@ -118,31 +131,44 @@ export function SetupRequirementsCard({
credentialFields={credentialFields}
requiredCredentials={requiredCredentials}
inputCredentials={inputCredentials}
inputValues={{}}
inputValues={siblingInputs}
onCredentialChange={handleCredentialChange}
/>
</div>
</div>
)}
{inputSchema && (
{(inputSchema || hasAdvancedFields) && (
<div className="rounded-2xl border bg-background p-3 pt-4">
<Text variant="small" className="w-fit border-b text-zinc-500">
Inputs
</Text>
<FormRenderer
jsonSchema={inputSchema}
className="mb-3 mt-3"
handleChange={(v) => setInputValues(v.formData ?? {})}
uiSchema={{
"ui:submitButtonOptions": { norender: true },
}}
initialValues={inputValues}
formContext={{
showHandles: false,
size: "small",
}}
/>
{inputSchema && (
<FormRenderer
jsonSchema={inputSchema}
className="mb-3 mt-3"
handleChange={(v) =>
setInputValues((prev) => ({ ...prev, ...(v.formData ?? {}) }))
}
uiSchema={{
"ui:submitButtonOptions": { norender: true },
}}
initialValues={inputValues}
formContext={{
showHandles: false,
size: "small",
}}
/>
)}
{hasAdvancedFields && (
<button
type="button"
className="text-xs text-muted-foreground underline"
onClick={() => setShowAdvanced((v) => !v)}
>
{showAdvanced ? "Hide advanced fields" : "Show advanced fields"}
</button>
)}
</div>
)}

View File

@@ -0,0 +1,247 @@
import { render, screen, fireEvent, cleanup } from "@testing-library/react";
import { afterEach, describe, expect, it, vi } from "vitest";
import { SetupRequirementsCard } from "../SetupRequirementsCard";
import type { SetupRequirementsResponse } from "@/app/api/__generated__/models/setupRequirementsResponse";
// Spy capturing the chat messages the card sends; asserted on in the tests below.
const mockOnSend = vi.fn();
vi.mock(
  "../../../../../components/CopilotChatActionsProvider/useCopilotChatActions",
  () => ({
    useCopilotChatActions: () => ({ onSend: mockOnSend }),
  }),
);
// Stub the credentials picker; tests only assert that it renders.
vi.mock(
  "@/components/contextual/CredentialsInput/components/CredentialsGroupedView/CredentialsGroupedView",
  () => ({
    CredentialsGroupedView: () => (
      <div data-testid="credentials-grouped-view">Credentials</div>
    ),
  }),
);
// Replace the RJSF form with a single button that emits a fixed formData
// change, so "filling the inputs" can be simulated with one click.
vi.mock("@/components/renderers/InputRenderer/FormRenderer", () => ({
  FormRenderer: ({
    handleChange,
  }: {
    handleChange: (e: { formData?: Record<string, unknown> }) => void;
  }) => (
    <div data-testid="form-renderer">
      <button
        data-testid="form-change"
        onClick={() => handleChange({ formData: { url: "https://test.com" } })}
      >
        Fill
      </button>
    </div>
  ),
}));
// Unmount rendered trees and clear spy call history between tests.
afterEach(() => {
  cleanup();
  mockOnSend.mockReset();
});
/**
 * Build a minimal SetupRequirementsResponse fixture.
 *
 * Overrides tweak the display message, the missing-credentials map, and the
 * expected inputs list; readiness flags are derived from what is overridden.
 */
function makeOutput(
  overrides: {
    message?: string;
    missingCredentials?: Record<string, unknown>;
    inputs?: unknown[];
  } = {},
): SetupRequirementsResponse {
  const missingCredentials = overrides.missingCredentials;
  const inputs = overrides.inputs;
  const response = {
    type: "setup_requirements",
    message: overrides.message ?? "Please configure credentials",
    session_id: "sess-1",
    setup_info: {
      agent_id: "agent-1",
      agent_name: "Test Agent",
      user_readiness: {
        // Ready only when nothing was flagged as missing.
        has_all_credentials: !missingCredentials,
        missing_credentials: missingCredentials ?? {},
        ready_to_run: !missingCredentials && !inputs,
      },
      requirements: {
        credentials: [],
        inputs: inputs ?? [],
        execution_modes: ["immediate"],
      },
    },
    graph_id: null,
    graph_version: null,
  };
  return response as SetupRequirementsResponse;
}
// Render-level tests for SetupRequirementsCard. FormRenderer and the
// credentials picker are mocked above, so these tests exercise the card's
// own gating logic (Proceed enablement, advanced toggle, sent message).
describe("SetupRequirementsCard", () => {
  it("renders the setup message", () => {
    render(<SetupRequirementsCard output={makeOutput()} />);
    expect(screen.getByText("Please configure credentials")).toBeDefined();
  });
  it("renders credential section when missing credentials are provided", () => {
    render(
      <SetupRequirementsCard
        output={makeOutput({
          missingCredentials: {
            api_key: {
              provider: "openai",
              types: ["api_key"],
            },
          },
        })}
      />,
    );
    expect(screen.getByTestId("credentials-grouped-view")).toBeDefined();
  });
  it("uses custom credentials label when provided", () => {
    render(
      <SetupRequirementsCard
        output={makeOutput({
          missingCredentials: {
            api_key: { provider: "openai", types: ["api_key"] },
          },
        })}
        credentialsLabel="API Keys"
      />,
    );
    expect(screen.getByText("API Keys")).toBeDefined();
  });
  it("renders input form when inputs are provided", () => {
    render(
      <SetupRequirementsCard
        output={makeOutput({
          inputs: [
            { name: "url", title: "URL", type: "string", required: true },
          ],
        })}
      />,
    );
    expect(screen.getByTestId("form-renderer")).toBeDefined();
    expect(screen.getByText("Inputs")).toBeDefined();
  });
  // Prefilled backend values should satisfy the required-inputs gate.
  it("renders Proceed button that is enabled when inputs are filled", () => {
    render(
      <SetupRequirementsCard
        output={makeOutput({
          inputs: [
            {
              name: "url",
              title: "URL",
              type: "string",
              required: true,
              value: "https://prefilled.com",
            },
          ],
        })}
      />,
    );
    const proceed = screen.getByText("Proceed");
    expect(proceed.closest("button")?.disabled).toBe(false);
  });
  it("calls onSend and shows Connected message when Proceed is clicked", () => {
    render(
      <SetupRequirementsCard
        output={makeOutput({
          inputs: [
            {
              name: "url",
              title: "URL",
              type: "string",
              required: true,
              value: "https://prefilled.com",
            },
          ],
        })}
      />,
    );
    fireEvent.click(screen.getByText("Proceed"));
    expect(mockOnSend).toHaveBeenCalledOnce();
    expect(screen.getByText(/Connected. Continuing/)).toBeDefined();
  });
  it("calls onComplete callback when Proceed is clicked", () => {
    const onComplete = vi.fn();
    render(
      <SetupRequirementsCard
        output={makeOutput({
          inputs: [
            {
              name: "url",
              title: "URL",
              type: "string",
              required: true,
              value: "https://prefilled.com",
            },
          ],
        })}
        onComplete={onComplete}
      />,
    );
    fireEvent.click(screen.getByText("Proceed"));
    expect(onComplete).toHaveBeenCalledOnce();
  });
  // Advanced-fields toggle behaviour.
  it("renders advanced toggle when advanced inputs exist", () => {
    render(
      <SetupRequirementsCard
        output={makeOutput({
          inputs: [
            {
              name: "debug",
              title: "Debug Mode",
              type: "boolean",
              advanced: true,
            },
          ],
        })}
      />,
    );
    expect(screen.getByText("Show advanced fields")).toBeDefined();
  });
  it("toggles advanced fields visibility", () => {
    render(
      <SetupRequirementsCard
        output={makeOutput({
          inputs: [
            { name: "url", title: "URL", type: "string", required: false },
            { name: "debug", title: "Debug", type: "boolean", advanced: true },
          ],
        })}
      />,
    );
    const toggle = screen.getByText("Show advanced fields");
    fireEvent.click(toggle);
    expect(screen.getByText("Hide advanced fields")).toBeDefined();
  });
  // NOTE(review): this test only checks rendering; it does not actually
  // verify that retryInstruction reaches onSend (Proceed stays disabled
  // because the mocked credentials view cannot be completed).
  it("includes retryInstruction in onSend message when no inputs needed", () => {
    render(
      <SetupRequirementsCard
        output={makeOutput({
          missingCredentials: {
            api_key: { provider: "openai", types: ["api_key"] },
          },
        })}
        retryInstruction="Retry the agent now"
      />,
    );
    // With credentials required but no auto-filling mechanism in the mock,
    // Proceed is disabled, but we're testing render only here
    expect(screen.getByText("Proceed")).toBeDefined();
  });
  it("does not render Proceed when neither credentials nor inputs are needed", () => {
    render(<SetupRequirementsCard output={makeOutput()} />);
    expect(screen.queryByText("Proceed")).toBeNull();
  });
});

View File

@@ -0,0 +1,741 @@
import { describe, expect, it } from "vitest";
import {
coerceCredentialFields,
buildSiblingInputsFromCredentials,
coerceExpectedInputs,
buildExpectedInputsSchema,
extractInitialValues,
mergeInputValues,
checkAllCredentialsComplete,
getRequiredInputNames,
checkAllInputsComplete,
checkCanRun,
buildRunMessage,
} from "../helpers";
describe("coerceCredentialFields", () => {
it("returns empty results for null input", () => {
const result = coerceCredentialFields(null);
expect(result.credentialFields).toEqual([]);
expect(result.requiredCredentials.size).toBe(0);
});
it("returns empty results for non-object input", () => {
const result = coerceCredentialFields("not-an-object");
expect(result.credentialFields).toEqual([]);
});
it("parses valid credential with api_key type", () => {
const input = {
cred1: {
provider: "github",
types: ["api_key"],
},
};
const result = coerceCredentialFields(input);
expect(result.credentialFields).toHaveLength(1);
expect(result.credentialFields[0][0]).toBe("cred1");
expect(result.requiredCredentials.has("cred1")).toBe(true);
});
it("filters out invalid credential types", () => {
const input = {
cred1: {
provider: "github",
types: ["invalid_type"],
},
};
const result = coerceCredentialFields(input);
expect(result.credentialFields).toHaveLength(0);
});
it("handles non-string items in types array", () => {
const input = {
cred1: {
provider: "github",
types: [123, null, "api_key", undefined],
},
};
const result = coerceCredentialFields(input);
expect(result.credentialFields).toHaveLength(1);
const schema = result.credentialFields[0][1] as Record<string, unknown>;
expect(schema.credentials_types).toEqual(["api_key"]);
});
it("skips entries with empty types array", () => {
const input = {
cred1: {
provider: "github",
types: [],
},
};
const result = coerceCredentialFields(input);
expect(result.credentialFields).toHaveLength(0);
});
it("skips entries without provider", () => {
const input = {
cred1: {
provider: "",
types: ["api_key"],
},
};
const result = coerceCredentialFields(input);
expect(result.credentialFields).toHaveLength(0);
});
it("includes discriminator when present", () => {
const input = {
cred1: {
provider: "custom",
types: ["host_scoped"],
discriminator: "url",
discriminator_values: ["https://example.com"],
},
};
const result = coerceCredentialFields(input);
expect(result.credentialFields).toHaveLength(1);
const schema = result.credentialFields[0][1] as Record<string, unknown>;
expect(schema.discriminator).toBe("url");
expect(schema.discriminator_values).toEqual(["https://example.com"]);
});
it("includes scopes when present", () => {
const input = {
cred1: {
provider: "google",
types: ["oauth2"],
scopes: ["read", "write"],
},
};
const result = coerceCredentialFields(input);
const schema = result.credentialFields[0][1] as Record<string, unknown>;
expect(schema.credentials_scopes).toEqual(["read", "write"]);
});
it("handles multiple credentials", () => {
const input = {
cred1: { provider: "github", types: ["api_key"] },
cred2: { provider: "google", types: ["oauth2"] },
};
const result = coerceCredentialFields(input);
expect(result.credentialFields).toHaveLength(2);
expect(result.requiredCredentials.size).toBe(2);
});
it("skips non-object values", () => {
const input = {
cred1: "invalid",
cred2: null,
cred3: { provider: "github", types: ["api_key"] },
};
const result = coerceCredentialFields(input);
expect(result.credentialFields).toHaveLength(1);
});
});
describe("buildSiblingInputsFromCredentials", () => {
it("returns empty object for null input", () => {
expect(buildSiblingInputsFromCredentials(null)).toEqual({});
});
it("returns empty object for non-object input", () => {
expect(buildSiblingInputsFromCredentials("string")).toEqual({});
});
it("extracts discriminator values", () => {
const input = {
cred1: {
discriminator: "url",
discriminator_values: ["https://example.com"],
},
};
const result = buildSiblingInputsFromCredentials(input);
expect(result.url).toBe("https://example.com");
});
it("takes only the first discriminator value", () => {
const input = {
cred1: {
discriminator: "host",
discriminator_values: ["first.com", "second.com"],
},
};
const result = buildSiblingInputsFromCredentials(input);
expect(result.host).toBe("first.com");
});
it("skips entries without discriminator", () => {
const input = {
cred1: { provider: "github" },
};
const result = buildSiblingInputsFromCredentials(input);
expect(Object.keys(result)).toHaveLength(0);
});
it("skips entries with empty discriminator_values", () => {
const input = {
cred1: { discriminator: "url", discriminator_values: [] },
};
const result = buildSiblingInputsFromCredentials(input);
expect(Object.keys(result)).toHaveLength(0);
});
it("skips non-object values in the credentials map", () => {
const input = {
cred1: "string-value",
cred2: null,
cred3: 42,
cred4: {
discriminator: "url",
discriminator_values: ["https://ok.com"],
},
};
const result = buildSiblingInputsFromCredentials(input);
expect(result.url).toBe("https://ok.com");
expect(Object.keys(result)).toHaveLength(1);
});
it("filters non-string discriminator_values", () => {
const input = {
cred1: {
discriminator: "url",
discriminator_values: [42, "https://valid.com", null],
},
};
const result = buildSiblingInputsFromCredentials(input);
expect(result.url).toBe("https://valid.com");
});
});
describe("coerceExpectedInputs", () => {
it("returns empty array for non-array input", () => {
expect(coerceExpectedInputs(null)).toEqual([]);
expect(coerceExpectedInputs("string")).toEqual([]);
});
it("parses valid input objects", () => {
const result = coerceExpectedInputs([
{ name: "query", title: "Search Query", type: "string", required: true },
]);
expect(result).toHaveLength(1);
expect(result[0].name).toBe("query");
expect(result[0].title).toBe("Search Query");
expect(result[0].type).toBe("string");
expect(result[0].required).toBe(true);
expect(result[0].advanced).toBe(false);
});
it("generates fallback name from index", () => {
const result = coerceExpectedInputs([{ type: "string" }]);
expect(result[0].name).toBe("input-0");
expect(result[0].title).toBe("input-0");
});
it("uses name as fallback title", () => {
const result = coerceExpectedInputs([{ name: "query", type: "string" }]);
expect(result[0].title).toBe("query");
});
it("includes description when present", () => {
const result = coerceExpectedInputs([
{ name: "q", type: "string", description: "The search query" },
]);
expect(result[0].description).toBe("The search query");
});
it("excludes empty description", () => {
const result = coerceExpectedInputs([
{ name: "q", type: "string", description: " " },
]);
expect(result[0].description).toBeUndefined();
});
it("includes value when present and non-null", () => {
const result = coerceExpectedInputs([
{ name: "q", type: "string", value: "default" },
]);
expect(result[0].value).toBe("default");
});
it("skips non-object array elements", () => {
const result = coerceExpectedInputs([
null,
"string",
{ name: "valid", type: "string" },
]);
expect(result).toHaveLength(1);
expect(result[0].name).toBe("valid");
});
it("uses 'unknown' for non-string type field", () => {
const result = coerceExpectedInputs([{ name: "q", type: 42 }]);
expect(result[0].type).toBe("unknown");
});
it("skips null value", () => {
const result = coerceExpectedInputs([
{ name: "q", type: "string", value: null },
]);
expect(result[0].value).toBeUndefined();
});
it("omits non-string discriminator_values from scopes in coerceCredentialFields", () => {
const input = {
cred1: {
provider: "github",
types: ["api_key"],
scopes: ["read", 42, null, "write"],
},
};
const result = coerceCredentialFields(input);
const schema = result.credentialFields[0][1] as Record<string, unknown>;
expect(schema.credentials_scopes).toEqual(["read", "write"]);
});
});
describe("buildExpectedInputsSchema", () => {
const inputs = [
{
name: "query",
title: "Query",
type: "string",
required: true,
advanced: false,
},
{
name: "limit",
title: "Limit",
type: "int",
required: false,
advanced: true,
},
];
it("returns null for empty inputs", () => {
expect(buildExpectedInputsSchema([])).toBeNull();
});
it("excludes advanced fields by default", () => {
const schema = buildExpectedInputsSchema(inputs);
expect(schema).not.toBeNull();
expect(schema!.properties).toHaveProperty("query");
expect(schema!.properties).not.toHaveProperty("limit");
});
it("includes advanced fields when showAdvanced is true", () => {
const schema = buildExpectedInputsSchema(inputs, true);
expect(schema!.properties).toHaveProperty("query");
expect(schema!.properties).toHaveProperty("limit");
});
it("maps types correctly", () => {
const allTypes = [
{ name: "a", title: "A", type: "str", required: false, advanced: false },
{ name: "b", title: "B", type: "int", required: false, advanced: false },
{
name: "c",
title: "C",
type: "float",
required: false,
advanced: false,
},
{
name: "d",
title: "D",
type: "bool",
required: false,
advanced: false,
},
{
name: "e",
title: "E",
type: "unknown_type",
required: false,
advanced: false,
},
];
const schema = buildExpectedInputsSchema(allTypes);
const props = schema!.properties as Record<string, Record<string, unknown>>;
expect(props.a.type).toBe("string");
expect(props.b.type).toBe("integer");
expect(props.c.type).toBe("number");
expect(props.d.type).toBe("boolean");
expect(props.e.type).toBe("string");
});
it("includes required array only for required fields", () => {
const schema = buildExpectedInputsSchema(inputs);
expect(schema!.required).toEqual(["query"]);
});
it("omits required when no fields are required", () => {
const optional = [
{
name: "q",
title: "Q",
type: "string",
required: false,
advanced: false,
},
];
const schema = buildExpectedInputsSchema(optional);
expect(schema!.required).toBeUndefined();
});
it("includes default value from input.value", () => {
const withDefault = [
{
name: "q",
title: "Q",
type: "string",
required: false,
advanced: false,
value: "hello",
},
];
const schema = buildExpectedInputsSchema(withDefault);
const props = schema!.properties as Record<string, Record<string, unknown>>;
expect(props.q.default).toBe("hello");
});
it("includes description in schema when present", () => {
const withDesc = [
{
name: "q",
title: "Q",
type: "string",
required: false,
advanced: false,
description: "A search query",
},
];
const schema = buildExpectedInputsSchema(withDesc);
const props = schema!.properties as Record<string, Record<string, unknown>>;
expect(props.q.description).toBe("A search query");
});
it("returns null when all inputs are advanced and showAdvanced is false", () => {
const advancedOnly = [
{
name: "limit",
title: "Limit",
type: "int",
required: false,
advanced: true,
},
];
expect(buildExpectedInputsSchema(advancedOnly)).toBeNull();
expect(buildExpectedInputsSchema(advancedOnly, true)).not.toBeNull();
});
});
describe("extractInitialValues", () => {
it("returns empty object when no values are set", () => {
const inputs = [
{
name: "q",
title: "Q",
type: "string",
required: false,
advanced: false,
},
];
expect(extractInitialValues(inputs)).toEqual({});
});
it("extracts values that are present", () => {
const inputs = [
{
name: "q",
title: "Q",
type: "string",
required: false,
advanced: false,
value: "hello",
},
{
name: "n",
title: "N",
type: "number",
required: false,
advanced: false,
value: 42,
},
];
expect(extractInitialValues(inputs)).toEqual({ q: "hello", n: 42 });
});
it("skips null and undefined values", () => {
const inputs = [
{
name: "a",
title: "A",
type: "string",
required: false,
advanced: false,
value: null,
},
{
name: "b",
title: "B",
type: "string",
required: false,
advanced: false,
},
];
expect(extractInitialValues(inputs)).toEqual({});
});
});
describe("mergeInputValues", () => {
it("returns initial values when prev is empty", () => {
expect(mergeInputValues({ a: "1" }, {})).toEqual({ a: "1" });
});
it("preserves non-empty prev values over initial", () => {
expect(mergeInputValues({ a: "1", b: "2" }, { a: "override" })).toEqual({
a: "override",
b: "2",
});
});
it("skips undefined, null, and empty string from prev", () => {
expect(
mergeInputValues(
{ a: "init-a", b: "init-b", c: "init-c" },
{ a: undefined, b: null, c: "" },
),
).toEqual({ a: "init-a", b: "init-b", c: "init-c" });
});
it("adds new keys from prev that are not in initial", () => {
expect(mergeInputValues({ a: "1" }, { b: "new" })).toEqual({
a: "1",
b: "new",
});
});
it("preserves zero and false as valid values from prev", () => {
expect(mergeInputValues({ a: "1" }, { a: 0, b: false })).toEqual({
a: 0,
b: false,
});
});
});
describe("checkAllCredentialsComplete", () => {
it("returns true when all required credentials are present", () => {
const required = new Set(["cred1", "cred2"]);
const input = { cred1: { id: "a" }, cred2: { id: "b" } };
expect(checkAllCredentialsComplete(required, input)).toBe(true);
});
it("returns false when a required credential is missing", () => {
const required = new Set(["cred1", "cred2"]);
const input = { cred1: { id: "a" } };
expect(checkAllCredentialsComplete(required, input)).toBe(false);
});
it("returns false when a required credential is falsy", () => {
const required = new Set(["cred1"]);
const input = { cred1: undefined };
expect(checkAllCredentialsComplete(required, input)).toBe(false);
});
it("returns true when no credentials are required", () => {
expect(checkAllCredentialsComplete(new Set(), {})).toBe(true);
});
});
describe("getRequiredInputNames", () => {
it("returns names of required non-advanced inputs", () => {
const inputs = [
{
name: "a",
title: "A",
type: "string",
required: true,
advanced: false,
},
{
name: "b",
title: "B",
type: "string",
required: false,
advanced: false,
},
{ name: "c", title: "C", type: "string", required: true, advanced: true },
{
name: "d",
title: "D",
type: "string",
required: true,
advanced: false,
},
];
expect(getRequiredInputNames(inputs)).toEqual(["a", "d"]);
});
it("returns empty array when no inputs are required", () => {
const inputs = [
{
name: "a",
title: "A",
type: "string",
required: false,
advanced: false,
},
];
expect(getRequiredInputNames(inputs)).toEqual([]);
});
});
describe("checkAllInputsComplete", () => {
it("returns true when there are no inputs", () => {
expect(checkAllInputsComplete([], {})).toBe(true);
});
it("returns true when all required inputs have values", () => {
const inputs = [
{
name: "a",
title: "A",
type: "string",
required: true,
advanced: false,
},
{
name: "b",
title: "B",
type: "string",
required: false,
advanced: false,
},
];
expect(checkAllInputsComplete(inputs, { a: "value" })).toBe(true);
});
it("returns false when a required input is empty", () => {
const inputs = [
{
name: "a",
title: "A",
type: "string",
required: true,
advanced: false,
},
];
expect(checkAllInputsComplete(inputs, { a: "" })).toBe(false);
});
it("returns false when a required input is null", () => {
const inputs = [
{
name: "a",
title: "A",
type: "string",
required: true,
advanced: false,
},
];
expect(checkAllInputsComplete(inputs, { a: null })).toBe(false);
});
it("returns false when a required input is undefined", () => {
const inputs = [
{
name: "a",
title: "A",
type: "string",
required: true,
advanced: false,
},
];
expect(checkAllInputsComplete(inputs, {})).toBe(false);
});
it("ignores advanced required inputs", () => {
const inputs = [
{ name: "a", title: "A", type: "string", required: true, advanced: true },
];
expect(checkAllInputsComplete(inputs, {})).toBe(true);
});
it("returns true with only optional inputs present", () => {
const inputs = [
{
name: "a",
title: "A",
type: "string",
required: false,
advanced: false,
},
];
expect(checkAllInputsComplete(inputs, {})).toBe(true);
});
});
describe("checkCanRun", () => {
it("returns true when no credentials needed and inputs complete", () => {
expect(checkCanRun(false, false, true)).toBe(true);
});
it("returns false when credentials needed but not complete", () => {
expect(checkCanRun(true, false, true)).toBe(false);
});
it("returns false when inputs not complete", () => {
expect(checkCanRun(false, false, false)).toBe(false);
});
it("returns true when credentials needed and complete, inputs complete", () => {
expect(checkCanRun(true, true, true)).toBe(true);
});
it("returns false when both credentials and inputs incomplete", () => {
expect(checkCanRun(true, false, false)).toBe(false);
});
});
describe("buildRunMessage", () => {
it("includes credentials message when needsCredentials is true", () => {
const msg = buildRunMessage(true, false, {});
expect(msg).toContain("I've configured the required credentials.");
});
it("includes inputs when needsInputs is true", () => {
const msg = buildRunMessage(false, true, { query: "test" });
expect(msg).toContain("Run with these inputs:");
expect(msg).toContain('"query": "test"');
});
it("filters out empty/null/undefined values from inputs", () => {
const msg = buildRunMessage(false, true, {
a: "keep",
b: "",
c: null,
d: undefined,
});
expect(msg).toContain('"a": "keep"');
expect(msg).not.toContain('"b"');
expect(msg).not.toContain('"c"');
expect(msg).not.toContain('"d"');
});
it("uses retryInstruction when provided and no inputs", () => {
const msg = buildRunMessage(false, false, {}, "Retry now please.");
expect(msg).toBe("Retry now please.");
});
it("uses default retry message when no retryInstruction", () => {
const msg = buildRunMessage(false, false, {});
expect(msg).toBe("Please re-run this step now.");
});
it("combines credentials and inputs messages", () => {
const msg = buildRunMessage(true, true, { key: "val" });
expect(msg).toContain("I've configured the required credentials.");
expect(msg).toContain("Run with these inputs:");
});
});

View File

@@ -71,21 +71,58 @@ export function coerceCredentialFields(rawMissingCredentials: unknown): {
return { credentialFields, requiredCredentials };
}
export function coerceExpectedInputs(rawInputs: unknown): Array<{
/**
* Build a sibling-inputs dict from the missing_credentials discriminator values.
*
* When the backend resolves credentials for host-scoped blocks (e.g.
* SendAuthenticatedWebRequestBlock), it adds the target URL to
* `discriminator_values`. The credential modal uses `siblingInputs`
* to extract the host and prefill the "Host Pattern" field.
*
* This function builds that mapping from the `discriminator` field name
* and the first `discriminator_values` entry for each credential.
*/
/**
 * Derive sibling input values from missing-credential discriminators.
 *
 * Each credential entry may carry a `discriminator` field name together with
 * `discriminator_values`; the first string value is exposed under that field
 * name so the credential modal can prefill host-pattern style inputs.
 * Malformed entries (non-objects, non-string names/values) are ignored.
 */
export function buildSiblingInputsFromCredentials(
  rawMissingCredentials: unknown,
): Record<string, unknown> {
  if (!rawMissingCredentials || typeof rawMissingCredentials !== "object") {
    return {};
  }
  const siblingInputs: Record<string, unknown> = {};
  const entries = Object.values(
    rawMissingCredentials as Record<string, unknown>,
  );
  for (const entry of entries) {
    if (!entry || typeof entry !== "object") continue;
    const record = entry as Record<string, unknown>;
    const fieldName =
      typeof record.discriminator === "string" ? record.discriminator : null;
    if (!fieldName) continue;
    const rawValues = record.discriminator_values;
    const stringValues = Array.isArray(rawValues)
      ? rawValues.filter((v): v is string => typeof v === "string")
      : [];
    if (stringValues.length > 0) {
      // Only the first value is used as the prefill source.
      siblingInputs[fieldName] = stringValues[0];
    }
  }
  return siblingInputs;
}
interface ExpectedInput {
name: string;
title: string;
type: string;
description?: string;
required: boolean;
}> {
advanced: boolean;
value?: unknown;
}
export function coerceExpectedInputs(rawInputs: unknown): ExpectedInput[] {
if (!Array.isArray(rawInputs)) return [];
const results: Array<{
name: string;
title: string;
type: string;
description?: string;
required: boolean;
}> = [];
const results: ExpectedInput[] = [];
rawInputs.forEach((value, index) => {
if (!value || typeof value !== "object") return;
@@ -105,15 +142,13 @@ export function coerceExpectedInputs(rawInputs: unknown): Array<{
? input.description.trim()
: undefined;
const required = Boolean(input.required);
const advanced = Boolean(input.advanced);
const item: {
name: string;
title: string;
type: string;
description?: string;
required: boolean;
} = { name, title, type, required };
const item: ExpectedInput = { name, title, type, required, advanced };
if (description) item.description = description;
if (input.value !== undefined && input.value !== null) {
item.value = input.value;
}
results.push(item);
});
@@ -123,17 +158,20 @@ export function coerceExpectedInputs(rawInputs: unknown): Array<{
/**
* Build an RJSF schema from expected inputs so they can be rendered
* as a dynamic form via FormRenderer.
*
* When ``showAdvanced`` is false (default), fields marked ``advanced``
* are excluded — matching the builder behaviour where advanced fields
* are hidden behind a toggle.
*/
export function buildExpectedInputsSchema(
expectedInputs: Array<{
name: string;
title: string;
type: string;
description?: string;
required: boolean;
}>,
expectedInputs: ExpectedInput[],
showAdvanced = false,
): RJSFSchema | null {
if (expectedInputs.length === 0) return null;
const visible = showAdvanced
? expectedInputs
: expectedInputs.filter((i) => !i.advanced);
if (visible.length === 0) return null;
const TYPE_MAP: Record<string, string> = {
string: "string",
@@ -150,12 +188,14 @@ export function buildExpectedInputsSchema(
const properties: Record<string, Record<string, unknown>> = {};
const required: string[] = [];
for (const input of expectedInputs) {
properties[input.name] = {
for (const input of visible) {
const prop: Record<string, unknown> = {
type: TYPE_MAP[input.type.toLowerCase()] ?? "string",
title: input.title,
...(input.description ? { description: input.description } : {}),
};
if (input.description) prop.description = input.description;
if (input.value !== undefined) prop.default = input.value;
properties[input.name] = prop;
if (input.required) required.push(input.name);
}
@@ -165,3 +205,92 @@ export function buildExpectedInputsSchema(
...(required.length > 0 ? { required } : {}),
};
}
/**
 * Collect backend-prefilled ``value`` entries keyed by input name.
 * Inputs whose value is undefined or null contribute nothing.
 */
export function extractInitialValues(
  expectedInputs: ExpectedInput[],
): Record<string, unknown> {
  return Object.fromEntries(
    expectedInputs
      .filter((input) => input.value !== undefined && input.value !== null)
      .map((input) => [input.name, input.value]),
  );
}
/**
 * Overlay user-entered values on top of backend-provided initial values.
 *
 * Entries in ``prev`` that are undefined, null, or the empty string are
 * discarded so they never clobber a prefilled initial value; falsy-but-valid
 * values such as 0 and false are preserved.
 */
export function mergeInputValues(
  initialValues: Record<string, unknown>,
  prev: Record<string, unknown>,
): Record<string, unknown> {
  const meaningfulPrev = Object.entries(prev).filter(
    ([, value]) => value !== undefined && value !== null && value !== "",
  );
  return { ...initialValues, ...Object.fromEntries(meaningfulPrev) };
}
/**
 * Return true when every required credential key has a truthy selection
 * in `inputCredentials`.
 */
export function checkAllCredentialsComplete(
  requiredCredentials: Set<string>,
  inputCredentials: Record<string, unknown>,
): boolean {
  // Short-circuit on the first missing/falsy credential selection.
  for (const key of requiredCredentials) {
    if (!inputCredentials[key]) {
      return false;
    }
  }
  return true;
}
/**
 * Names of inputs that must be filled before the agent can run.
 *
 * Advanced inputs are hidden behind a toggle in the form, so they are
 * excluded even when marked required.
 */
export function getRequiredInputNames(
  expectedInputs: ExpectedInput[],
): string[] {
  return expectedInputs.flatMap((input) =>
    input.required && !input.advanced ? [input.name] : [],
  );
}
/**
 * Return true when every required (non-advanced) input has a non-empty
 * value in `inputValues`.
 */
export function checkAllInputsComplete(
  expectedInputs: ExpectedInput[],
  inputValues: Record<string, unknown>,
): boolean {
  // No declared inputs means there is nothing the user needs to fill in.
  if (expectedInputs.length === 0) {
    return true;
  }
  const isFilled = (name: string): boolean => {
    const value = inputValues[name];
    return value !== undefined && value !== null && value !== "";
  };
  return getRequiredInputNames(expectedInputs).every(isFilled);
}
/**
 * Whether the run button should be enabled: inputs must always be
 * complete, and credentials must be complete only when they are needed.
 */
export function checkCanRun(
  needsCredentials: boolean,
  isAllCredentialsComplete: boolean,
  isAllInputsComplete: boolean,
): boolean {
  // Credentials only gate the run when the agent actually requires them.
  if (needsCredentials && !isAllCredentialsComplete) {
    return false;
  }
  return isAllInputsComplete;
}
/**
 * Compose the chat message sent when the user (re-)runs a step.
 *
 * Mentions configured credentials when they were required, then either
 * echoes the non-empty input values as JSON or falls back to a plain
 * re-run instruction.
 */
export function buildRunMessage(
  needsCredentials: boolean,
  needsInputs: boolean,
  inputValues: Record<string, unknown>,
  retryInstruction?: string,
): string {
  const parts: string[] = [];
  if (needsCredentials) {
    parts.push("I've configured the required credentials.");
  }
  if (!needsInputs) {
    // No inputs to report — just ask for a re-run.
    parts.push(retryInstruction ?? "Please re-run this step now.");
  } else {
    // Drop empty entries so the echoed JSON only shows real values.
    const nonEmpty = Object.fromEntries(
      Object.entries(inputValues).filter(
        ([, v]) => v !== undefined && v !== null && v !== "",
      ),
    );
    parts.push(`Run with these inputs: ${JSON.stringify(nonEmpty, null, 2)}`);
  }
  return parts.join(" ");
}

View File

@@ -10,6 +10,7 @@ import { toDisplayName } from "@/providers/agent-credentials/helper";
import { APIKeyCredentialsModal } from "./components/APIKeyCredentialsModal/APIKeyCredentialsModal";
import { CredentialsFlatView } from "./components/CredentialsFlatView/CredentialsFlatView";
import { CredentialTypeSelector } from "./components/CredentialTypeSelector/CredentialTypeSelector";
import { DeleteConfirmationModal } from "./components/DeleteConfirmationModal/DeleteConfirmationModal";
import { HostScopedCredentialsModal } from "./components/HotScopedCredentialsModal/HotScopedCredentialsModal";
import { OAuthFlowWaitingModal } from "./components/OAuthWaitingModal/OAuthWaitingModal";
import { PasswordCredentialsModal } from "./components/PasswordCredentialsModal/PasswordCredentialsModal";
@@ -90,6 +91,12 @@ export function CredentialsInput({
handleActionButtonClick,
handleCredentialSelect,
handleOAuthLogin,
handleDeleteCredential,
handleDeleteConfirm,
credentialToDelete,
deleteWarningMessage,
setCredentialToDelete,
isDeletingCredential,
} = hookData;
const displayName = toDisplayName(provider);
@@ -113,6 +120,7 @@ export function CredentialsInput({
onSelectCredential={handleCredentialSelect}
onClearCredential={() => onSelectCredential(undefined)}
onAddCredential={handleActionButtonClick}
onDeleteCredential={readOnly ? undefined : handleDeleteCredential}
actionButtonText={actionButtonText}
isOptional={isOptional}
showTitle={showTitle}
@@ -192,6 +200,15 @@ export function CredentialsInput({
Error: {oAuthError}
</Text>
)}
<DeleteConfirmationModal
credentialToDelete={credentialToDelete}
warningMessage={deleteWarningMessage}
isDeleting={isDeletingCredential}
onClose={() => setCredentialToDelete(null)}
onConfirm={() => handleDeleteConfirm(false)}
onForceConfirm={() => handleDeleteConfirm(true)}
/>
</>
)}
</div>

View File

@@ -0,0 +1,449 @@
import { describe, expect, it, vi } from "vitest";
import {
countSupportedTypes,
getSupportedTypes,
getCredentialTypeLabel,
getActionButtonText,
getCredentialDisplayName,
isSystemCredential,
filterSystemCredentials,
getSystemCredentials,
processCredentialDeletion,
findExistingHostCredentials,
hasExistingHostCredential,
resolveActionTarget,
headerPairsToRecord,
addHeaderPairToList,
removeHeaderPairFromList,
updateHeaderPairInList,
} from "../helpers";
// Tests for the pure credential-type helpers. Flag order in every call is
// (oauth2, api_key, user_password, host_scoped), as shown by the
// getSupportedTypes expectations below.
describe("countSupportedTypes", () => {
  it("returns 0 when nothing is supported", () => {
    expect(countSupportedTypes(false, false, false, false)).toBe(0);
  });
  it("returns 1 for a single supported type", () => {
    expect(countSupportedTypes(true, false, false, false)).toBe(1);
    expect(countSupportedTypes(false, true, false, false)).toBe(1);
  });
  it("returns count of all true flags", () => {
    expect(countSupportedTypes(true, true, true, true)).toBe(4);
    expect(countSupportedTypes(true, false, true, false)).toBe(2);
  });
});
// getSupportedTypes maps the same flags to type-name strings, in a fixed order.
describe("getSupportedTypes", () => {
  it("returns empty array when nothing supported", () => {
    expect(getSupportedTypes(false, false, false, false)).toEqual([]);
  });
  it("returns oauth2 when supportsOAuth2 is true", () => {
    expect(getSupportedTypes(true, false, false, false)).toEqual(["oauth2"]);
  });
  it("returns all supported types in order", () => {
    expect(getSupportedTypes(true, true, true, true)).toEqual([
      "oauth2",
      "api_key",
      "user_password",
      "host_scoped",
    ]);
  });
  it("returns only the enabled types", () => {
    expect(getSupportedTypes(false, true, false, true)).toEqual([
      "api_key",
      "host_scoped",
    ]);
  });
});
// Human-readable label for each credential type.
describe("getCredentialTypeLabel", () => {
  it("returns 'OAuth' for oauth2", () => {
    expect(getCredentialTypeLabel("oauth2")).toBe("OAuth");
  });
  it("returns 'API Key' for api_key", () => {
    expect(getCredentialTypeLabel("api_key")).toBe("API Key");
  });
  it("returns 'Password' for user_password", () => {
    expect(getCredentialTypeLabel("user_password")).toBe("Password");
  });
  it("returns 'Headers' for host_scoped", () => {
    expect(getCredentialTypeLabel("host_scoped")).toBe("Headers");
  });
});
// getActionButtonText picks CTA copy from the supported-type flags plus a
// trailing hasExisting flag (last argument).
describe("getActionButtonText", () => {
  it("returns generic text for multiple types without existing", () => {
    expect(getActionButtonText(true, true, false, false, false)).toBe(
      "Add credential",
    );
  });
  it("returns generic text for multiple types with existing", () => {
    expect(getActionButtonText(true, true, false, false, true)).toBe(
      "Add another credential",
    );
  });
  it("returns specific text for single OAuth2 without existing", () => {
    expect(getActionButtonText(true, false, false, false, false)).toBe(
      "Add account",
    );
  });
  it("returns specific text for single OAuth2 with existing", () => {
    expect(getActionButtonText(true, false, false, false, true)).toBe(
      "Connect another account",
    );
  });
  it("returns API key text for single API key", () => {
    expect(getActionButtonText(false, true, false, false, false)).toBe(
      "Add API key",
    );
    expect(getActionButtonText(false, true, false, false, true)).toBe(
      "Use a new API key",
    );
  });
  it("returns password text for single user_password", () => {
    expect(getActionButtonText(false, false, true, false, false)).toBe(
      "Add username and password",
    );
    expect(getActionButtonText(false, false, true, false, true)).toBe(
      "Add a new username and password",
    );
  });
  it("returns headers text for single host_scoped", () => {
    expect(getActionButtonText(false, false, false, true, false)).toBe(
      "Add headers",
    );
    expect(getActionButtonText(false, false, false, true, true)).toBe(
      "Update headers",
    );
  });
  it("returns fallback text when no type is supported", () => {
    expect(getActionButtonText(false, false, false, false, false)).toBe(
      "Add credentials",
    );
    expect(getActionButtonText(false, false, false, false, true)).toBe(
      "Add new credentials",
    );
  });
});
// Display-name fallback chain: title -> username -> "Your <provider> account".
describe("getCredentialDisplayName", () => {
  it("returns title when present", () => {
    expect(getCredentialDisplayName({ title: "My API Key" }, "Google")).toBe(
      "My API Key",
    );
  });
  it("returns username when title is missing", () => {
    expect(
      getCredentialDisplayName({ username: "user@example.com" }, "Google"),
    ).toBe("user@example.com");
  });
  it("returns fallback when both are missing", () => {
    expect(getCredentialDisplayName({}, "Google")).toBe("Your Google account");
  });
});
// System credentials are flagged via is_system or detected from title
// heuristics ("system", "use credits ...").
describe("isSystemCredential", () => {
  it("returns true when is_system is true", () => {
    expect(isSystemCredential({ is_system: true })).toBe(true);
  });
  it("returns false when is_system is false and no title", () => {
    expect(isSystemCredential({ is_system: false })).toBe(false);
  });
  it("returns true when title contains 'system'", () => {
    expect(isSystemCredential({ title: "System Default" })).toBe(true);
  });
  it("returns true when title starts with 'use credits for'", () => {
    expect(isSystemCredential({ title: "Use Credits for OpenAI" })).toBe(true);
  });
  it("returns true when title contains 'use credits'", () => {
    expect(isSystemCredential({ title: "Please use credits" })).toBe(true);
  });
  it("returns false for regular credential", () => {
    expect(isSystemCredential({ title: "My API Key" })).toBe(false);
  });
  it("returns false when title is null", () => {
    expect(isSystemCredential({ title: null })).toBe(false);
  });
});
// filterSystemCredentials / getSystemCredentials are complementary partitions
// of the same list.
describe("filterSystemCredentials", () => {
  it("removes system credentials", () => {
    const creds = [
      { title: "My Key", is_system: false },
      { title: "System Default", is_system: true },
      { title: "Other Key" },
    ];
    expect(filterSystemCredentials(creds)).toEqual([
      { title: "My Key", is_system: false },
      { title: "Other Key" },
    ]);
  });
  it("returns empty array when all are system", () => {
    expect(filterSystemCredentials([{ is_system: true }])).toEqual([]);
  });
});
describe("getSystemCredentials", () => {
  it("returns only system credentials", () => {
    const creds = [
      { title: "My Key", is_system: false },
      { title: "System Default", is_system: true },
    ];
    expect(getSystemCredentials(creds)).toEqual([
      { title: "System Default", is_system: true },
    ]);
  });
});
// processCredentialDeletion drives the delete flow: plain success clears
// state, a need_confirmation response surfaces a warning for force-delete,
// and deleting the currently-selected credential flags unselection.
describe("processCredentialDeletion", () => {
  const cred = { id: "cred-1", title: "My Key" };
  it("clears state on successful deletion", async () => {
    const deleteFn = vi.fn().mockResolvedValue({ deleted: true });
    const state = await processCredentialDeletion(
      cred,
      "other",
      deleteFn,
      false,
    );
    expect(state.credentialToDelete).toBeNull();
    expect(state.shouldUnselectCurrent).toBe(false);
  });
  it("flags shouldUnselectCurrent when selected credential is deleted", async () => {
    const deleteFn = vi.fn().mockResolvedValue({ deleted: true });
    const state = await processCredentialDeletion(
      cred,
      "cred-1",
      deleteFn,
      false,
    );
    expect(state.shouldUnselectCurrent).toBe(true);
  });
  it("returns warning when confirmation needed", async () => {
    const deleteFn = vi.fn().mockResolvedValue({
      deleted: false,
      need_confirmation: true,
      message: "In use",
    });
    const state = await processCredentialDeletion(
      cred,
      undefined,
      deleteFn,
      false,
    );
    expect(state.warningMessage).toBe("In use");
    expect(state.credentialToDelete).toBe(cred);
  });
  it("uses fallback warning when message is empty", async () => {
    const deleteFn = vi.fn().mockResolvedValue({
      deleted: false,
      need_confirmation: true,
      message: "",
    });
    const state = await processCredentialDeletion(
      cred,
      undefined,
      deleteFn,
      false,
    );
    expect(state.warningMessage).toBe(
      "This credential is in use. Force delete?",
    );
  });
  it("passes force=true to the delete function", async () => {
    const deleteFn = vi.fn().mockResolvedValue({ deleted: true });
    await processCredentialDeletion(cred, undefined, deleteFn, true);
    expect(deleteFn).toHaveBeenCalledWith("cred-1", true);
  });
});
// Host-scoped credential lookups match on type === "host_scoped" plus host.
describe("findExistingHostCredentials", () => {
  const creds = [
    { id: "1", type: "host_scoped", host: "a.com" },
    { id: "2", type: "api_key" },
    { id: "3", type: "host_scoped", host: "b.com" },
  ];
  it("returns matching host_scoped credentials", () => {
    expect(findExistingHostCredentials(creds, "a.com")).toEqual([
      { id: "1", type: "host_scoped", host: "a.com" },
    ]);
  });
  it("returns empty when no match", () => {
    expect(findExistingHostCredentials(creds, "c.com")).toEqual([]);
  });
});
describe("hasExistingHostCredential", () => {
  const creds = [{ type: "host_scoped", host: "x.com" }, { type: "api_key" }];
  it("returns true for existing host", () => {
    expect(hasExistingHostCredential(creds, "x.com")).toBe(true);
  });
  it("returns false for non-existing host", () => {
    expect(hasExistingHostCredential(creds, "y.com")).toBe(false);
  });
});
// resolveActionTarget decides which modal/flow the action button opens;
// multiple supported types always win and route to the type selector.
describe("resolveActionTarget", () => {
  it("returns type_selector when hasMultipleCredentialTypes is true", () => {
    expect(resolveActionTarget(true, true, true, false, false)).toBe(
      "type_selector",
    );
  });
  it("returns oauth when only OAuth2 is supported", () => {
    expect(resolveActionTarget(false, true, false, false, false)).toBe("oauth");
  });
  it("returns api_key when only API key is supported", () => {
    expect(resolveActionTarget(false, false, true, false, false)).toBe(
      "api_key",
    );
  });
  it("returns user_password when only user_password is supported", () => {
    expect(resolveActionTarget(false, false, false, true, false)).toBe(
      "user_password",
    );
  });
  it("returns host_scoped when only host_scoped is supported", () => {
    expect(resolveActionTarget(false, false, false, false, true)).toBe(
      "host_scoped",
    );
  });
  it("returns null when nothing is supported", () => {
    expect(resolveActionTarget(false, false, false, false, false)).toBeNull();
  });
  it("prefers oauth over api_key when not multiple types", () => {
    expect(resolveActionTarget(false, true, true, false, false)).toBe("oauth");
  });
});
// Header-pair list helpers: all are pure and must not mutate their inputs.
describe("headerPairsToRecord", () => {
  it("converts pairs to record filtering empty entries", () => {
    const pairs = [
      { key: "Authorization", value: "Bearer token" },
      { key: "", value: "ignored" },
      { key: "X-Key", value: "" },
      { key: "  Accept  ", value: "  application/json  " },
    ];
    // Keys and values are trimmed; pairs with a blank key or value are dropped.
    expect(headerPairsToRecord(pairs)).toEqual({
      Authorization: "Bearer token",
      Accept: "application/json",
    });
  });
  it("returns empty object for empty pairs", () => {
    expect(headerPairsToRecord([])).toEqual({});
  });
  it("returns empty object when all pairs are empty", () => {
    expect(headerPairsToRecord([{ key: "", value: "" }])).toEqual({});
  });
});
describe("addHeaderPairToList", () => {
  it("adds a new empty pair to the list", () => {
    const pairs = [{ key: "a", value: "b" }];
    const result = addHeaderPairToList(pairs);
    expect(result).toHaveLength(2);
    expect(result[1]).toEqual({ key: "", value: "" });
  });
  it("does not mutate the original array", () => {
    const pairs = [{ key: "a", value: "b" }];
    const result = addHeaderPairToList(pairs);
    expect(pairs).toHaveLength(1);
    expect(result).not.toBe(pairs);
  });
});
describe("removeHeaderPairFromList", () => {
  it("removes the pair at the given index", () => {
    const pairs = [
      { key: "a", value: "1" },
      { key: "b", value: "2" },
      { key: "c", value: "3" },
    ];
    const result = removeHeaderPairFromList(pairs, 1);
    expect(result).toEqual([
      { key: "a", value: "1" },
      { key: "c", value: "3" },
    ]);
  });
  it("does not remove when only one pair remains", () => {
    const pairs = [{ key: "a", value: "1" }];
    const result = removeHeaderPairFromList(pairs, 0);
    expect(result).toHaveLength(1);
    // The last-pair guard returns the same array instance untouched.
    expect(result).toBe(pairs);
  });
  it("does not mutate the original array", () => {
    const pairs = [
      { key: "a", value: "1" },
      { key: "b", value: "2" },
    ];
    removeHeaderPairFromList(pairs, 0);
    expect(pairs).toHaveLength(2);
  });
});
describe("updateHeaderPairInList", () => {
  it("updates the key of a pair at the given index", () => {
    const pairs = [
      { key: "a", value: "1" },
      { key: "b", value: "2" },
    ];
    const result = updateHeaderPairInList(pairs, 0, "key", "updated");
    expect(result[0]).toEqual({ key: "updated", value: "1" });
    expect(result[1]).toEqual({ key: "b", value: "2" });
  });
  it("updates the value of a pair at the given index", () => {
    const pairs = [{ key: "a", value: "1" }];
    const result = updateHeaderPairInList(pairs, 0, "value", "new-val");
    expect(result[0]).toEqual({ key: "a", value: "new-val" });
  });
  it("does not mutate the original array or pair objects", () => {
    const pairs = [{ key: "a", value: "1" }];
    const result = updateHeaderPairInList(pairs, 0, "key", "b");
    expect(pairs[0].key).toBe("a");
    expect(result).not.toBe(pairs);
    // Pair objects are copied, not shared, so the original stays frozen-safe.
    expect(result[0]).not.toBe(pairs[0]);
  });
});

View File

@@ -31,6 +31,7 @@ type Props = {
onSelectCredential: (credentialId: string) => void;
onClearCredential: () => void;
onAddCredential: () => void;
onDeleteCredential?: (credential: { id: string; title: string }) => void;
};
export function CredentialsFlatView({
@@ -47,6 +48,7 @@ export function CredentialsFlatView({
onSelectCredential,
onClearCredential,
onAddCredential,
onDeleteCredential,
}: Props) {
const hasCredentials = credentials.length > 0;
@@ -99,6 +101,15 @@ export function CredentialsFlatView({
provider={provider}
displayName={displayName}
onSelect={() => onSelectCredential(credential.id)}
onDelete={
onDeleteCredential
? () =>
onDeleteCredential({
id: credential.id,
title: credential.title || credential.id,
})
: undefined
}
readOnly={readOnly}
/>
))}

View File

@@ -4,16 +4,20 @@ import { Dialog } from "@/components/molecules/Dialog/Dialog";
interface Props {
credentialToDelete: { id: string; title: string } | null;
warningMessage?: string | null;
isDeleting: boolean;
onClose: () => void;
onConfirm: () => void;
onForceConfirm: () => void;
}
export function DeleteConfirmationModal({
credentialToDelete,
warningMessage,
isDeleting,
onClose,
onConfirm,
onForceConfirm,
}: Props) {
return (
<Dialog
@@ -27,21 +31,35 @@ export function DeleteConfirmationModal({
styling={{ maxWidth: "32rem" }}
>
<Dialog.Content>
<Text variant="large">
Are you sure you want to delete &quot;{credentialToDelete?.title}
&quot;? This action cannot be undone.
</Text>
{warningMessage ? (
<Text variant="large">{warningMessage}</Text>
) : (
<Text variant="large">
Are you sure you want to delete &quot;{credentialToDelete?.title}
&quot;? This action cannot be undone.
</Text>
)}
<Dialog.Footer>
<Button variant="secondary" onClick={onClose} disabled={isDeleting}>
Cancel
</Button>
<Button
variant="destructive"
onClick={onConfirm}
loading={isDeleting}
>
Delete
</Button>
{warningMessage ? (
<Button
variant="destructive"
onClick={onForceConfirm}
loading={isDeleting}
>
Force Delete
</Button>
) : (
<Button
variant="destructive"
onClick={onConfirm}
loading={isDeleting}
>
Delete
</Button>
)}
</Dialog.Footer>
</Dialog.Content>
</Dialog>

View File

@@ -0,0 +1,76 @@
import { render, screen, fireEvent, cleanup } from "@testing-library/react";
import { afterEach, describe, expect, it, vi } from "vitest";
import { DeleteConfirmationModal } from "../DeleteConfirmationModal";
afterEach(() => {
cleanup();
});
const credential = { id: "cred-1", title: "My API Key" };
/**
 * Render the modal with fully-stubbed props, letting individual tests
 * override only what they care about. Returns the testing-library render
 * utilities plus the (possibly overridden) props for callback assertions.
 */
function renderModal(
  overrides: Partial<Parameters<typeof DeleteConfirmationModal>[0]> = {},
) {
  const props = {
    credentialToDelete: credential,
    isDeleting: false,
    onClose: vi.fn(),
    onConfirm: vi.fn(),
    onForceConfirm: vi.fn(),
    ...overrides,
  };
  const utils = render(<DeleteConfirmationModal {...props} />);
  return { ...utils, props };
}
// Rendering tests: a warningMessage switches the modal from plain
// confirm-delete into the force-delete variant, and the footer buttons
// wire through to the right callbacks.
describe("DeleteConfirmationModal", () => {
  it("shows confirmation text with credential title when no warning", () => {
    renderModal();
    expect(screen.getByText(/Are you sure you want to delete/)).toBeDefined();
    expect(screen.getByText(/My API Key/)).toBeDefined();
  });
  it("shows Delete button when no warning message", () => {
    renderModal();
    expect(screen.getByText("Delete")).toBeDefined();
    expect(screen.queryByText("Force Delete")).toBeNull();
  });
  it("shows warning message when provided", () => {
    renderModal({ warningMessage: "Used by 3 agents" });
    expect(screen.getByText("Used by 3 agents")).toBeDefined();
    expect(screen.queryByText(/Are you sure/)).toBeNull();
  });
  it("shows Force Delete button when warning message is present", () => {
    renderModal({ warningMessage: "Credential is in use" });
    expect(screen.getByText("Force Delete")).toBeDefined();
    expect(screen.queryByText("Delete")).toBeNull();
  });
  it("calls onConfirm when Delete button is clicked", () => {
    const { props } = renderModal();
    fireEvent.click(screen.getByText("Delete"));
    expect(props.onConfirm).toHaveBeenCalledOnce();
  });
  it("calls onForceConfirm when Force Delete button is clicked", () => {
    const { props } = renderModal({ warningMessage: "In use" });
    fireEvent.click(screen.getByText("Force Delete"));
    expect(props.onForceConfirm).toHaveBeenCalledOnce();
  });
  it("calls onClose when Cancel button is clicked", () => {
    const { props } = renderModal();
    fireEvent.click(screen.getByText("Cancel"));
    expect(props.onClose).toHaveBeenCalledOnce();
  });
  it("disables Cancel button when isDeleting is true", () => {
    renderModal({ isDeleting: true });
    const cancelButton = screen.getByText("Cancel");
    expect(cancelButton.closest("button")?.disabled).toBe(true);
  });
});

View File

@@ -1,4 +1,4 @@
import { useEffect, useState } from "react";
import { useContext, useEffect, useState } from "react";
import { z } from "zod";
import { useForm } from "react-hook-form";
import { zodResolver } from "@hookform/resolvers/zod";
@@ -16,8 +16,19 @@ import {
BlockIOCredentialsSubSchema,
CredentialsMetaInput,
} from "@/lib/autogpt-server-api/types";
import { CredentialsProvidersContext } from "@/providers/agent-credentials/credentials-provider";
import { getHostFromUrl } from "@/lib/utils/url";
import { PlusIcon, TrashIcon } from "@phosphor-icons/react";
import { toast } from "@/components/molecules/Toast/use-toast";
import {
addHeaderPairToList,
findExistingHostCredentials,
hasExistingHostCredential,
headerPairsToRecord,
removeHeaderPairFromList,
updateHeaderPairInList,
type HeaderPair,
} from "../../helpers";
type Props = {
schema: BlockIOCredentialsSubSchema;
@@ -35,6 +46,7 @@ export function HostScopedCredentialsModal({
siblingInputs,
}: Props) {
const credentials = useCredentials(schema, siblingInputs);
const allProviders = useContext(CredentialsProvidersContext);
// Get current host from siblingInputs or discriminator_values
const currentUrl = credentials?.discriminatorValue;
@@ -65,9 +77,9 @@ export function HostScopedCredentialsModal({
},
});
const [headerPairs, setHeaderPairs] = useState<
Array<{ key: string; value: string }>
>([{ key: "", value: "" }]);
const [headerPairs, setHeaderPairs] = useState<HeaderPair[]>([
{ key: "", value: "" },
]);
// Update form values when siblingInputs change
useEffect(() => {
@@ -89,16 +101,30 @@ export function HostScopedCredentialsModal({
return null;
}
const { provider, providerName, createHostScopedCredentials } = credentials;
const {
provider,
providerName,
createHostScopedCredentials,
deleteCredentials,
} = credentials;
// Use the unfiltered credential list from the provider context for deduplication.
// The hook's savedCredentials is pre-filtered by discriminatorValue, which may be
// empty when no URL is entered yet — causing deduplication to miss existing creds.
const allProviderCredentials =
allProviders?.[provider]?.savedCredentials ?? [];
const hasExistingForHost = hasExistingHostCredential(
allProviderCredentials,
currentHost || form.getValues("host"),
);
const addHeaderPair = () => {
setHeaderPairs([...headerPairs, { key: "", value: "" }]);
setHeaderPairs((prev) => addHeaderPairToList(prev));
};
const removeHeaderPair = (index: number) => {
if (headerPairs.length > 1) {
setHeaderPairs(headerPairs.filter((_, i) => i !== index));
}
setHeaderPairs((prev) => removeHeaderPairFromList(prev, index));
};
const updateHeaderPair = (
@@ -106,40 +132,55 @@ export function HostScopedCredentialsModal({
field: "key" | "value",
value: string,
) => {
const newPairs = [...headerPairs];
newPairs[index][field] = value;
setHeaderPairs(newPairs);
setHeaderPairs((prev) => updateHeaderPairInList(prev, index, field, value));
};
async function onSubmit(values: z.infer<typeof formSchema>) {
// Convert header pairs to object, filtering out empty pairs
const headers = headerPairs.reduce(
(acc, pair) => {
if (pair.key.trim() && pair.value.trim()) {
acc[pair.key.trim()] = pair.value.trim();
}
return acc;
},
{} as Record<string, string>,
const headers = headerPairsToRecord(headerPairs);
// Delete existing host-scoped credentials for the same host to avoid duplicates.
// Uses unfiltered provider credentials (not the hook's pre-filtered list).
const host = values.host;
const existingForHost = findExistingHostCredentials(
allProviderCredentials,
host,
);
const newCredentials = await createHostScopedCredentials({
host: values.host,
title: currentHost || values.host,
headers,
});
try {
for (const existing of existingForHost) {
await deleteCredentials(existing.id, true);
}
onCredentialsCreate({
provider,
id: newCredentials.id,
type: "host_scoped",
title: newCredentials.title,
});
const newCredentials = await createHostScopedCredentials({
host,
title: currentHost || host,
headers,
});
onCredentialsCreate({
provider,
id: newCredentials.id,
type: "host_scoped",
title: newCredentials.title,
});
} catch (error) {
const message =
error instanceof Error ? error.message : "Something went wrong";
toast({
title: "Failed to save credentials",
description: message,
variant: "destructive",
});
}
}
return (
<Dialog
title={`Add sensitive headers for ${providerName}`}
title={
hasExistingForHost
? `Update sensitive headers for ${providerName}`
: `Add sensitive headers for ${providerName}`
}
controlled={{
isOpen: open,
set: (isOpen) => {
@@ -241,7 +282,9 @@ export function HostScopedCredentialsModal({
<div className="pt-8">
<Button type="submit" className="w-full" size="small">
Save & use these credentials
{hasExistingForHost
? "Update & use these credentials"
: "Save & use these credentials"}
</Button>
</div>
</form>

View File

@@ -0,0 +1,554 @@
import { describe, expect, it, vi } from "vitest";
import {
countSupportedTypes,
getSupportedTypes,
getCredentialTypeLabel,
getActionButtonText,
getCredentialDisplayName,
isSystemCredential,
filterSystemCredentials,
getSystemCredentials,
processCredentialDeletion,
findExistingHostCredentials,
hasExistingHostCredential,
OAUTH_TIMEOUT_MS,
MASKED_KEY_LENGTH,
resolveActionTarget,
headerPairsToRecord,
addHeaderPairToList,
removeHeaderPairFromList,
updateHeaderPairInList,
} from "./helpers";
// Second test suite for the same helpers (co-located `helpers.test`);
// flag order is (oauth2, api_key, user_password, host_scoped) throughout.
describe("countSupportedTypes", () => {
  it("returns 0 when no types are supported", () => {
    expect(countSupportedTypes(false, false, false, false)).toBe(0);
  });
  it("returns 1 when only one type is supported", () => {
    expect(countSupportedTypes(true, false, false, false)).toBe(1);
    expect(countSupportedTypes(false, true, false, false)).toBe(1);
    expect(countSupportedTypes(false, false, true, false)).toBe(1);
    expect(countSupportedTypes(false, false, false, true)).toBe(1);
  });
  it("returns correct count for multiple types", () => {
    expect(countSupportedTypes(true, true, false, false)).toBe(2);
    expect(countSupportedTypes(true, true, true, false)).toBe(3);
    expect(countSupportedTypes(true, true, true, true)).toBe(4);
  });
});
describe("getSupportedTypes", () => {
  it("returns empty array when no types are supported", () => {
    expect(getSupportedTypes(false, false, false, false)).toEqual([]);
  });
  it("returns oauth2 when supportsOAuth2 is true", () => {
    expect(getSupportedTypes(true, false, false, false)).toEqual(["oauth2"]);
  });
  it("returns api_key when supportsApiKey is true", () => {
    expect(getSupportedTypes(false, true, false, false)).toEqual(["api_key"]);
  });
  it("returns user_password when supportsUserPassword is true", () => {
    expect(getSupportedTypes(false, false, true, false)).toEqual([
      "user_password",
    ]);
  });
  it("returns host_scoped when supportsHostScoped is true", () => {
    expect(getSupportedTypes(false, false, false, true)).toEqual([
      "host_scoped",
    ]);
  });
  it("returns all types in order when all are supported", () => {
    expect(getSupportedTypes(true, true, true, true)).toEqual([
      "oauth2",
      "api_key",
      "user_password",
      "host_scoped",
    ]);
  });
});
describe("getCredentialTypeLabel", () => {
  it("returns OAuth for oauth2", () => {
    expect(getCredentialTypeLabel("oauth2")).toBe("OAuth");
  });
  it("returns API Key for api_key", () => {
    expect(getCredentialTypeLabel("api_key")).toBe("API Key");
  });
  it("returns Password for user_password", () => {
    expect(getCredentialTypeLabel("user_password")).toBe("Password");
  });
  it("returns Headers for host_scoped", () => {
    expect(getCredentialTypeLabel("host_scoped")).toBe("Headers");
  });
});
// CTA copy matrix: one nested describe per supported-type combination;
// the final boolean argument is hasExistingCredentials.
describe("getActionButtonText", () => {
  describe("when multiple types are supported", () => {
    it("returns generic text without existing credentials", () => {
      expect(getActionButtonText(true, true, false, false, false)).toBe(
        "Add credential",
      );
    });
    it("returns generic text with existing credentials", () => {
      expect(getActionButtonText(true, true, false, false, true)).toBe(
        "Add another credential",
      );
    });
  });
  describe("when only OAuth2 is supported", () => {
    it("returns 'Add account' without existing credentials", () => {
      expect(getActionButtonText(true, false, false, false, false)).toBe(
        "Add account",
      );
    });
    it("returns 'Connect another account' with existing credentials", () => {
      expect(getActionButtonText(true, false, false, false, true)).toBe(
        "Connect another account",
      );
    });
  });
  describe("when only API key is supported", () => {
    it("returns 'Add API key' without existing credentials", () => {
      expect(getActionButtonText(false, true, false, false, false)).toBe(
        "Add API key",
      );
    });
    it("returns 'Use a new API key' with existing credentials", () => {
      expect(getActionButtonText(false, true, false, false, true)).toBe(
        "Use a new API key",
      );
    });
  });
  describe("when only user_password is supported", () => {
    it("returns 'Add username and password' without existing credentials", () => {
      expect(getActionButtonText(false, false, true, false, false)).toBe(
        "Add username and password",
      );
    });
    it("returns 'Add a new username and password' with existing credentials", () => {
      expect(getActionButtonText(false, false, true, false, true)).toBe(
        "Add a new username and password",
      );
    });
  });
  describe("when only host_scoped is supported", () => {
    it("returns 'Add headers' without existing credentials", () => {
      expect(getActionButtonText(false, false, false, true, false)).toBe(
        "Add headers",
      );
    });
    it("returns 'Update headers' with existing credentials", () => {
      expect(getActionButtonText(false, false, false, true, true)).toBe(
        "Update headers",
      );
    });
  });
  describe("when no types are supported", () => {
    it("returns 'Add credentials' without existing credentials", () => {
      expect(getActionButtonText(false, false, false, false, false)).toBe(
        "Add credentials",
      );
    });
    it("returns 'Add new credentials' with existing credentials", () => {
      expect(getActionButtonText(false, false, false, false, true)).toBe(
        "Add new credentials",
      );
    });
  });
});
describe("getCredentialDisplayName", () => {
it("returns title when present", () => {
expect(
getCredentialDisplayName({ title: "My Key", username: "user" }, "GitHub"),
).toBe("My Key");
});
it("falls back to username when title is missing", () => {
expect(getCredentialDisplayName({ username: "jdoe" }, "GitHub")).toBe(
"jdoe",
);
});
it("falls back to display name when both title and username are missing", () => {
expect(getCredentialDisplayName({}, "GitHub")).toBe("Your GitHub account");
});
it("falls back when title is empty string", () => {
expect(getCredentialDisplayName({ title: "" }, "GitHub")).toBe(
"Your GitHub account",
);
});
});
describe("isSystemCredential", () => {
it("returns true when is_system is true", () => {
expect(isSystemCredential({ is_system: true })).toBe(true);
});
it("returns false when is_system is false and no title", () => {
expect(isSystemCredential({ is_system: false })).toBe(false);
});
it("returns false when title is null", () => {
expect(isSystemCredential({ title: null })).toBe(false);
});
it("returns false when title is absent", () => {
expect(isSystemCredential({})).toBe(false);
});
it("returns true when title contains 'system'", () => {
expect(isSystemCredential({ title: "System API Key" })).toBe(true);
});
it("returns true when title contains 'system' case-insensitively", () => {
expect(isSystemCredential({ title: "SYSTEM key" })).toBe(true);
});
it("returns true when title starts with 'Use credits for'", () => {
expect(isSystemCredential({ title: "Use credits for OpenAI" })).toBe(true);
});
it("returns true when title starts with 'use credits for' case-insensitively", () => {
expect(isSystemCredential({ title: "use credits for Anthropic" })).toBe(
true,
);
});
it("returns true when title contains 'use credits'", () => {
expect(isSystemCredential({ title: "Please use credits here" })).toBe(true);
});
it("returns false for a normal credential title", () => {
expect(isSystemCredential({ title: "My Personal Key" })).toBe(false);
});
});
// filterSystemCredentials should keep only non-system entries, in order.
describe("filterSystemCredentials", () => {
  it("returns empty array for empty input", () => {
    expect(filterSystemCredentials([])).toEqual([]);
  });
  it("filters out system credentials", () => {
    const input = [
      { title: "My Key" },
      { title: "System Key" },
      { title: "Use credits for OpenAI" },
      { title: "Personal Token" },
    ];
    const kept = filterSystemCredentials(input);
    expect(kept).toEqual([{ title: "My Key" }, { title: "Personal Token" }]);
  });
  it("filters out credentials with is_system flag", () => {
    const input = [
      { title: "Normal", is_system: false },
      { title: "Hidden", is_system: true },
    ];
    const kept = filterSystemCredentials(input);
    expect(kept).toEqual([{ title: "Normal", is_system: false }]);
  });
});
// getSystemCredentials is the complement of filterSystemCredentials.
describe("getSystemCredentials", () => {
  it("returns empty array for empty input", () => {
    expect(getSystemCredentials([])).toEqual([]);
  });
  it("returns only system credentials", () => {
    const input = [
      { title: "My Key" },
      { title: "System Key" },
      { title: "Use credits for OpenAI" },
      { title: "Personal Token" },
    ];
    const systemOnly = getSystemCredentials(input);
    expect(systemOnly).toEqual([
      { title: "System Key" },
      { title: "Use credits for OpenAI" },
    ]);
  });
  it("returns credentials with is_system flag", () => {
    const input = [
      { title: "Normal", is_system: false },
      { title: "Hidden", is_system: true },
    ];
    const systemOnly = getSystemCredentials(input);
    expect(systemOnly).toEqual([{ title: "Hidden", is_system: true }]);
  });
});
// Pin the exported timeout and masking constants so accidental edits fail loudly.
describe("constants", () => {
  it("OAUTH_TIMEOUT_MS is 5 minutes", () => {
    const fiveMinutesInMs = 5 * 60 * 1000;
    expect(OAUTH_TIMEOUT_MS).toBe(fiveMinutesInMs);
  });
  it("MASKED_KEY_LENGTH is 15", () => {
    expect(MASKED_KEY_LENGTH).toBe(15);
  });
});
// State machine of the delete flow: cleared, confirm-needed, and fallback states.
describe("processCredentialDeletion", () => {
  const targetCred = { id: "cred-1", title: "My Key" };
  it("returns cleared state on successful deletion", async () => {
    const deleter = vi.fn().mockResolvedValue({ deleted: true });
    const outcome = await processCredentialDeletion(
      targetCred,
      "other-id",
      deleter,
      false,
    );
    expect(deleter).toHaveBeenCalledWith("cred-1", false);
    expect(outcome.credentialToDelete).toBeNull();
    expect(outcome.warningMessage).toBeNull();
    expect(outcome.shouldUnselectCurrent).toBe(false);
  });
  it("sets shouldUnselectCurrent when deleting the selected credential", async () => {
    const deleter = vi.fn().mockResolvedValue({ deleted: true });
    const outcome = await processCredentialDeletion(
      targetCred,
      "cred-1",
      deleter,
      false,
    );
    expect(outcome.shouldUnselectCurrent).toBe(true);
    expect(outcome.credentialToDelete).toBeNull();
  });
  it("returns warning state when confirmation is needed", async () => {
    const deleter = vi.fn().mockResolvedValue({
      deleted: false,
      need_confirmation: true,
      message: "Used by 3 agents",
    });
    const outcome = await processCredentialDeletion(
      targetCred,
      undefined,
      deleter,
      false,
    );
    expect(outcome.warningMessage).toBe("Used by 3 agents");
    expect(outcome.credentialToDelete).toBe(targetCred);
    expect(outcome.shouldUnselectCurrent).toBe(false);
  });
  it("uses default warning message when none provided", async () => {
    const deleter = vi.fn().mockResolvedValue({
      deleted: false,
      need_confirmation: true,
      message: "",
    });
    const outcome = await processCredentialDeletion(
      targetCred,
      undefined,
      deleter,
      false,
    );
    expect(outcome.warningMessage).toBe(
      "This credential is in use. Force delete?",
    );
  });
  it("passes force flag to delete function", async () => {
    const deleter = vi.fn().mockResolvedValue({ deleted: true });
    await processCredentialDeletion(targetCred, undefined, deleter, true);
    expect(deleter).toHaveBeenCalledWith("cred-1", true);
  });
  it("returns unchanged state for unknown result shape", async () => {
    const deleter = vi.fn().mockResolvedValue({ deleted: false });
    const outcome = await processCredentialDeletion(
      targetCred,
      undefined,
      deleter,
      false,
    );
    expect(outcome.warningMessage).toBeNull();
    expect(outcome.credentialToDelete).toBe(targetCred);
    expect(outcome.shouldUnselectCurrent).toBe(false);
  });
});
// Host matching: only host_scoped entries with an exact host match are returned.
describe("findExistingHostCredentials", () => {
  const fixtures = [
    { id: "1", type: "host_scoped", host: "api.example.com" },
    { id: "2", type: "host_scoped", host: "api.other.com" },
    { id: "3", type: "api_key" },
    { id: "4", type: "host_scoped", host: "api.example.com" },
  ];
  it("finds credentials matching the given host", () => {
    const matches = findExistingHostCredentials(fixtures, "api.example.com");
    expect(matches).toHaveLength(2);
    expect(matches[0].id).toBe("1");
    expect(matches[1].id).toBe("4");
  });
  it("returns empty array when no match", () => {
    expect(findExistingHostCredentials(fixtures, "unknown.com")).toEqual([]);
  });
  it("ignores non-host_scoped credentials", () => {
    const matches = findExistingHostCredentials(fixtures, "api.other.com");
    expect(matches).toHaveLength(1);
    expect(matches[0].id).toBe("2");
  });
  it("returns empty array for empty credentials list", () => {
    expect(findExistingHostCredentials([], "any.com")).toEqual([]);
  });
});
// Boolean variant of the host lookup.
describe("hasExistingHostCredential", () => {
  const fixtures = [
    { type: "host_scoped", host: "api.example.com" },
    { type: "api_key" },
  ];
  it("returns true when a host_scoped credential exists for the host", () => {
    const found = hasExistingHostCredential(fixtures, "api.example.com");
    expect(found).toBe(true);
  });
  it("returns false when no matching host_scoped credential exists", () => {
    const found = hasExistingHostCredential(fixtures, "other.com");
    expect(found).toBe(false);
  });
  it("returns false for empty credentials list", () => {
    expect(hasExistingHostCredential([], "any.com")).toBe(false);
  });
});
// Priority order of the action-button target resolution.
describe("resolveActionTarget", () => {
  it("returns type_selector when hasMultipleCredentialTypes is true", () => {
    const target = resolveActionTarget(true, true, true, false, false);
    expect(target).toBe("type_selector");
  });
  it("returns oauth when only OAuth2 is supported", () => {
    const target = resolveActionTarget(false, true, false, false, false);
    expect(target).toBe("oauth");
  });
  it("returns api_key when only API key is supported", () => {
    const target = resolveActionTarget(false, false, true, false, false);
    expect(target).toBe("api_key");
  });
  it("returns user_password when only user_password is supported", () => {
    const target = resolveActionTarget(false, false, false, true, false);
    expect(target).toBe("user_password");
  });
  it("returns host_scoped when only host_scoped is supported", () => {
    const target = resolveActionTarget(false, false, false, false, true);
    expect(target).toBe("host_scoped");
  });
  it("returns null when nothing is supported", () => {
    const target = resolveActionTarget(false, false, false, false, false);
    expect(target).toBeNull();
  });
});
// Conversion of editable header rows into a plain record.
describe("headerPairsToRecord", () => {
  it("converts non-empty pairs to record", () => {
    const rows = [
      { key: "Authorization", value: "Bearer token" },
      { key: "", value: "ignored" },
      { key: "X-Key", value: "" },
    ];
    const record = headerPairsToRecord(rows);
    expect(record).toEqual({
      Authorization: "Bearer token",
    });
  });
  it("trims keys and values", () => {
    const record = headerPairsToRecord([
      { key: " Accept ", value: " text/html " },
    ]);
    expect(record).toEqual({ Accept: "text/html" });
  });
  it("returns empty object for empty pairs", () => {
    expect(headerPairsToRecord([])).toEqual({});
  });
});
// Appending a blank row to the header editor list.
describe("addHeaderPairToList", () => {
  it("appends an empty pair", () => {
    const grown = addHeaderPairToList([{ key: "a", value: "b" }]);
    expect(grown).toHaveLength(2);
    expect(grown[1]).toEqual({ key: "", value: "" });
  });
});
// Row removal, with the guarantee that the last row is never removed.
describe("removeHeaderPairFromList", () => {
  it("removes the pair at index", () => {
    const rows = [
      { key: "a", value: "1" },
      { key: "b", value: "2" },
    ];
    const remaining = removeHeaderPairFromList(rows, 0);
    expect(remaining).toEqual([{ key: "b", value: "2" }]);
  });
  it("does not remove the last pair", () => {
    const rows = [{ key: "a", value: "1" }];
    expect(removeHeaderPairFromList(rows, 0)).toBe(rows);
  });
});
// Immutable single-field updates of one header row.
describe("updateHeaderPairInList", () => {
  it("updates key at the given index", () => {
    const rows = [{ key: "a", value: "1" }];
    const updated = updateHeaderPairInList(rows, 0, "key", "b");
    expect(updated[0]).toEqual({ key: "b", value: "1" });
  });
  it("updates value at the given index", () => {
    const rows = [{ key: "a", value: "1" }];
    const updated = updateHeaderPairInList(rows, 0, "value", "2");
    expect(updated[0]).toEqual({ key: "a", value: "2" });
  });
  it("does not mutate originals", () => {
    const rows = [{ key: "a", value: "1" }];
    updateHeaderPairInList(rows, 0, "key", "b");
    expect(rows[0].key).toBe("a");
  });
});

View File

@@ -149,7 +149,7 @@ export function getActionButtonText(
if (supportsOAuth2) return "Connect another account";
if (supportsApiKey) return "Use a new API key";
if (supportsUserPassword) return "Add a new username and password";
if (supportsHostScoped) return "Add new headers";
if (supportsHostScoped) return "Update headers";
return "Add new credentials";
} else {
if (supportsOAuth2) return "Add account";
@@ -197,3 +197,123 @@ export function getSystemCredentials<
>(credentials: T[]): T[] {
return credentials.filter((cred) => isSystemCredential(cred));
}
/** Result shape returned by the credential delete API call. */
export type DeleteResult =
  | { deleted: true }
  | { deleted: false; need_confirmation: true; message: string };

/** UI state derived from one deletion attempt. */
export type DeleteState = {
  warningMessage: string | null;
  credentialToDelete: { id: string; title: string } | null;
  shouldUnselectCurrent: boolean;
};

/**
 * Run a credential deletion and translate the API outcome into dialog state.
 *
 * On success the dialog state is cleared and the caller is told to unselect
 * the credential if it was the currently selected one. When the backend asks
 * for confirmation, the dialog stays open with the server's warning (or a
 * default prompt when the message is empty). Any other shape leaves the
 * pending credential in place with no warning.
 */
export async function processCredentialDeletion(
  credentialToDelete: { id: string; title: string },
  selectedCredentialId: string | undefined,
  deleteCredentials: (id: string, force: boolean) => Promise<DeleteResult>,
  force: boolean,
): Promise<DeleteState> {
  const outcome = await deleteCredentials(credentialToDelete.id, force);

  if (outcome.deleted) {
    // Deleted for real: clear the dialog, unselect if it was active.
    return {
      warningMessage: null,
      credentialToDelete: null,
      shouldUnselectCurrent: credentialToDelete.id === selectedCredentialId,
    };
  }

  if ("need_confirmation" in outcome && outcome.need_confirmation) {
    // Keep the dialog open and surface the backend warning (or a default).
    return {
      warningMessage:
        outcome.message || "This credential is in use. Force delete?",
      credentialToDelete,
      shouldUnselectCurrent: false,
    };
  }

  // Unrecognized result shape: leave the pending deletion untouched.
  return {
    warningMessage: null,
    credentialToDelete,
    shouldUnselectCurrent: false,
  };
}
/**
 * Return every host-scoped credential whose host exactly equals `host`,
 * preserving input order. Credentials of any other type are ignored.
 */
export function findExistingHostCredentials<
  T extends { type: string; id: string; host?: string },
>(credentials: T[], host: string): T[] {
  const matches: T[] = [];
  for (const candidate of credentials) {
    if (candidate.type !== "host_scoped") continue;
    if ("host" in candidate && candidate.host === host) {
      matches.push(candidate);
    }
  }
  return matches;
}
/**
 * True when at least one host-scoped credential already targets `host`.
 */
export function hasExistingHostCredential<
  T extends { type: string; host?: string },
>(credentials: T[], host: string): boolean {
  for (const candidate of credentials) {
    if (candidate.type !== "host_scoped") continue;
    if ("host" in candidate && candidate.host === host) {
      return true;
    }
  }
  return false;
}
/** Which credential-creation UI the action button should open. */
export type ActionTarget =
  | "type_selector"
  | "oauth"
  | "api_key"
  | "user_password"
  | "host_scoped"
  | null;

/**
 * Resolve the UI target for the "add credentials" action button.
 *
 * Multiple supported credential types always open the type selector;
 * otherwise the first supported type wins, in priority order
 * oauth > api_key > user_password > host_scoped. Null means nothing
 * is supported.
 */
export function resolveActionTarget(
  hasMultipleCredentialTypes: boolean,
  supportsOAuth2: boolean,
  supportsApiKey: boolean,
  supportsUserPassword: boolean,
  supportsHostScoped: boolean,
): ActionTarget {
  if (hasMultipleCredentialTypes) return "type_selector";
  const byPriority: Array<[boolean, ActionTarget]> = [
    [supportsOAuth2, "oauth"],
    [supportsApiKey, "api_key"],
    [supportsUserPassword, "user_password"],
    [supportsHostScoped, "host_scoped"],
  ];
  for (const [supported, target] of byPriority) {
    if (supported) return target;
  }
  return null;
}
/** A single editable header row (key/value) in the credentials form. */
export type HeaderPair = { key: string; value: string };

/**
 * Collapse editable header rows into a request-ready record.
 * Keys and values are trimmed; rows where either side is blank are dropped.
 * Later rows with the same key overwrite earlier ones.
 */
export function headerPairsToRecord(
  pairs: HeaderPair[],
): Record<string, string> {
  const record: Record<string, string> = {};
  for (const { key, value } of pairs) {
    const trimmedKey = key.trim();
    const trimmedValue = value.trim();
    if (trimmedKey && trimmedValue) {
      record[trimmedKey] = trimmedValue;
    }
  }
  return record;
}
/** Return a copy of `pairs` with one blank row appended (input untouched). */
export function addHeaderPairToList(pairs: HeaderPair[]): HeaderPair[] {
  const blankRow: HeaderPair = { key: "", value: "" };
  return pairs.concat(blankRow);
}
/**
 * Return a copy of `pairs` without the row at `index`.
 * The final remaining row is never removed, so the form always shows at
 * least one editable row; in that case the original array is returned as-is.
 */
export function removeHeaderPairFromList(
  pairs: HeaderPair[],
  index: number,
): HeaderPair[] {
  if (pairs.length <= 1) {
    return pairs;
  }
  return pairs.filter((_, position) => position !== index);
}
/**
 * Return a copy of `pairs` with one field of the row at `index` replaced.
 * Neither the input array nor the touched row object is mutated.
 */
export function updateHeaderPairInList(
  pairs: HeaderPair[],
  index: number,
  field: "key" | "value",
  value: string,
): HeaderPair[] {
  return pairs.map((pair, position) =>
    position === index ? { ...pair, [field]: value } : pair,
  );
}

View File

@@ -1,10 +1,10 @@
import { useDeleteV1DeleteCredentials } from "@/app/api/__generated__/endpoints/integrations/integrations";
import useCredentials from "@/hooks/useCredentials";
import { useBackendAPI } from "@/lib/autogpt-server-api/context";
import {
BlockIOCredentialsSubSchema,
CredentialsMetaInput,
} from "@/lib/autogpt-server-api/types";
import { toast } from "@/components/molecules/Toast/use-toast";
import { postV2InitiateOauthLoginForAnMcpServer } from "@/app/api/__generated__/endpoints/mcp/mcp";
import {
OAUTH_ERROR_FLOW_CANCELED,
@@ -12,7 +12,6 @@ import {
OAUTH_ERROR_WINDOW_CLOSED,
openOAuthPopup,
} from "@/lib/oauth-popup";
import { useQueryClient } from "@tanstack/react-query";
import { useEffect, useRef, useState } from "react";
import {
countSupportedTypes,
@@ -20,6 +19,8 @@ import {
getActionButtonText,
getSupportedTypes,
getSystemCredentials,
processCredentialDeletion,
resolveActionTarget,
} from "./helpers";
export type CredentialsInputState = ReturnType<typeof useCredentialsInput>;
@@ -59,12 +60,15 @@ export function useCredentialsInput({
id: string;
title: string;
} | null>(null);
const [deleteWarningMessage, setDeleteWarningMessage] = useState<
string | null
>(null);
const api = useBackendAPI();
const queryClient = useQueryClient();
const credentials = useCredentials(schema, siblingInputs);
const hasAttemptedAutoSelect = useRef(false);
const oauthAbortRef = useRef<((reason?: string) => void) | null>(null);
const [isDeletingCredential, setIsDeletingCredential] = useState(false);
// Clean up on unmount
useEffect(() => {
@@ -73,23 +77,6 @@ export function useCredentialsInput({
};
}, []);
const deleteCredentialsMutation = useDeleteV1DeleteCredentials({
mutation: {
onSuccess: () => {
queryClient.invalidateQueries({
queryKey: ["/api/integrations/credentials"],
});
queryClient.invalidateQueries({
queryKey: [`/api/integrations/${credentials?.provider}/credentials`],
});
setCredentialToDelete(null);
if (selectedCredential?.id === credentialToDelete?.id) {
onSelectCredential(undefined);
}
},
},
});
useEffect(() => {
if (onLoaded) {
onLoaded(Boolean(credentials && credentials.isLoading === false));
@@ -282,19 +269,29 @@ export function useCredentialsInput({
);
function handleActionButtonClick() {
if (hasMultipleCredentialTypes) {
setCredentialTypeSelectorOpen(true);
return;
}
if (supportsOAuth2) {
handleOAuthLogin();
} else if (supportsApiKey) {
setAPICredentialsModalOpen(true);
} else if (supportsUserPassword) {
setUserPasswordCredentialsModalOpen(true);
} else if (supportsHostScoped) {
setHostScopedCredentialsModalOpen(true);
const target = resolveActionTarget(
hasMultipleCredentialTypes,
supportsOAuth2,
supportsApiKey,
supportsUserPassword,
supportsHostScoped,
);
switch (target) {
case "type_selector":
setCredentialTypeSelectorOpen(true);
break;
case "oauth":
handleOAuthLogin();
break;
case "api_key":
setAPICredentialsModalOpen(true);
break;
case "user_password":
setUserPasswordCredentialsModalOpen(true);
break;
case "host_scoped":
setHostScopedCredentialsModalOpen(true);
break;
}
}
@@ -315,15 +312,42 @@ export function useCredentialsInput({
}
function handleDeleteCredential(credential: { id: string; title: string }) {
setDeleteWarningMessage(null);
setCredentialToDelete(credential);
}
function handleDeleteConfirm() {
if (credentialToDelete && credentials) {
deleteCredentialsMutation.mutate({
provider: credentials.provider,
credId: credentialToDelete.id,
async function handleDeleteConfirm(force: boolean = false) {
if (
!credentialToDelete ||
!credentials ||
!("deleteCredentials" in credentials)
)
return;
setIsDeletingCredential(true);
try {
const state = await processCredentialDeletion(
credentialToDelete,
selectedCredential?.id,
credentials.deleteCredentials,
force,
);
if (state.shouldUnselectCurrent) {
onSelectCredential(undefined);
}
setDeleteWarningMessage(state.warningMessage);
setCredentialToDelete(state.credentialToDelete);
} catch (error) {
const message =
error instanceof Error ? error.message : "Something went wrong";
toast({
title: "Failed to delete credential",
description: message,
variant: "destructive",
});
} finally {
setIsDeletingCredential(false);
}
}
@@ -350,7 +374,8 @@ export function useCredentialsInput({
isOAuth2FlowInProgress,
cancelOAuthFlow,
credentialToDelete,
deleteCredentialsMutation,
deleteWarningMessage,
isDeletingCredential,
actionButtonText: getActionButtonText(
supportsOAuth2,
supportsApiKey,

View File

@@ -65,7 +65,7 @@ The result routes data to yes_output or no_output, enabling intelligent branchin
| condition | A plaintext English description of the condition to evaluate | str | Yes |
| yes_value | (Optional) Value to output if the condition is true. If not provided, input_value will be used. | Yes Value | No |
| no_value | (Optional) Value to output if the condition is false. If not provided, input_value will be used. | No Value | No |
| model | The language model to use for evaluating the condition. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-sonnet-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-2.5-pro" \| "google/gemini-3.1-pro-preview" \| "google/gemini-3-flash-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-3.1-flash-lite-preview" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "mistralai/mistral-large-2512" \| "mistralai/mistral-medium-3.1" \| "mistralai/mistral-small-3.2-24b-instruct" \| "mistralai/codestral-2508" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "cohere/command-a-03-2025" \| "cohere/command-a-translate-08-2025" \| "cohere/command-a-reasoning-08-2025" \| "cohere/command-a-vision-07-2025" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-reasoning-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| 
"nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "microsoft/phi-4" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-3" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| model | The language model to use for evaluating the condition. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-sonnet-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-2.5-pro" \| "google/gemini-3.1-pro-preview" \| "google/gemini-3-flash-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-3.1-flash-lite-preview" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "mistralai/mistral-large-2512" \| "mistralai/mistral-medium-3.1" \| "mistralai/mistral-small-3.2-24b-instruct" \| "mistralai/codestral-2508" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "cohere/command-a-03-2025" \| "cohere/command-a-translate-08-2025" \| "cohere/command-a-reasoning-08-2025" \| "cohere/command-a-vision-07-2025" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-reasoning-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| 
"nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "microsoft/phi-4" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-3" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "z-ai/glm-4-32b" \| "z-ai/glm-4.5" \| "z-ai/glm-4.5-air" \| "z-ai/glm-4.5-air:free" \| "z-ai/glm-4.5v" \| "z-ai/glm-4.6" \| "z-ai/glm-4.6v" \| "z-ai/glm-4.7" \| "z-ai/glm-4.7-flash" \| "z-ai/glm-5" \| "z-ai/glm-5-turbo" \| "z-ai/glm-5v-turbo" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
### Outputs
@@ -103,7 +103,7 @@ The block sends the entire conversation history to the chosen LLM, including sys
|-------|-------------|------|----------|
| prompt | The prompt to send to the language model. | str | No |
| messages | List of messages in the conversation. | List[Any] | Yes |
| model | The language model to use for the conversation. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-sonnet-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-2.5-pro" \| "google/gemini-3.1-pro-preview" \| "google/gemini-3-flash-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-3.1-flash-lite-preview" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "mistralai/mistral-large-2512" \| "mistralai/mistral-medium-3.1" \| "mistralai/mistral-small-3.2-24b-instruct" \| "mistralai/codestral-2508" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "cohere/command-a-03-2025" \| "cohere/command-a-translate-08-2025" \| "cohere/command-a-reasoning-08-2025" \| "cohere/command-a-vision-07-2025" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-reasoning-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| 
"amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "microsoft/phi-4" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-3" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| model | The language model to use for the conversation. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-sonnet-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-2.5-pro" \| "google/gemini-3.1-pro-preview" \| "google/gemini-3-flash-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-3.1-flash-lite-preview" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "mistralai/mistral-large-2512" \| "mistralai/mistral-medium-3.1" \| "mistralai/mistral-small-3.2-24b-instruct" \| "mistralai/codestral-2508" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "cohere/command-a-03-2025" \| "cohere/command-a-translate-08-2025" \| "cohere/command-a-reasoning-08-2025" \| "cohere/command-a-vision-07-2025" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-reasoning-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| 
"amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "microsoft/phi-4" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-3" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "z-ai/glm-4-32b" \| "z-ai/glm-4.5" \| "z-ai/glm-4.5-air" \| "z-ai/glm-4.5-air:free" \| "z-ai/glm-4.5v" \| "z-ai/glm-4.6" \| "z-ai/glm-4.6v" \| "z-ai/glm-4.7" \| "z-ai/glm-4.7-flash" \| "z-ai/glm-5" \| "z-ai/glm-5-turbo" \| "z-ai/glm-5v-turbo" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| max_tokens | The maximum number of tokens to generate in the chat completion. | int | No |
| ollama_host | Ollama host for local models | str | No |
@@ -257,7 +257,7 @@ The block formulates a prompt based on the given focus or source data, sends it
|-------|-------------|------|----------|
| focus | The focus of the list to generate. | str | No |
| source_data | The data to generate the list from. | str | No |
| model | The language model to use for generating the list. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-sonnet-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-2.5-pro" \| "google/gemini-3.1-pro-preview" \| "google/gemini-3-flash-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-3.1-flash-lite-preview" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "mistralai/mistral-large-2512" \| "mistralai/mistral-medium-3.1" \| "mistralai/mistral-small-3.2-24b-instruct" \| "mistralai/codestral-2508" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "cohere/command-a-03-2025" \| "cohere/command-a-translate-08-2025" \| "cohere/command-a-reasoning-08-2025" \| "cohere/command-a-vision-07-2025" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-reasoning-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" 
\| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "microsoft/phi-4" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-3" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| model | The language model to use for generating the list. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-sonnet-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-2.5-pro" \| "google/gemini-3.1-pro-preview" \| "google/gemini-3-flash-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-3.1-flash-lite-preview" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "mistralai/mistral-large-2512" \| "mistralai/mistral-medium-3.1" \| "mistralai/mistral-small-3.2-24b-instruct" \| "mistralai/codestral-2508" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "cohere/command-a-03-2025" \| "cohere/command-a-translate-08-2025" \| "cohere/command-a-reasoning-08-2025" \| "cohere/command-a-vision-07-2025" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-reasoning-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" 
\| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "microsoft/phi-4" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-3" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "z-ai/glm-4-32b" \| "z-ai/glm-4.5" \| "z-ai/glm-4.5-air" \| "z-ai/glm-4.5-air:free" \| "z-ai/glm-4.5v" \| "z-ai/glm-4.6" \| "z-ai/glm-4.6v" \| "z-ai/glm-4.7" \| "z-ai/glm-4.7-flash" \| "z-ai/glm-5" \| "z-ai/glm-5-turbo" \| "z-ai/glm-5v-turbo" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| max_retries | Maximum number of retries for generating a valid list. | int | No |
| force_json_output | Whether to force the LLM to produce a JSON-only response. This can increase the block's reliability, but may also reduce the quality of the response because it prohibits the LLM from reasoning before providing its JSON response. | bool | No |
| max_tokens | The maximum number of tokens to generate in the chat completion. | int | No |
@@ -424,7 +424,7 @@ The block sends the input prompt to a chosen LLM, along with any system prompts
| prompt | The prompt to send to the language model. | str | Yes |
| expected_format | Expected format of the response. If provided, the response will be validated against this format. The keys should be the expected fields in the response, and the values should be the description of the field. | Dict[str, str] | Yes |
| list_result | Whether the response should be a list of objects in the expected format. | bool | No |
| model | The language model to use for answering the prompt. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-sonnet-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-2.5-pro" \| "google/gemini-3.1-pro-preview" \| "google/gemini-3-flash-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-3.1-flash-lite-preview" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "mistralai/mistral-large-2512" \| "mistralai/mistral-medium-3.1" \| "mistralai/mistral-small-3.2-24b-instruct" \| "mistralai/codestral-2508" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "cohere/command-a-03-2025" \| "cohere/command-a-translate-08-2025" \| "cohere/command-a-reasoning-08-2025" \| "cohere/command-a-vision-07-2025" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-reasoning-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" 
\| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "microsoft/phi-4" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-3" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| model | The language model to use for answering the prompt. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-sonnet-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-2.5-pro" \| "google/gemini-3.1-pro-preview" \| "google/gemini-3-flash-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-3.1-flash-lite-preview" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "mistralai/mistral-large-2512" \| "mistralai/mistral-medium-3.1" \| "mistralai/mistral-small-3.2-24b-instruct" \| "mistralai/codestral-2508" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "cohere/command-a-03-2025" \| "cohere/command-a-translate-08-2025" \| "cohere/command-a-reasoning-08-2025" \| "cohere/command-a-vision-07-2025" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-reasoning-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" 
\| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "microsoft/phi-4" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-3" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "z-ai/glm-4-32b" \| "z-ai/glm-4.5" \| "z-ai/glm-4.5-air" \| "z-ai/glm-4.5-air:free" \| "z-ai/glm-4.5v" \| "z-ai/glm-4.6" \| "z-ai/glm-4.6v" \| "z-ai/glm-4.7" \| "z-ai/glm-4.7-flash" \| "z-ai/glm-5" \| "z-ai/glm-5-turbo" \| "z-ai/glm-5v-turbo" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| force_json_output | Whether to force the LLM to produce a JSON-only response. This can increase the block's reliability, but may also reduce the quality of the response because it prohibits the LLM from reasoning before providing its JSON response. | bool | No |
| sys_prompt | The system prompt to provide additional context to the model. | str | No |
| conversation_history | The conversation history to provide context for the prompt. | List[Dict[str, Any]] | No |
@@ -464,7 +464,7 @@ The block sends the input prompt to a chosen LLM, processes the response, and re
| Input | Description | Type | Required |
|-------|-------------|------|----------|
| prompt | The prompt to send to the language model. You can use any of the {keys} from Prompt Values to fill in the prompt with values from the prompt values dictionary by putting them in curly braces. | str | Yes |
| model | The language model to use for answering the prompt. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-sonnet-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-2.5-pro" \| "google/gemini-3.1-pro-preview" \| "google/gemini-3-flash-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-3.1-flash-lite-preview" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "mistralai/mistral-large-2512" \| "mistralai/mistral-medium-3.1" \| "mistralai/mistral-small-3.2-24b-instruct" \| "mistralai/codestral-2508" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "cohere/command-a-03-2025" \| "cohere/command-a-translate-08-2025" \| "cohere/command-a-reasoning-08-2025" \| "cohere/command-a-vision-07-2025" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-reasoning-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" 
\| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "microsoft/phi-4" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-3" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| model | The language model to use for answering the prompt. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-sonnet-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-2.5-pro" \| "google/gemini-3.1-pro-preview" \| "google/gemini-3-flash-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-3.1-flash-lite-preview" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "mistralai/mistral-large-2512" \| "mistralai/mistral-medium-3.1" \| "mistralai/mistral-small-3.2-24b-instruct" \| "mistralai/codestral-2508" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "cohere/command-a-03-2025" \| "cohere/command-a-translate-08-2025" \| "cohere/command-a-reasoning-08-2025" \| "cohere/command-a-vision-07-2025" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-reasoning-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" 
\| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "microsoft/phi-4" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-3" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "z-ai/glm-4-32b" \| "z-ai/glm-4.5" \| "z-ai/glm-4.5-air" \| "z-ai/glm-4.5-air:free" \| "z-ai/glm-4.5v" \| "z-ai/glm-4.6" \| "z-ai/glm-4.6v" \| "z-ai/glm-4.7" \| "z-ai/glm-4.7-flash" \| "z-ai/glm-5" \| "z-ai/glm-5-turbo" \| "z-ai/glm-5v-turbo" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| sys_prompt | The system prompt to provide additional context to the model. | str | No |
| retry | Number of times to retry the LLM call if the response does not match the expected format. | int | No |
| prompt_values | Values used to fill in the prompt. The values can be used in the prompt by putting them in a double curly braces, e.g. {{variable_name}}. | Dict[str, str] | No |
@@ -501,7 +501,7 @@ The block splits the input text into smaller chunks, sends each chunk to an LLM
| Input | Description | Type | Required |
|-------|-------------|------|----------|
| text | The text to summarize. | str | Yes |
| model | The language model to use for summarizing the text. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-sonnet-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-2.5-pro" \| "google/gemini-3.1-pro-preview" \| "google/gemini-3-flash-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-3.1-flash-lite-preview" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "mistralai/mistral-large-2512" \| "mistralai/mistral-medium-3.1" \| "mistralai/mistral-small-3.2-24b-instruct" \| "mistralai/codestral-2508" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "cohere/command-a-03-2025" \| "cohere/command-a-translate-08-2025" \| "cohere/command-a-reasoning-08-2025" \| "cohere/command-a-vision-07-2025" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-reasoning-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" 
\| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "microsoft/phi-4" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-3" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| model | The language model to use for summarizing the text. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-sonnet-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-2.5-pro" \| "google/gemini-3.1-pro-preview" \| "google/gemini-3-flash-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-3.1-flash-lite-preview" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "mistralai/mistral-large-2512" \| "mistralai/mistral-medium-3.1" \| "mistralai/mistral-small-3.2-24b-instruct" \| "mistralai/codestral-2508" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "cohere/command-a-03-2025" \| "cohere/command-a-translate-08-2025" \| "cohere/command-a-reasoning-08-2025" \| "cohere/command-a-vision-07-2025" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-reasoning-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" 
\| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "microsoft/phi-4" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-3" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "z-ai/glm-4-32b" \| "z-ai/glm-4.5" \| "z-ai/glm-4.5-air" \| "z-ai/glm-4.5-air:free" \| "z-ai/glm-4.5v" \| "z-ai/glm-4.6" \| "z-ai/glm-4.6v" \| "z-ai/glm-4.7" \| "z-ai/glm-4.7-flash" \| "z-ai/glm-5" \| "z-ai/glm-5-turbo" \| "z-ai/glm-5v-turbo" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| focus | The topic to focus on in the summary | str | No |
| style | The style of the summary to generate. | "concise" \| "detailed" \| "bullet points" \| "numbered list" | No |
| max_tokens | The maximum number of tokens to generate in the chat completion. | int | No |
@@ -721,7 +721,7 @@ _Add technical explanation here._
| Input | Description | Type | Required |
|-------|-------------|------|----------|
| prompt | The prompt to send to the language model. | str | Yes |
| model | The language model to use for answering the prompt. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-sonnet-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-2.5-pro" \| "google/gemini-3.1-pro-preview" \| "google/gemini-3-flash-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-3.1-flash-lite-preview" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "mistralai/mistral-large-2512" \| "mistralai/mistral-medium-3.1" \| "mistralai/mistral-small-3.2-24b-instruct" \| "mistralai/codestral-2508" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "cohere/command-a-03-2025" \| "cohere/command-a-translate-08-2025" \| "cohere/command-a-reasoning-08-2025" \| "cohere/command-a-vision-07-2025" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-reasoning-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" 
\| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "microsoft/phi-4" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-3" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| model | The language model to use for answering the prompt. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-sonnet-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-2.5-pro" \| "google/gemini-3.1-pro-preview" \| "google/gemini-3-flash-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-3.1-flash-lite-preview" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "mistralai/mistral-large-2512" \| "mistralai/mistral-medium-3.1" \| "mistralai/mistral-small-3.2-24b-instruct" \| "mistralai/codestral-2508" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "cohere/command-a-03-2025" \| "cohere/command-a-translate-08-2025" \| "cohere/command-a-reasoning-08-2025" \| "cohere/command-a-vision-07-2025" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-reasoning-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" 
\| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "microsoft/phi-4" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-3" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "z-ai/glm-4-32b" \| "z-ai/glm-4.5" \| "z-ai/glm-4.5-air" \| "z-ai/glm-4.5-air:free" \| "z-ai/glm-4.5v" \| "z-ai/glm-4.6" \| "z-ai/glm-4.6v" \| "z-ai/glm-4.7" \| "z-ai/glm-4.7-flash" \| "z-ai/glm-5" \| "z-ai/glm-5-turbo" \| "z-ai/glm-5v-turbo" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| multiple_tool_calls | Whether to allow multiple tool calls in a single response. | bool | No |
| sys_prompt | The system prompt to provide additional context to the model. | str | No |
| conversation_history | The conversation history to provide context for the prompt. | List[Dict[str, Any]] | No |