Mirror of https://github.com/Significant-Gravitas/AutoGPT.git (synced 2026-02-17 10:12:02 -05:00)

Compare commits: dev...chore/remo (3 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 682f8b5752 | |
| | 4b3611ca43 | |
| | cd6271b787 | |
```diff
@@ -104,8 +104,8 @@ def _get_linear_config() -> tuple[LinearClient, str, str]:
     Raises RuntimeError if any required setting is missing.
     """
     secrets = _get_settings().secrets
-    if not secrets.copilot_linear_api_key:
-        raise RuntimeError("COPILOT_LINEAR_API_KEY is not configured")
+    if not secrets.linear_api_key:
+        raise RuntimeError("LINEAR_API_KEY is not configured")
     if not secrets.linear_feature_request_project_id:
         raise RuntimeError("LINEAR_FEATURE_REQUEST_PROJECT_ID is not configured")
     if not secrets.linear_feature_request_team_id:
@@ -114,7 +114,7 @@ def _get_linear_config() -> tuple[LinearClient, str, str]:
     credentials = APIKeyCredentials(
         id="system-linear",
         provider="linear",
-        api_key=SecretStr(secrets.copilot_linear_api_key),
+        api_key=SecretStr(secrets.linear_api_key),
         title="System Linear API Key",
     )
     client = LinearClient(credentials=credentials)
```
```diff
@@ -106,8 +106,6 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta):
     GPT41_MINI = "gpt-4.1-mini-2025-04-14"
     GPT4O_MINI = "gpt-4o-mini"
     GPT4O = "gpt-4o"
-    GPT4_TURBO = "gpt-4-turbo"
-    GPT3_5_TURBO = "gpt-3.5-turbo"
     # Anthropic models
     CLAUDE_4_1_OPUS = "claude-opus-4-1-20250805"
     CLAUDE_4_OPUS = "claude-opus-4-20250514"
@@ -255,12 +253,6 @@ MODEL_METADATA = {
     LlmModel.GPT4O: ModelMetadata(
         "openai", 128000, 16384, "GPT-4o", "OpenAI", "OpenAI", 2
     ),  # gpt-4o-2024-08-06
-    LlmModel.GPT4_TURBO: ModelMetadata(
-        "openai", 128000, 4096, "GPT-4 Turbo", "OpenAI", "OpenAI", 3
-    ),  # gpt-4-turbo-2024-04-09
-    LlmModel.GPT3_5_TURBO: ModelMetadata(
-        "openai", 16385, 4096, "GPT-3.5 Turbo", "OpenAI", "OpenAI", 1
-    ),  # gpt-3.5-turbo-0125
     # https://docs.anthropic.com/en/docs/about-claude/models
     LlmModel.CLAUDE_4_1_OPUS: ModelMetadata(
         "anthropic", 200000, 32000, "Claude Opus 4.1", "Anthropic", "Anthropic", 3
```
```diff
@@ -75,8 +75,6 @@ MODEL_COST: dict[LlmModel, int] = {
    LlmModel.GPT41_MINI: 1,
    LlmModel.GPT4O_MINI: 1,
    LlmModel.GPT4O: 3,
-   LlmModel.GPT4_TURBO: 10,
-   LlmModel.GPT3_5_TURBO: 1,
    LlmModel.CLAUDE_4_1_OPUS: 21,
    LlmModel.CLAUDE_4_OPUS: 21,
    LlmModel.CLAUDE_4_SONNET: 5,
```
```diff
@@ -79,7 +79,7 @@ async def test_block_credit_usage(server: SpinTestServer):
             node_exec_id="test_node_exec",
             block_id=AITextGeneratorBlock().id,
             inputs={
-                "model": "gpt-4-turbo",
+                "model": "gpt-4o",
                 "credentials": {
                     "id": openai_credentials.id,
                     "provider": openai_credentials.provider,
@@ -100,7 +100,7 @@ async def test_block_credit_usage(server: SpinTestServer):
             graph_exec_id="test_graph_exec",
             node_exec_id="test_node_exec",
             block_id=AITextGeneratorBlock().id,
-            inputs={"model": "gpt-4-turbo", "api_key": "owned_api_key"},
+            inputs={"model": "gpt-4o", "api_key": "owned_api_key"},
             execution_context=ExecutionContext(user_timezone="UTC"),
         ),
     )
```
```diff
@@ -662,7 +662,7 @@ class Secrets(UpdateTrackingModel["Secrets"], BaseSettings):
     mem0_api_key: str = Field(default="", description="Mem0 API key")
     elevenlabs_api_key: str = Field(default="", description="ElevenLabs API key")

-    copilot_linear_api_key: str = Field(
+    linear_api_key: str = Field(
         default="", description="Linear API key for system-level operations"
     )
     linear_feature_request_project_id: str = Field(
```
```diff
@@ -0,0 +1,42 @@
+-- Migrate deprecated OpenAI GPT-4-turbo and GPT-3.5-turbo models
+-- This updates all AgentNode blocks that use deprecated models
+-- OpenAI is retiring these models:
+-- - gpt-4-turbo: March 26, 2026 -> migrate to gpt-4o
+-- - gpt-3.5-turbo: September 28, 2026 -> migrate to gpt-4o-mini
+
+-- Update gpt-4-turbo to gpt-4o (staying in same capability tier)
+UPDATE "AgentNode"
+SET "constantInput" = JSONB_SET(
+    "constantInput"::jsonb,
+    '{model}',
+    '"gpt-4o"'::jsonb
+)
+WHERE "constantInput"::jsonb->>'model' = 'gpt-4-turbo';
+
+-- Update gpt-3.5-turbo to gpt-4o-mini (appropriate replacement for lightweight model)
+UPDATE "AgentNode"
+SET "constantInput" = JSONB_SET(
+    "constantInput"::jsonb,
+    '{model}',
+    '"gpt-4o-mini"'::jsonb
+)
+WHERE "constantInput"::jsonb->>'model' = 'gpt-3.5-turbo';
+
+-- Update AgentPreset input overrides (stored in AgentNodeExecutionInputOutput)
+UPDATE "AgentNodeExecutionInputOutput"
+SET "data" = JSONB_SET(
+    "data"::jsonb,
+    '{model}',
+    '"gpt-4o"'::jsonb
+)
+WHERE "agentPresetId" IS NOT NULL
+  AND "data"::jsonb->>'model' = 'gpt-4-turbo';
+
+UPDATE "AgentNodeExecutionInputOutput"
+SET "data" = JSONB_SET(
+    "data"::jsonb,
+    '{model}',
+    '"gpt-4o-mini"'::jsonb
+)
+WHERE "agentPresetId" IS NOT NULL
+  AND "data"::jsonb->>'model' = 'gpt-3.5-turbo';
```
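The migration rewrites the `model` key in place with `JSONB_SET`, so the natural post-deploy check is to count rows still referencing the retired models. Below is a minimal verification sketch; it is not part of the change, and it assumes a Node environment with the `pg` package installed and a `DATABASE_URL` pointing at the same database. Table and column names are taken from the migration above.

```typescript
// check-model-migration.ts: hypothetical post-migration check (not in the PR).
import { Client } from "pg";

async function main(): Promise<void> {
  const client = new Client({ connectionString: process.env.DATABASE_URL });
  await client.connect();

  // AgentNode rows whose constantInput still names a retired model.
  const nodes = await client.query(
    `SELECT COUNT(*) FROM "AgentNode"
     WHERE "constantInput"::jsonb->>'model' IN ('gpt-4-turbo', 'gpt-3.5-turbo')`,
  );

  // Preset input overrides that the migration should also have rewritten.
  const presets = await client.query(
    `SELECT COUNT(*) FROM "AgentNodeExecutionInputOutput"
     WHERE "agentPresetId" IS NOT NULL
       AND "data"::jsonb->>'model' IN ('gpt-4-turbo', 'gpt-3.5-turbo')`,
  );

  // Both counts should be zero once the migration has run.
  console.log("stale AgentNode rows:", nodes.rows[0].count);
  console.log("stale preset override rows:", presets.rows[0].count);

  await client.end();
}

main().catch((err) => {
  console.error(err);
  process.exit(1);
});
```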
```diff
@@ -62,6 +62,7 @@
     "@rjsf/validator-ajv8": "6.1.2",
     "@sentry/nextjs": "10.27.0",
     "@streamdown/cjk": "1.0.1",
+    "@streamdown/code": "1.0.1",
     "@streamdown/math": "1.0.1",
     "@streamdown/mermaid": "1.0.1",
     "@supabase/ssr": "0.7.0",
@@ -115,7 +116,6 @@
     "remark-gfm": "4.0.1",
     "remark-math": "6.0.0",
     "shepherd.js": "14.5.1",
-    "shiki": "^3.21.0",
     "sonner": "2.0.7",
     "streamdown": "2.1.0",
     "tailwind-merge": "2.6.0",
```
autogpt_platform/frontend/pnpm-lock.yaml (generated, 16 changes)
```diff
@@ -108,6 +108,9 @@ importers:
       '@streamdown/cjk':
         specifier: 1.0.1
         version: 1.0.1(@types/mdast@4.0.4)(micromark-util-types@2.0.2)(micromark@4.0.2)(react@18.3.1)(unified@11.0.5)
+      '@streamdown/code':
+        specifier: 1.0.1
+        version: 1.0.1(react@18.3.1)
       '@streamdown/math':
         specifier: 1.0.1
         version: 1.0.1(react@18.3.1)
@@ -267,9 +270,6 @@ importers:
       shepherd.js:
         specifier: 14.5.1
         version: 14.5.1
-      shiki:
-        specifier: ^3.21.0
-        version: 3.21.0
       sonner:
         specifier: 2.0.7
         version: 2.0.7(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
@@ -3307,6 +3307,11 @@ packages:
     peerDependencies:
       react: ^18.0.0 || ^19.0.0

+  '@streamdown/code@1.0.1':
+    resolution: {integrity: sha512-U9LITfQ28tZYAoY922jdtw1ryg4kgRBdURopqK9hph7G2fBUwPeHthjH7SvaV0fvFv7EqjqCzARJuWUljLe9Ag==}
+    peerDependencies:
+      react: ^18.0.0 || ^19.0.0
+
   '@streamdown/math@1.0.1':
     resolution: {integrity: sha512-R9WdHbpERiRU7WeO7oT1aIbnLJ/jraDr89F7X9x2OM//Y8G8UMATRnLD/RUwg4VLr8Nu7QSIJ0Pa8lXd2meM4Q==}
     peerDependencies:
@@ -11902,6 +11907,11 @@ snapshots:
     - micromark-util-types
     - unified

+  '@streamdown/code@1.0.1(react@18.3.1)':
+    dependencies:
+      react: 18.3.1
+      shiki: 3.21.0
+
   '@streamdown/math@1.0.1(react@18.3.1)':
     dependencies:
       katex: 0.16.28
```
```diff
@@ -1,16 +1,10 @@
 "use client";

-import {
-  DropdownMenu,
-  DropdownMenuContent,
-  DropdownMenuItem,
-  DropdownMenuTrigger,
-} from "@/components/molecules/DropdownMenu/DropdownMenu";
 import { SidebarProvider } from "@/components/ui/sidebar";
-import { DotsThree } from "@phosphor-icons/react";
+// TODO: Replace with modern Dialog component when available
+import DeleteConfirmDialog from "@/components/__legacy__/delete-confirm-dialog";
 import { ChatContainer } from "./components/ChatContainer/ChatContainer";
 import { ChatSidebar } from "./components/ChatSidebar/ChatSidebar";
-import { DeleteChatDialog } from "./components/DeleteChatDialog/DeleteChatDialog";
 import { MobileDrawer } from "./components/MobileDrawer/MobileDrawer";
 import { MobileHeader } from "./components/MobileHeader/MobileHeader";
 import { ScaleLoader } from "./components/ScaleLoader/ScaleLoader";
@@ -62,7 +56,19 @@ export function CopilotPage() {
       >
         {!isMobile && <ChatSidebar />}
         <div className="relative flex h-full w-full flex-col overflow-hidden bg-[#f8f8f9] px-0">
-          {isMobile && <MobileHeader onOpenDrawer={handleOpenDrawer} />}
+          {isMobile && (
+            <MobileHeader
+              onOpenDrawer={handleOpenDrawer}
+              showDelete={!!sessionId}
+              isDeleting={isDeleting}
+              onDelete={() => {
+                const session = sessions.find((s) => s.id === sessionId);
+                if (session) {
+                  handleDeleteClick(session.id, session.title);
+                }
+              }}
+            />
+          )}
           <div className="flex-1 overflow-hidden">
             <ChatContainer
               messages={messages}
@@ -74,38 +80,6 @@ export function CopilotPage() {
               onCreateSession={createSession}
               onSend={onSend}
               onStop={stop}
-              headerSlot={
-                isMobile && sessionId ? (
-                  <div className="flex justify-end">
-                    <DropdownMenu>
-                      <DropdownMenuTrigger asChild>
-                        <button
-                          className="rounded p-1.5 hover:bg-neutral-100"
-                          aria-label="More actions"
-                        >
-                          <DotsThree className="h-5 w-5 text-neutral-600" />
-                        </button>
-                      </DropdownMenuTrigger>
-                      <DropdownMenuContent align="end">
-                        <DropdownMenuItem
-                          onClick={() => {
-                            const session = sessions.find(
-                              (s) => s.id === sessionId,
-                            );
-                            if (session) {
-                              handleDeleteClick(session.id, session.title);
-                            }
-                          }}
-                          disabled={isDeleting}
-                          className="text-red-600 focus:bg-red-50 focus:text-red-600"
-                        >
-                          Delete chat
-                        </DropdownMenuItem>
-                      </DropdownMenuContent>
-                    </DropdownMenu>
-                  </div>
-                ) : undefined
-              }
             />
           </div>
         </div>
@@ -123,11 +97,12 @@ export function CopilotPage() {
       )}
       {/* Delete confirmation dialog - rendered at top level for proper z-index on mobile */}
       {isMobile && (
-        <DeleteChatDialog
-          session={sessionToDelete}
-          isDeleting={isDeleting}
-          onConfirm={handleConfirmDelete}
-          onCancel={handleCancelDelete}
+        <DeleteConfirmDialog
+          entityType="chat"
+          entityName={sessionToDelete?.title || "Untitled chat"}
+          open={!!sessionToDelete}
+          onOpenChange={(open) => !open && handleCancelDelete()}
+          onDoDelete={handleConfirmDelete}
         />
       )}
     </SidebarProvider>
```
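The page-level change replaces the bespoke `DeleteChatDialog` with the legacy `DeleteConfirmDialog` and moves the mobile delete trigger out of the `headerSlot` dropdown into `MobileHeader` itself. The sketch below isolates the prop translation between the two dialogs; the legacy component's prop names are inferred from the call sites in this diff, and the `Session` shape is taken from the deleted `DeleteChatDialog` props, so treat the types as assumptions.

```tsx
// Sketch of the DeleteChatDialog -> DeleteConfirmDialog prop translation.
// Prop names come from this diff's call sites; types are assumptions.
import DeleteConfirmDialog from "@/components/__legacy__/delete-confirm-dialog";

interface Session {
  id: string;
  title: string | null | undefined;
}

export function ChatDeleteConfirm(props: {
  sessionToDelete: Session | null;
  onConfirm: () => void;
  onCancel: () => void;
}) {
  const { sessionToDelete, onConfirm, onCancel } = props;
  return (
    <DeleteConfirmDialog
      entityType="chat" // fixed copy, replaces title="Delete chat"
      entityName={sessionToDelete?.title || "Untitled chat"} // was interpolated into the body text
      open={!!sessionToDelete} // was controlled via session={...}
      // All close paths (backdrop, Escape) funnel through onOpenChange; only a
      // transition to closed fires the cancel handler.
      onOpenChange={(open: boolean) => !open && onCancel()}
      onDoDelete={onConfirm} // was onConfirm
    />
  );
}
```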
```diff
@@ -2,7 +2,6 @@
 import { ChatInput } from "@/app/(platform)/copilot/components/ChatInput/ChatInput";
 import { UIDataTypes, UIMessage, UITools } from "ai";
 import { LayoutGroup, motion } from "framer-motion";
-import { ReactNode } from "react";
 import { ChatMessagesContainer } from "../ChatMessagesContainer/ChatMessagesContainer";
 import { CopilotChatActionsProvider } from "../CopilotChatActionsProvider/CopilotChatActionsProvider";
 import { EmptySession } from "../EmptySession/EmptySession";
@@ -17,7 +16,6 @@ export interface ChatContainerProps {
   onCreateSession: () => void | Promise<string>;
   onSend: (message: string) => void | Promise<void>;
   onStop: () => void;
-  headerSlot?: ReactNode;
 }
 export const ChatContainer = ({
   messages,
@@ -29,7 +27,6 @@ export const ChatContainer = ({
   onCreateSession,
   onSend,
   onStop,
-  headerSlot,
 }: ChatContainerProps) => {
   const inputLayoutId = "copilot-2-chat-input";

@@ -44,7 +41,6 @@ export const ChatContainer = ({
           status={status}
           error={error}
           isLoading={isLoadingSession}
-          headerSlot={headerSlot}
         />
         <motion.div
           initial={{ opacity: 0 }}
```
```diff
@@ -118,7 +118,6 @@ interface ChatMessagesContainerProps {
   status: string;
   error: Error | undefined;
   isLoading: boolean;
-  headerSlot?: React.ReactNode;
 }

 export const ChatMessagesContainer = ({
@@ -126,7 +125,6 @@ export const ChatMessagesContainer = ({
   status,
   error,
   isLoading,
-  headerSlot,
 }: ChatMessagesContainerProps) => {
   const [thinkingPhrase, setThinkingPhrase] = useState(getRandomPhrase);
   const lastToastTimeRef = useRef(0);
@@ -167,7 +165,6 @@ export const ChatMessagesContainer = ({
   return (
     <Conversation className="min-h-0 flex-1">
       <ConversationContent className="flex flex-1 flex-col gap-6 px-3 py-6">
-        {headerSlot}
         {isLoading && messages.length === 0 && (
           <div className="flex min-h-full flex-1 items-center justify-center">
             <LoadingSpinner className="text-neutral-600" />
```
```diff
@@ -7,13 +7,9 @@ import {
 import { Button } from "@/components/atoms/Button/Button";
 import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner";
 import { Text } from "@/components/atoms/Text/Text";
-import {
-  DropdownMenu,
-  DropdownMenuContent,
-  DropdownMenuItem,
-  DropdownMenuTrigger,
-} from "@/components/molecules/DropdownMenu/DropdownMenu";
 import { toast } from "@/components/molecules/Toast/use-toast";
+// TODO: Replace with modern Dialog component when available
+import DeleteConfirmDialog from "@/components/__legacy__/delete-confirm-dialog";
 import {
   Sidebar,
   SidebarContent,
@@ -23,12 +19,11 @@ import {
   useSidebar,
 } from "@/components/ui/sidebar";
 import { cn } from "@/lib/utils";
-import { DotsThree, PlusCircleIcon, PlusIcon } from "@phosphor-icons/react";
+import { PlusCircleIcon, PlusIcon, TrashIcon } from "@phosphor-icons/react";
 import { useQueryClient } from "@tanstack/react-query";
 import { motion } from "framer-motion";
-import { parseAsString, useQueryState } from "nuqs";
 import { useState } from "react";
-import { DeleteChatDialog } from "../DeleteChatDialog/DeleteChatDialog";
+import { parseAsString, useQueryState } from "nuqs";

 export function ChatSidebar() {
   const { state } = useSidebar();
@@ -97,12 +92,6 @@ export function ChatSidebar() {
     }
   }

-  function handleCancelDelete() {
-    if (!isDeleting) {
-      setSessionToDelete(null);
-    }
-  }
-
   function formatDate(dateString: string) {
     const date = new Date(dateString);
     const now = new Date();
@@ -231,28 +220,16 @@ export function ChatSidebar() {
                       </Text>
                     </div>
                   </button>
-                  <DropdownMenu>
-                    <DropdownMenuTrigger asChild>
-                      <button
-                        onClick={(e) => e.stopPropagation()}
-                        className="absolute right-2 top-1/2 -translate-y-1/2 rounded-full p-1.5 text-zinc-600 transition-all hover:bg-neutral-100"
-                        aria-label="More actions"
-                      >
-                        <DotsThree className="h-4 w-4" />
-                      </button>
-                    </DropdownMenuTrigger>
-                    <DropdownMenuContent align="end">
-                      <DropdownMenuItem
-                        onClick={(e) =>
-                          handleDeleteClick(e, session.id, session.title)
-                        }
-                        disabled={isDeleting}
-                        className="text-red-600 focus:bg-red-50 focus:text-red-600"
-                      >
-                        Delete chat
-                      </DropdownMenuItem>
-                    </DropdownMenuContent>
-                  </DropdownMenu>
+                  <button
+                    onClick={(e) =>
+                      handleDeleteClick(e, session.id, session.title)
+                    }
+                    disabled={isDeleting}
+                    className="absolute right-2 top-1/2 -translate-y-1/2 rounded p-1.5 text-zinc-400 opacity-0 transition-all group-hover:opacity-100 hover:bg-red-100 hover:text-red-600 focus-visible:opacity-100 disabled:cursor-not-allowed disabled:opacity-50"
+                    aria-label="Delete chat"
+                  >
+                    <TrashIcon className="h-4 w-4" />
+                  </button>
                 </div>
               ))
             )}
@@ -280,11 +257,12 @@ export function ChatSidebar() {
         )}
       </Sidebar>

-      <DeleteChatDialog
-        session={sessionToDelete}
-        isDeleting={isDeleting}
-        onConfirm={handleConfirmDelete}
-        onCancel={handleCancelDelete}
+      <DeleteConfirmDialog
+        entityType="chat"
+        entityName={sessionToDelete?.title || "Untitled chat"}
+        open={!!sessionToDelete}
+        onOpenChange={(open) => !open && setSessionToDelete(null)}
+        onDoDelete={handleConfirmDelete}
       />
     </>
   );
```
```diff
@@ -1,57 +0,0 @@
-"use client";
-
-import { Button } from "@/components/atoms/Button/Button";
-import { Text } from "@/components/atoms/Text/Text";
-import { Dialog } from "@/components/molecules/Dialog/Dialog";
-
-interface Props {
-  session: { id: string; title: string | null | undefined } | null;
-  isDeleting: boolean;
-  onConfirm: () => void;
-  onCancel: () => void;
-}
-
-export function DeleteChatDialog({
-  session,
-  isDeleting,
-  onConfirm,
-  onCancel,
-}: Props) {
-  return (
-    <Dialog
-      title="Delete chat"
-      styling={{ maxWidth: "30rem", minWidth: "auto" }}
-      controlled={{
-        isOpen: !!session,
-        set: async (open) => {
-          if (!open && !isDeleting) {
-            onCancel();
-          }
-        },
-      }}
-      onClose={isDeleting ? undefined : onCancel}
-    >
-      <Dialog.Content>
-        <Text variant="body">
-          Are you sure you want to delete{" "}
-          <Text variant="body-medium" as="span">
-            "{session?.title || "Untitled chat"}"
-          </Text>
-          ? This action cannot be undone.
-        </Text>
-        <Dialog.Footer>
-          <Button variant="secondary" onClick={onCancel} disabled={isDeleting}>
-            Cancel
-          </Button>
-          <Button
-            variant="destructive"
-            onClick={onConfirm}
-            loading={isDeleting}
-          >
-            Delete
-          </Button>
-        </Dialog.Footer>
-      </Dialog.Content>
-    </Dialog>
-  );
-}
```
```diff
@@ -1,12 +1,20 @@
 import { Button } from "@/components/atoms/Button/Button";
 import { NAVBAR_HEIGHT_PX } from "@/lib/constants";
-import { ListIcon } from "@phosphor-icons/react";
+import { ListIcon, TrashIcon } from "@phosphor-icons/react";

 interface Props {
   onOpenDrawer: () => void;
+  showDelete?: boolean;
+  isDeleting?: boolean;
+  onDelete?: () => void;
 }

-export function MobileHeader({ onOpenDrawer }: Props) {
+export function MobileHeader({
+  onOpenDrawer,
+  showDelete,
+  isDeleting,
+  onDelete,
+}: Props) {
   return (
     <div
       className="fixed z-50 flex gap-2"
@@ -21,6 +29,18 @@ export function MobileHeader({ onOpenDrawer }: Props) {
       >
         <ListIcon width="1.25rem" height="1.25rem" />
       </Button>
+      {showDelete && onDelete && (
+        <Button
+          variant="icon"
+          size="icon"
+          aria-label="Delete current chat"
+          onClick={onDelete}
+          disabled={isDeleting}
+          className="bg-white text-red-500 shadow-md hover:bg-red-50 hover:text-red-600 disabled:opacity-50"
+        >
+          <TrashIcon width="1.25rem" height="1.25rem" />
+        </Button>
+      )}
     </div>
   );
 }
```
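All three new `MobileHeader` props are optional, so existing call sites compile unchanged, and the trash button renders only when both `showDelete` and `onDelete` are supplied. A usage sketch with placeholder handlers; the import path mirrors the diff's file layout by assumption:

```tsx
// Usage sketch for the extended MobileHeader; handlers are placeholders.
import { MobileHeader } from "./components/MobileHeader/MobileHeader";

export function Example({ sessionId }: { sessionId: string | null }) {
  return (
    <MobileHeader
      onOpenDrawer={() => console.log("open drawer")}
      // Omit the three props below and the header renders exactly as before.
      showDelete={!!sessionId}
      isDeleting={false}
      onDelete={() => console.log("delete session", sessionId)}
    />
  );
}
```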
```diff
@@ -192,10 +192,8 @@ export function useCopilotPage() {
   }, [sessionToDelete, deleteSessionMutation]);

   const handleCancelDelete = useCallback(() => {
-    if (!isDeleting) {
-      setSessionToDelete(null);
-    }
-  }, [isDeleting]);
+    setSessionToDelete(null);
+  }, []);

   return {
     sessionId,
```
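Dropping the `isDeleting` guard also empties the dependency array, so `handleCancelDelete` keeps a stable identity across renders instead of being recreated whenever a delete is in flight. A minimal before/after sketch with the state shape simplified:

```typescript
import { useCallback, useState } from "react";

export function useCancelDelete() {
  const [, setSessionToDelete] = useState<{ id: string } | null>(null);

  // Before: recreated whenever isDeleting changed, and cancelling during an
  // in-flight delete was silently ignored.
  //   const handleCancelDelete = useCallback(() => {
  //     if (!isDeleting) setSessionToDelete(null);
  //   }, [isDeleting]);

  // After: no dependencies, so the callback identity never changes.
  const handleCancelDelete = useCallback(() => {
    setSessionToDelete(null);
  }, []);

  return { handleCancelDelete };
}
```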
```diff
@@ -10,7 +10,7 @@ import {
 } from "@/components/ui/tooltip";
 import { cn } from "@/lib/utils";
 import { cjk } from "@streamdown/cjk";
-import { code } from "@/lib/streamdown-code-plugin";
+import { code } from "@streamdown/code";
 import { math } from "@streamdown/math";
 import { mermaid } from "@streamdown/mermaid";
 import type { UIMessage } from "ai";
```
```diff
@@ -25,7 +25,7 @@ export function BaseFooter({
         </div>
       ) : (
         <div
-          className={`flex w-full items-end justify-end gap-4 pt-6 ${className}`}
+          className={`flex w-full items-end justify-between gap-4 pt-6 ${className}`}
           data-testid={testId}
         >
           {children}
```
```diff
@@ -1,70 +0,0 @@
-import {
-  bundledLanguages,
-  bundledLanguagesInfo,
-  createHighlighter,
-  type BundledLanguage,
-  type BundledTheme,
-  type HighlighterGeneric,
-} from "shiki";
-
-export type { BundledLanguage, BundledTheme };
-
-const LANGUAGE_ALIASES: Record<string, string> = Object.fromEntries(
-  bundledLanguagesInfo.flatMap((lang) =>
-    (lang.aliases ?? []).map((alias) => [alias, lang.id]),
-  ),
-);
-
-const SUPPORTED_LANGUAGES = new Set(Object.keys(bundledLanguages));
-
-const PRELOAD_LANGUAGES: BundledLanguage[] = [
-  "javascript",
-  "typescript",
-  "python",
-  "json",
-  "bash",
-  "yaml",
-  "markdown",
-  "html",
-  "css",
-  "sql",
-  "tsx",
-  "jsx",
-];
-
-export const SHIKI_THEMES: [BundledTheme, BundledTheme] = [
-  "github-light",
-  "github-dark",
-];
-
-let highlighterPromise: Promise<
-  HighlighterGeneric<BundledLanguage, BundledTheme>
-> | null = null;
-
-export function getShikiHighlighter(): Promise<
-  HighlighterGeneric<BundledLanguage, BundledTheme>
-> {
-  if (!highlighterPromise) {
-    highlighterPromise = createHighlighter({
-      themes: SHIKI_THEMES,
-      langs: PRELOAD_LANGUAGES,
-    }).catch((err) => {
-      highlighterPromise = null;
-      throw err;
-    });
-  }
-  return highlighterPromise;
-}
-
-export function resolveLanguage(lang: string): string {
-  const normalized = lang.trim().toLowerCase();
-  return LANGUAGE_ALIASES[normalized] ?? normalized;
-}
-
-export function isLanguageSupported(lang: string): boolean {
-  return SUPPORTED_LANGUAGES.has(resolveLanguage(lang));
-}
-
-export function getSupportedLanguages(): BundledLanguage[] {
-  return Array.from(SUPPORTED_LANGUAGES) as BundledLanguage[];
-}
```
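The deleted module's central trick is worth recording: a lazily created singleton promise, shared by every caller, that resets itself on failure so the next call can retry rather than caching a rejection forever. A generic sketch of the pattern, detached from shiki's API:

```typescript
// Lazy singleton promise, as used by the removed getShikiHighlighter().
// The string resource is a stand-in; the pattern is what matters.
let resourcePromise: Promise<string> | null = null;

async function createExpensiveResource(): Promise<string> {
  return "ready"; // stand-in for createHighlighter({ themes, langs })
}

export function getResource(): Promise<string> {
  if (!resourcePromise) {
    resourcePromise = createExpensiveResource().catch((err) => {
      // Clear the memo on failure so a later call can retry.
      resourcePromise = null;
      throw err;
    });
  }
  return resourcePromise;
}
```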
```diff
@@ -1,159 +0,0 @@
-import type { CodeHighlighterPlugin } from "streamdown";
-
-import {
-  type BundledLanguage,
-  type BundledTheme,
-  getShikiHighlighter,
-  getSupportedLanguages,
-  isLanguageSupported,
-  resolveLanguage,
-  SHIKI_THEMES,
-} from "./shiki-highlighter";
-
-interface HighlightResult {
-  tokens: {
-    content: string;
-    color?: string;
-    htmlStyle?: Record<string, string>;
-  }[][];
-  fg?: string;
-  bg?: string;
-}
-
-type HighlightCallback = (result: HighlightResult) => void;
-
-const MAX_CACHE_SIZE = 500;
-const tokenCache = new Map<string, HighlightResult>();
-const pendingCallbacks = new Map<string, Set<HighlightCallback>>();
-const inFlightLanguageLoads = new Map<string, Promise<void>>();
-
-function simpleHash(str: string): string {
-  let hash = 0;
-  for (let i = 0; i < str.length; i++) {
-    const char = str.charCodeAt(i);
-    hash = (hash << 5) - hash + char;
-    hash = hash & hash;
-  }
-  return hash.toString(36);
-}
-
-function getCacheKey(
-  code: string,
-  lang: string,
-  themes: readonly string[],
-): string {
-  return `${lang}:${themes.join(",")}:${simpleHash(code)}`;
-}
-
-function evictOldestIfNeeded(): void {
-  if (tokenCache.size > MAX_CACHE_SIZE) {
-    const oldestKey = tokenCache.keys().next().value;
-    if (oldestKey) {
-      tokenCache.delete(oldestKey);
-    }
-  }
-}
-
-export function createSingletonCodePlugin(): CodeHighlighterPlugin {
-  return {
-    name: "shiki",
-    type: "code-highlighter",
-
-    supportsLanguage(lang: BundledLanguage): boolean {
-      return isLanguageSupported(lang);
-    },
-
-    getSupportedLanguages(): BundledLanguage[] {
-      return getSupportedLanguages();
-    },
-
-    getThemes(): [BundledTheme, BundledTheme] {
-      return SHIKI_THEMES;
-    },
-
-    highlight({ code, language, themes }, callback) {
-      const lang = resolveLanguage(language);
-      const cacheKey = getCacheKey(code, lang, themes);
-
-      if (tokenCache.has(cacheKey)) {
-        return tokenCache.get(cacheKey)!;
-      }
-
-      if (callback) {
-        if (!pendingCallbacks.has(cacheKey)) {
-          pendingCallbacks.set(cacheKey, new Set());
-        }
-        pendingCallbacks.get(cacheKey)!.add(callback);
-      }
-
-      getShikiHighlighter()
-        .then(async (highlighter) => {
-          const loadedLanguages = highlighter.getLoadedLanguages();
-
-          if (!loadedLanguages.includes(lang) && isLanguageSupported(lang)) {
-            let loadPromise = inFlightLanguageLoads.get(lang);
-            if (!loadPromise) {
-              loadPromise = highlighter
-                .loadLanguage(lang as BundledLanguage)
-                .finally(() => {
-                  inFlightLanguageLoads.delete(lang);
-                });
-              inFlightLanguageLoads.set(lang, loadPromise);
-            }
-            await loadPromise;
-          }
-
-          const finalLang = (
-            highlighter.getLoadedLanguages().includes(lang) ? lang : "text"
-          ) as BundledLanguage;
-
-          const shikiResult = highlighter.codeToTokens(code, {
-            lang: finalLang,
-            themes: { light: themes[0], dark: themes[1] },
-          });
-
-          const result: HighlightResult = {
-            tokens: shikiResult.tokens.map((line) =>
-              line.map((token) => ({
-                content: token.content,
-                color: token.color,
-                htmlStyle: token.htmlStyle,
-              })),
-            ),
-            fg: shikiResult.fg,
-            bg: shikiResult.bg,
-          };
-
-          evictOldestIfNeeded();
-          tokenCache.set(cacheKey, result);
-
-          const callbacks = pendingCallbacks.get(cacheKey);
-          if (callbacks) {
-            callbacks.forEach((cb) => {
-              cb(result);
-            });
-            pendingCallbacks.delete(cacheKey);
-          }
-        })
-        .catch((error) => {
-          console.error("[Shiki] Failed to highlight code:", error);
-
-          const fallback: HighlightResult = {
-            tokens: code.split("\n").map((line) => [{ content: line }]),
-          };
-
-          const callbacks = pendingCallbacks.get(cacheKey);
-          if (callbacks) {
-            callbacks.forEach((cb) => {
-              cb(fallback);
-            });
-            pendingCallbacks.delete(cacheKey);
-          }
-        });
-
-      return null;
-    },
-  };
-}
-
-export const code = createSingletonCodePlugin();
```
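The removed plugin's `highlight()` contract is the other piece worth noting: cache hits return tokens synchronously, misses return `null` while registering the caller's callback under a cache key, and one async highlight then resolves every callback registered for that key. With the switch to `@streamdown/code` (which now carries the `shiki` dependency, per the lockfile hunks above), that bookkeeping moves into the package. A stripped-down sketch of the sync-or-callback cache, illustrative only:

```typescript
// Stripped-down sync-or-callback cache, mirroring the deleted plugin's
// highlight() contract. As in the original, only callbacks are coalesced;
// the async computation itself is launched per miss.
type Result = string;
type Callback = (result: Result) => void;

const cache = new Map<string, Result>();
const pending = new Map<string, Set<Callback>>();

async function compute(key: string): Promise<Result> {
  return `highlighted:${key}`; // stand-in for the real async highlight
}

export function getOrCompute(key: string, callback?: Callback): Result | null {
  const hit = cache.get(key);
  if (hit !== undefined) return hit; // synchronous fast path

  if (callback) {
    // Register the caller so one completed computation fans out to everyone
    // who asked for this key while it was pending.
    if (!pending.has(key)) pending.set(key, new Set());
    pending.get(key)!.add(callback);
  }

  void compute(key).then((result) => {
    cache.set(key, result);
    pending.get(key)?.forEach((cb) => cb(result));
    pending.delete(key);
  });

  return null; // caller re-renders when its callback fires
}
```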
```diff
@@ -465,13 +465,9 @@ export async function navigateToAgentByName(
 export async function clickRunButton(page: Page): Promise<void> {
   const { getId } = getSelectors(page);

-  // Wait for sidebar loading to complete before detecting buttons.
-  // During sidebar loading, the "New task" button appears transiently
-  // even for agents with no items, then switches to "Setup your task"
-  // once loading finishes. Waiting for network idle ensures the page
-  // has settled into its final state.
-  await page.waitForLoadState("networkidle");
-
+  // Wait for page to stabilize and buttons to render
+  // The NewAgentLibraryView shows either "Setup your task" (empty state)
+  // or "New task" (with items) button
   const setupTaskButton = page.getByRole("button", {
     name: /Setup your task/i,
   });
@@ -479,7 +475,8 @@ export async function clickRunButton(page: Page): Promise<void> {
   const runButton = getId("agent-run-button");
   const runAgainButton = getId("run-again-button");

-  // Wait for any of the buttons to appear
+  // Use Promise.race with waitFor to wait for any of the buttons to appear
+  // This handles the async rendering in CI environments
   try {
     await Promise.race([
       setupTaskButton.waitFor({ state: "visible", timeout: 15000 }),
@@ -493,7 +490,7 @@ export async function clickRunButton(page: Page): Promise<void> {
     );
   }

-  // Check which button is visible and click it
+  // Now check which button is visible and click it
   if (await setupTaskButton.isVisible()) {
     await setupTaskButton.click();
     const startTaskButton = page
@@ -537,9 +534,7 @@ export async function runAgent(page: Page): Promise<void> {

 export async function waitForAgentPageLoad(page: Page): Promise<void> {
   await page.waitForURL(/.*\/library\/agents\/[^/]+/);
-  // Wait for sidebar data to finish loading so the page settles
-  // into its final state (empty view vs sidebar view)
-  await page.waitForLoadState("networkidle");
+  await page.getByTestId("Run actions").isVisible({ timeout: 10000 });
 }

 export async function getAgentName(page: Page): Promise<string> {
```
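The helper changes trade the blanket `waitForLoadState("networkidle")` for waits on concrete UI: a test-id check in `waitForAgentPageLoad`, and a `Promise.race` across the buttons that can legitimately appear. A condensed sketch of the racing pattern follows; selector names come from the diff (assuming `getId` wraps Playwright's `getByTestId`), and the 15s timeout matches the one used there.

```typescript
import type { Page } from "@playwright/test";

// Resolve as soon as any of the mutually exclusive buttons becomes visible,
// rather than waiting for the network to go idle.
export async function waitForAnyRunButton(page: Page): Promise<void> {
  const setupTaskButton = page.getByRole("button", { name: /Setup your task/i });
  const runButton = page.getByTestId("agent-run-button");
  const runAgainButton = page.getByTestId("run-again-button");

  try {
    // Each waitFor resolves when its locator is visible; the race resolves as
    // soon as the first one does.
    await Promise.race([
      setupTaskButton.waitFor({ state: "visible", timeout: 15000 }),
      runButton.waitFor({ state: "visible", timeout: 15000 }),
      runAgainButton.waitFor({ state: "visible", timeout: 15000 }),
    ]);
  } catch {
    // Surface a clearer error than the raw locator timeout, as the diff does.
    throw new Error("No run button became visible within 15s");
  }
}
```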
@@ -65,7 +65,7 @@ The result routes data to yes_output or no_output, enabling intelligent branchin
|
|||||||
| condition | A plaintext English description of the condition to evaluate | str | Yes |
|
| condition | A plaintext English description of the condition to evaluate | str | Yes |
|
||||||
| yes_value | (Optional) Value to output if the condition is true. If not provided, input_value will be used. | Yes Value | No |
|
| yes_value | (Optional) Value to output if the condition is true. If not provided, input_value will be used. | Yes Value | No |
|
||||||
| no_value | (Optional) Value to output if the condition is false. If not provided, input_value will be used. | No Value | No |
|
| no_value | (Optional) Value to output if the condition is false. If not provided, input_value will be used. | No Value | No |
|
||||||
| model | The language model to use for evaluating the condition. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
|
| model | The language model to use for evaluating the condition. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
|
||||||
|
|
||||||
### Outputs
|
### Outputs
|
||||||
|
|
||||||
@@ -103,7 +103,7 @@ The block sends the entire conversation history to the chosen LLM, including sys
|
|||||||
|-------|-------------|------|----------|
|
|-------|-------------|------|----------|
|
||||||
| prompt | The prompt to send to the language model. | str | No |
|
| prompt | The prompt to send to the language model. | str | No |
|
||||||
| messages | List of messages in the conversation. | List[Any] | Yes |
|
| messages | List of messages in the conversation. | List[Any] | Yes |
|
||||||
| model | The language model to use for the conversation. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
|
| model | The language model to use for the conversation. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
|
||||||
| max_tokens | The maximum number of tokens to generate in the chat completion. | int | No |
|
| max_tokens | The maximum number of tokens to generate in the chat completion. | int | No |
|
||||||
| ollama_host | Ollama host for local models | str | No |
|
| ollama_host | Ollama host for local models | str | No |
|
||||||
|
|
||||||
@@ -257,7 +257,7 @@ The block formulates a prompt based on the given focus or source data, sends it
|
|||||||
|-------|-------------|------|----------|
|
|-------|-------------|------|----------|
|
||||||
| focus | The focus of the list to generate. | str | No |
|
| focus | The focus of the list to generate. | str | No |
|
||||||
| source_data | The data to generate the list from. | str | No |
|
| source_data | The data to generate the list from. | str | No |
|
||||||
| model | The language model to use for generating the list. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
|
| model | The language model to use for generating the list. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
|
||||||
| max_retries | Maximum number of retries for generating a valid list. | int | No |
|
| max_retries | Maximum number of retries for generating a valid list. | int | No |
|
||||||
| force_json_output | Whether to force the LLM to produce a JSON-only response. This can increase the block's reliability, but may also reduce the quality of the response because it prohibits the LLM from reasoning before providing its JSON response. | bool | No |
|
| force_json_output | Whether to force the LLM to produce a JSON-only response. This can increase the block's reliability, but may also reduce the quality of the response because it prohibits the LLM from reasoning before providing its JSON response. | bool | No |
|
||||||
| max_tokens | The maximum number of tokens to generate in the chat completion. | int | No |
|
| max_tokens | The maximum number of tokens to generate in the chat completion. | int | No |
|
||||||
@@ -424,7 +424,7 @@ The block sends the input prompt to a chosen LLM, along with any system prompts
 | prompt | The prompt to send to the language model. | str | Yes |
 | expected_format | Expected format of the response. If provided, the response will be validated against this format. The keys should be the expected fields in the response, and the values should be the description of the field. | Dict[str, str] | Yes |
 | list_result | Whether the response should be a list of objects in the expected format. | bool | No |
-| model | The language model to use for answering the prompt. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
+| model | The language model to use for answering the prompt. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
 | force_json_output | Whether to force the LLM to produce a JSON-only response. This can increase the block's reliability, but may also reduce the quality of the response because it prohibits the LLM from reasoning before providing its JSON response. | bool | No |
 | sys_prompt | The system prompt to provide additional context to the model. | str | No |
 | conversation_history | The conversation history to provide context for the prompt. | List[Dict[str, Any]] | No |
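The `expected_format` and `list_result` rows in this hunk describe validating the LLM's JSON output against a set of expected field names. The repo's actual validation code is not shown in this diff, so the following is only a minimal sketch of that idea; `validate_response` is a hypothetical name.

```python
import json


def validate_response(
    raw: str, expected_format: dict[str, str], list_result: bool = False
) -> dict | list:
    """Sketch of the validation described above: parse the LLM output as
    JSON and require every key named in expected_format to be present."""
    parsed = json.loads(raw)
    items = parsed if list_result else [parsed]
    for item in items:
        missing = set(expected_format) - set(item)
        if missing:
            raise ValueError(f"response is missing expected fields: {missing}")
    return parsed


# expected_format maps field names to human-readable descriptions.
result = validate_response(
    '{"sentiment": "positive", "confidence": "0.9"}',
    expected_format={"sentiment": "positive/negative", "confidence": "0-1 score"},
)
```

Paired with the `retry` input documented in the next hunk, a failed validation would presumably trigger another LLM call rather than an immediate error.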
@@ -464,7 +464,7 @@ The block sends the input prompt to a chosen LLM, processes the response, and re
 | Input | Description | Type | Required |
 |-------|-------------|------|----------|
 | prompt | The prompt to send to the language model. You can use any of the {keys} from Prompt Values to fill in the prompt with values from the prompt values dictionary by putting them in curly braces. | str | Yes |
-| model | The language model to use for answering the prompt. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
+| model | The language model to use for answering the prompt. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
 | sys_prompt | The system prompt to provide additional context to the model. | str | No |
 | retry | Number of times to retry the LLM call if the response does not match the expected format. | int | No |
 | prompt_values | Values used to fill in the prompt. The values can be used in the prompt by putting them in a double curly braces, e.g. {{variable_name}}. | Dict[str, str] | No |
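The `prompt_values` row describes a simple template substitution. Note the table is internally inconsistent (the `prompt` row says single curly braces, the `prompt_values` row says double); the sketch below assumes double-brace placeholders, and `fill_prompt` is a hypothetical name rather than the repo's actual helper.

```python
def fill_prompt(prompt: str, prompt_values: dict[str, str]) -> str:
    """Replace {{key}} placeholders with values from prompt_values.
    Illustrative sketch; assumes double-curly-brace placeholders."""
    for key, value in prompt_values.items():
        prompt = prompt.replace("{{" + key + "}}", value)
    return prompt


filled = fill_prompt(
    "Summarize {{topic}} for a {{audience}} audience.",
    {"topic": "vector databases", "audience": "beginner"},
)
assert filled == "Summarize vector databases for a beginner audience."
```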
@@ -501,7 +501,7 @@ The block splits the input text into smaller chunks, sends each chunk to an LLM
 | Input | Description | Type | Required |
 |-------|-------------|------|----------|
 | text | The text to summarize. | str | Yes |
-| model | The language model to use for summarizing the text. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
+| model | The language model to use for summarizing the text. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
 | focus | The topic to focus on in the summary | str | No |
 | style | The style of the summary to generate. | "concise" \| "detailed" \| "bullet points" \| "numbered list" | No |
 | max_tokens | The maximum number of tokens to generate in the chat completion. | int | No |
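The hunk header describes a chunk-then-summarize flow. The block's real implementation is not part of this diff, so the following is just a rough sketch of that shape; `summarize_chunk` stands in for the actual model call, and the character-based chunk size is an arbitrary assumption (a real implementation would more likely split on token counts and sentence boundaries).

```python
from typing import Callable


def chunk_text(text: str, max_chars: int = 4000) -> list[str]:
    """Naive fixed-size chunking; purely illustrative."""
    return [text[i : i + max_chars] for i in range(0, len(text), max_chars)]


def summarize(text: str, summarize_chunk: Callable[[str], str]) -> str:
    """Summarize each chunk, then summarize the combined partial
    summaries into one final summary."""
    partials = [summarize_chunk(chunk) for chunk in chunk_text(text)]
    if len(partials) == 1:
        return partials[0]
    return summarize_chunk("\n".join(partials))
```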
@@ -763,7 +763,7 @@ Configure agent_mode_max_iterations to control loop behavior: 0 for single decis
 | Input | Description | Type | Required |
 |-------|-------------|------|----------|
 | prompt | The prompt to send to the language model. | str | Yes |
-| model | The language model to use for answering the prompt. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
+| model | The language model to use for answering the prompt. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
 | multiple_tool_calls | Whether to allow multiple tool calls in a single response. | bool | No |
 | sys_prompt | The system prompt to provide additional context to the model. | str | No |
 | conversation_history | The conversation history to provide context for the prompt. | List[Dict[str, Any]] | No |
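The truncated hunk header mentions `agent_mode_max_iterations`, apparently 0 for a single decision and a positive N for a bounded decide/act loop. The actual loop is not part of this diff; the sketch below only illustrates that shape, and every name in it (`run_agent_mode`, `decide`, `run_tool`, the decision dict keys) is hypothetical.

```python
from typing import Any, Callable


def run_agent_mode(
    decide: Callable[[], dict[str, Any]],
    run_tool: Callable[[str, dict[str, Any]], Any],
    max_iterations: int = 0,
) -> Any:
    """Illustrative bounded tool-calling loop: one decision when
    max_iterations == 0, otherwise up to max_iterations rounds."""
    rounds = max(1, max_iterations)
    for _ in range(rounds):
        decision = decide()
        if decision.get("tool") is None:  # model produced a final answer
            return decision["answer"]
        run_tool(decision["tool"], decision.get("arguments", {}))
    return None  # iteration budget exhausted without a final answer
```

Under this reading, `multiple_tool_calls` from the table above would simply let each round return a list of tool calls instead of one.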