Mirror of https://github.com/Significant-Gravitas/AutoGPT.git
Synced 2026-02-24 03:00:28 -05:00

Compare commits: 6 commits, chore/remo ... chore/reac

| Author | SHA1 | Date |
|---|---|---|
| | 1090f90d95 | |
| | a7c9a3c5ae | |
| | ec06c1278a | |
| | e2525cb8a8 | |
| | 02a3a163e7 | |
| | d9d24dcfe6 | |
.github/workflows/platform-frontend-ci.yml (vendored, 59 lines changed)
@@ -83,6 +83,65 @@ jobs:
      - name: Run lint
        run: pnpm lint

  react-doctor:
    runs-on: ubuntu-latest
    needs: setup

    steps:
      - name: Checkout repository
        uses: actions/checkout@v6
        with:
          fetch-depth: 0

      - name: Enable corepack
        run: corepack enable

      - name: Set up Node
        uses: actions/setup-node@v6
        with:
          node-version: "22.18.0"
          cache: "pnpm"
          cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml

      - name: Install dependencies
        run: pnpm install --frozen-lockfile

      - name: Run React Doctor
        id: react-doctor
        continue-on-error: true
        run: |
          OUTPUT=$(pnpm react-doctor:diff 2>&1) || true
          echo "$OUTPUT"
          SCORE=$(echo "$OUTPUT" | grep -oP '\d+(?= / 100)' | head -1)
          echo "score=${SCORE:-0}" >> "$GITHUB_OUTPUT"

      - name: Check React Doctor score
        env:
          RD_SCORE: ${{ steps.react-doctor.outputs.score }}
          MIN_SCORE: "90"
        run: |
          echo "React Doctor score: ${RD_SCORE}/100 (minimum: ${MIN_SCORE})"
          if [ "${RD_SCORE}" -lt "${MIN_SCORE}" ]; then
            echo "::error::React Doctor score ${RD_SCORE} is below the minimum threshold of ${MIN_SCORE}."
            echo ""
            echo "=========================================="
            echo " React Doctor score too low!"
            echo "=========================================="
            echo ""
            echo "To fix these issues, run Claude Code locally:"
            echo ""
            echo " cd autogpt_platform/frontend"
            echo " claude"
            echo ""
            echo "Then ask Claude to run react-doctor and fix the issues."
            echo "You can also run it manually:"
            echo ""
            echo " pnpm react-doctor # scan all files"
            echo " pnpm react-doctor:diff # scan only changed files"
            echo ""
            exit 1
          fi

  chromatic:
    runs-on: ubuntu-latest
    needs: setup

@@ -106,6 +106,8 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta):
    GPT41_MINI = "gpt-4.1-mini-2025-04-14"
    GPT4O_MINI = "gpt-4o-mini"
    GPT4O = "gpt-4o"
    GPT4_TURBO = "gpt-4-turbo"
    GPT3_5_TURBO = "gpt-3.5-turbo"
    # Anthropic models
    CLAUDE_4_1_OPUS = "claude-opus-4-1-20250805"
    CLAUDE_4_OPUS = "claude-opus-4-20250514"

@@ -253,6 +255,12 @@ MODEL_METADATA = {
    LlmModel.GPT4O: ModelMetadata(
        "openai", 128000, 16384, "GPT-4o", "OpenAI", "OpenAI", 2
    ),  # gpt-4o-2024-08-06
    LlmModel.GPT4_TURBO: ModelMetadata(
        "openai", 128000, 4096, "GPT-4 Turbo", "OpenAI", "OpenAI", 3
    ),  # gpt-4-turbo-2024-04-09
    LlmModel.GPT3_5_TURBO: ModelMetadata(
        "openai", 16385, 4096, "GPT-3.5 Turbo", "OpenAI", "OpenAI", 1
    ),  # gpt-3.5-turbo-0125
    # https://docs.anthropic.com/en/docs/about-claude/models
    LlmModel.CLAUDE_4_1_OPUS: ModelMetadata(
        "anthropic", 200000, 32000, "Claude Opus 4.1", "Anthropic", "Anthropic", 3

@@ -75,6 +75,8 @@ MODEL_COST: dict[LlmModel, int] = {
    LlmModel.GPT41_MINI: 1,
    LlmModel.GPT4O_MINI: 1,
    LlmModel.GPT4O: 3,
    LlmModel.GPT4_TURBO: 10,
    LlmModel.GPT3_5_TURBO: 1,
    LlmModel.CLAUDE_4_1_OPUS: 21,
    LlmModel.CLAUDE_4_OPUS: 21,
    LlmModel.CLAUDE_4_SONNET: 5,

@@ -79,7 +79,7 @@ async def test_block_credit_usage(server: SpinTestServer):
            node_exec_id="test_node_exec",
            block_id=AITextGeneratorBlock().id,
            inputs={
                "model": "gpt-4o",
                "model": "gpt-4-turbo",
                "credentials": {
                    "id": openai_credentials.id,
                    "provider": openai_credentials.provider,

@@ -100,7 +100,7 @@ async def test_block_credit_usage(server: SpinTestServer):
            graph_exec_id="test_graph_exec",
            node_exec_id="test_node_exec",
            block_id=AITextGeneratorBlock().id,
            inputs={"model": "gpt-4o", "api_key": "owned_api_key"},
            inputs={"model": "gpt-4-turbo", "api_key": "owned_api_key"},
            execution_context=ExecutionContext(user_timezone="UTC"),
        ),
    )

@@ -1,42 +0,0 @@
-- Migrate deprecated OpenAI GPT-4-turbo and GPT-3.5-turbo models
-- This updates all AgentNode blocks that use deprecated models
-- OpenAI is retiring these models:
-- - gpt-4-turbo: March 26, 2026 -> migrate to gpt-4o
-- - gpt-3.5-turbo: September 28, 2026 -> migrate to gpt-4o-mini

-- Update gpt-4-turbo to gpt-4o (staying in same capability tier)
UPDATE "AgentNode"
SET "constantInput" = JSONB_SET(
    "constantInput"::jsonb,
    '{model}',
    '"gpt-4o"'::jsonb
)
WHERE "constantInput"::jsonb->>'model' = 'gpt-4-turbo';

-- Update gpt-3.5-turbo to gpt-4o-mini (appropriate replacement for lightweight model)
UPDATE "AgentNode"
SET "constantInput" = JSONB_SET(
    "constantInput"::jsonb,
    '{model}',
    '"gpt-4o-mini"'::jsonb
)
WHERE "constantInput"::jsonb->>'model' = 'gpt-3.5-turbo';

-- Update AgentPreset input overrides (stored in AgentNodeExecutionInputOutput)
UPDATE "AgentNodeExecutionInputOutput"
SET "data" = JSONB_SET(
    "data"::jsonb,
    '{model}',
    '"gpt-4o"'::jsonb
)
WHERE "agentPresetId" IS NOT NULL
  AND "data"::jsonb->>'model' = 'gpt-4-turbo';

UPDATE "AgentNodeExecutionInputOutput"
SET "data" = JSONB_SET(
    "data"::jsonb,
    '{model}',
    '"gpt-4o-mini"'::jsonb
)
WHERE "agentPresetId" IS NOT NULL
  AND "data"::jsonb->>'model' = 'gpt-3.5-turbo';

@@ -23,6 +23,8 @@
    "build-storybook": "storybook build",
    "test-storybook": "test-storybook",
    "test-storybook:ci": "concurrently -k -s first -n \"SB,TEST\" -c \"magenta,blue\" \"pnpm run build-storybook -- --quiet && npx http-server storybook-static --port 6006 --silent\" \"wait-on tcp:6006 && pnpm run test-storybook\"",
    "react-doctor": "npx -y react-doctor@latest . --verbose",
    "react-doctor:diff": "npx -y react-doctor@latest . --verbose --diff",
    "generate:api": "npx --yes tsx ./scripts/generate-api-queries.ts && orval --config ./orval.config.ts",
    "generate:api:force": "npx --yes tsx ./scripts/generate-api-queries.ts --force && orval --config ./orval.config.ts"
  },

@@ -0,0 +1,13 @@
"use client";
import { ReactFlowProvider } from "@xyflow/react";
import { Flow } from "./components/FlowEditor/Flow/Flow";

export function BuilderContent() {
  return (
    <div className="relative h-full w-full">
      <ReactFlowProvider>
        <Flow />
      </ReactFlowProvider>
    </div>
  );
}

@@ -7,7 +7,7 @@ import {
  TooltipTrigger,
} from "@/components/atoms/Tooltip/BaseTooltip";
import { beautifyString, cn } from "@/lib/utils";
import { useState } from "react";
import { useCallback, useState } from "react";
import { CustomNodeData } from "../CustomNode";
import { NodeBadges } from "./NodeBadges";
import { NodeContextMenu } from "./NodeContextMenu";

@@ -25,6 +25,9 @@ export const NodeHeader = ({ data, nodeId }: Props) => {

  const [isEditingTitle, setIsEditingTitle] = useState(false);
  const [editedTitle, setEditedTitle] = useState(title);
  const titleInputRef = useCallback((node: HTMLInputElement | null) => {
    node?.focus();
  }, []);

  const handleTitleEdit = () => {
    updateNodeData(nodeId, {

@@ -52,10 +55,10 @@ export const NodeHeader = ({ data, nodeId }: Props) => {
        >
          {isEditingTitle ? (
            <input
              ref={titleInputRef}
              id="node-title-input"
              value={editedTitle}
              onChange={(e) => setEditedTitle(e.target.value)}
              autoFocus
              className={cn(
                "m-0 h-fit w-full border-none bg-transparent p-0 focus:outline-none focus:ring-0",
                "font-sans text-[1rem] font-semibold leading-[1.5rem] text-zinc-800",

@@ -300,7 +300,6 @@ export function MCPToolDialog({
          value={serverUrl}
          onChange={(e) => setServerUrl(e.target.value)}
          onKeyDown={(e) => e.key === "Enter" && handleDiscoverTools()}
          autoFocus
        />
      </div>

@@ -327,7 +326,6 @@ export function MCPToolDialog({
            value={manualToken}
            onChange={(e) => setManualToken(e.target.value)}
            onKeyDown={(e) => e.key === "Enter" && handleDiscoverTools()}
            autoFocus
          />
        </div>
      )}

@@ -52,7 +52,7 @@ export const HorizontalScroll: React.FC<HorizontalScrollAreaProps> = ({
      return;
    }
    const handleScroll = () => updateScrollState();
    element.addEventListener("scroll", handleScroll);
    element.addEventListener("scroll", handleScroll, { passive: true });
    window.addEventListener("resize", handleScroll);
    return () => {
      element.removeEventListener("scroll", handleScroll);

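The HorizontalScroll hunk above switches the scroll handler to a passive listener. A minimal standalone sketch of that pattern follows; the selector and handler names are illustrative, not taken from the diff. Passive listeners promise never to call preventDefault(), so the browser can keep scrolling smooth without waiting on the handler, and removal is unchanged because the options object is not needed to unregister a non-capturing listener.

// Sketch only: a scroll listener registered as passive, mirroring the
// HorizontalScroll change above. The selector and handler are made up.
const scroller = document.querySelector<HTMLDivElement>(".horizontal-scroll");

function handleScroll(): void {
  // update scroll state (e.g. show/hide the left/right fade indicators)
}

scroller?.addEventListener("scroll", handleScroll, { passive: true });
window.addEventListener("resize", handleScroll);

// Cleanup: removeEventListener does not require the options object for a
// non-capturing listener, matching the cleanup in the diff.
scroller?.removeEventListener("scroll", handleScroll);
window.removeEventListener("resize", handleScroll);
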
@@ -85,12 +85,20 @@ export const GraphSearchContent: React.FC<GraphSearchContentProps> = ({
        <Tooltip delayDuration={300}>
          <TooltipTrigger asChild>
            <div
              role="button"
              tabIndex={0}
              className={`mx-4 my-2 flex h-20 cursor-pointer rounded-lg border border-zinc-200 bg-white ${
                index === selectedIndex
                  ? "border-zinc-400 shadow-md"
                  : "hover:border-zinc-300 hover:shadow-sm"
              }`}
              onClick={() => onNodeSelect(node.id)}
              onKeyDown={(e) => {
                if (e.key === "Enter" || e.key === " ") {
                  e.preventDefault();
                  onNodeSelect(node.id);
                }
              }}
              onMouseEnter={() => {
                setSelectedIndex(index);
                onNodeHover?.(node.id);

@@ -140,10 +140,7 @@ export function AgentRunDraftView({
      ),
    [agentInputSchema],
  );
  const agentCredentialsInputFields = useMemo(
    () => graph.credentials_input_schema.properties,
    [graph],
  );
  const agentCredentialsInputFields = graph.credentials_input_schema.properties;
  const credentialFields = useMemo(
    function getCredentialFields() {
      return Object.entries(agentCredentialsInputFields);

@@ -1,13 +1,11 @@
"use client";
import { ReactFlowProvider } from "@xyflow/react";
import { Flow } from "./components/FlowEditor/Flow/Flow";
import type { Metadata } from "next";
import { BuilderContent } from "./BuilderContent";

export const metadata: Metadata = {
  title: "Build",
  description: "Build your agent",
};

export default function BuilderPage() {
  return (
    <div className="relative h-full w-full">
      <ReactFlowProvider>
        <Flow />
      </ReactFlowProvider>
    </div>
  );
  return <BuilderContent />;
}

@@ -1,7 +1,7 @@
|
||||
"use client";
|
||||
import { ChatInput } from "@/app/(platform)/copilot/components/ChatInput/ChatInput";
|
||||
import { UIDataTypes, UIMessage, UITools } from "ai";
|
||||
import { LayoutGroup, motion } from "framer-motion";
|
||||
import { LayoutGroup, LazyMotion, domAnimation, m } from "framer-motion";
|
||||
import { ReactNode } from "react";
|
||||
import { ChatMessagesContainer } from "../ChatMessagesContainer/ChatMessagesContainer";
|
||||
import { CopilotChatActionsProvider } from "../CopilotChatActionsProvider/CopilotChatActionsProvider";
|
||||
@@ -38,45 +38,47 @@ export const ChatContainer = ({
|
||||
const inputLayoutId = "copilot-2-chat-input";
|
||||
|
||||
return (
|
||||
<CopilotChatActionsProvider onSend={onSend}>
|
||||
<LayoutGroup id="copilot-2-chat-layout">
|
||||
<div className="flex h-full min-h-0 w-full flex-col bg-[#f8f8f9] px-2 lg:px-0">
|
||||
{sessionId ? (
|
||||
<div className="mx-auto flex h-full min-h-0 w-full max-w-3xl flex-col">
|
||||
<ChatMessagesContainer
|
||||
messages={messages}
|
||||
status={status}
|
||||
error={error}
|
||||
isLoading={isLoadingSession}
|
||||
headerSlot={headerSlot}
|
||||
/>
|
||||
<motion.div
|
||||
initial={{ opacity: 0 }}
|
||||
animate={{ opacity: 1 }}
|
||||
transition={{ duration: 0.3 }}
|
||||
className="relative px-3 pb-2 pt-2"
|
||||
>
|
||||
<div className="pointer-events-none absolute left-0 right-0 top-[-18px] z-10 h-6 bg-gradient-to-b from-transparent to-[#f8f8f9]" />
|
||||
<ChatInput
|
||||
inputId="chat-input-session"
|
||||
onSend={onSend}
|
||||
disabled={isBusy}
|
||||
isStreaming={isBusy}
|
||||
onStop={onStop}
|
||||
placeholder="What else can I help with?"
|
||||
<LazyMotion features={domAnimation}>
|
||||
<CopilotChatActionsProvider onSend={onSend}>
|
||||
<LayoutGroup id="copilot-2-chat-layout">
|
||||
<div className="flex h-full min-h-0 w-full flex-col bg-[#f8f8f9] px-2 lg:px-0">
|
||||
{sessionId ? (
|
||||
<div className="mx-auto flex h-full min-h-0 w-full max-w-3xl flex-col">
|
||||
<ChatMessagesContainer
|
||||
messages={messages}
|
||||
status={status}
|
||||
error={error}
|
||||
isLoading={isLoadingSession}
|
||||
headerSlot={headerSlot}
|
||||
/>
|
||||
</motion.div>
|
||||
</div>
|
||||
) : (
|
||||
<EmptySession
|
||||
inputLayoutId={inputLayoutId}
|
||||
isCreatingSession={isCreatingSession}
|
||||
onCreateSession={onCreateSession}
|
||||
onSend={onSend}
|
||||
/>
|
||||
)}
|
||||
</div>
|
||||
</LayoutGroup>
|
||||
</CopilotChatActionsProvider>
|
||||
<m.div
|
||||
initial={{ opacity: 0 }}
|
||||
animate={{ opacity: 1 }}
|
||||
transition={{ duration: 0.3 }}
|
||||
className="relative px-3 pb-2 pt-2"
|
||||
>
|
||||
<div className="pointer-events-none absolute left-0 right-0 top-[-18px] z-10 h-6 bg-gradient-to-b from-transparent to-[#f8f8f9]" />
|
||||
<ChatInput
|
||||
inputId="chat-input-session"
|
||||
onSend={onSend}
|
||||
disabled={isBusy}
|
||||
isStreaming={isBusy}
|
||||
onStop={onStop}
|
||||
placeholder="What else can I help with?"
|
||||
/>
|
||||
</m.div>
|
||||
</div>
|
||||
) : (
|
||||
<EmptySession
|
||||
inputLayoutId={inputLayoutId}
|
||||
isCreatingSession={isCreatingSession}
|
||||
onCreateSession={onCreateSession}
|
||||
onSend={onSend}
|
||||
/>
|
||||
)}
|
||||
</div>
|
||||
</LayoutGroup>
|
||||
</CopilotChatActionsProvider>
|
||||
</LazyMotion>
|
||||
);
|
||||
};
|
||||
|
||||
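The ChatContainer hunk above is one instance of the motion-to-m migration that runs through several of the following components. As a rough self-contained sketch of that pattern (the component name and props below are illustrative, not part of the diff): m.* components only animate when rendered inside a LazyMotion provider, which loads the domAnimation feature set lazily instead of bundling the full motion runtime.

// Minimal sketch of the LazyMotion pattern used across these components.
// "FadeIn" and its props are placeholders, not code from the diff.
import { LazyMotion, domAnimation, m } from "framer-motion";
import type { ReactNode } from "react";

export function FadeIn({ children }: { children: ReactNode }) {
  return (
    // Loads DOM animation features lazily; `m.div` stays lightweight.
    <LazyMotion features={domAnimation}>
      <m.div
        initial={{ opacity: 0 }}
        animate={{ opacity: 1 }}
        transition={{ duration: 0.3 }}
      >
        {children}
      </m.div>
    </LazyMotion>
  );
}
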
@@ -117,6 +117,7 @@ export function AudioWaveform({
|
||||
{bars.map((height, i) => {
|
||||
const barHeight = Math.max(minBarHeight, height);
|
||||
return (
|
||||
// eslint-disable-next-line react/no-array-index-key
|
||||
<div
|
||||
key={i}
|
||||
className="relative"
|
||||
|
||||
@@ -12,6 +12,7 @@ import {
|
||||
import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner";
|
||||
import { toast } from "@/components/molecules/Toast/use-toast";
|
||||
import { ToolUIPart, UIDataTypes, UIMessage, UITools } from "ai";
|
||||
import Image from "next/image";
|
||||
import { useEffect, useRef, useState } from "react";
|
||||
import { CreateAgentTool } from "../../tools/CreateAgent/CreateAgent";
|
||||
import { EditAgentTool } from "../../tools/EditAgent/EditAgent";
|
||||
@@ -57,7 +58,7 @@ function resolveWorkspaceUrls(text: string): string {
|
||||
* Falls back to <video> when an <img> fails to load for workspace files.
|
||||
*/
|
||||
function WorkspaceMediaImage(props: React.JSX.IntrinsicElements["img"]) {
|
||||
const { src, alt, ...rest } = props;
|
||||
const { src, alt } = props;
|
||||
const [imgFailed, setImgFailed] = useState(false);
|
||||
const isWorkspace = src?.includes("/workspace/files/") ?? false;
|
||||
|
||||
@@ -79,16 +80,17 @@ function WorkspaceMediaImage(props: React.JSX.IntrinsicElements["img"]) {
|
||||
}
|
||||
|
||||
return (
|
||||
// eslint-disable-next-line @next/next/no-img-element
|
||||
<img
|
||||
<Image
|
||||
src={src}
|
||||
alt={alt || "Image"}
|
||||
className="h-auto max-w-full rounded-md border border-zinc-200"
|
||||
loading="lazy"
|
||||
width={0}
|
||||
height={0}
|
||||
sizes="100vw"
|
||||
className="h-auto w-full rounded-md border border-zinc-200"
|
||||
unoptimized
|
||||
onError={() => {
|
||||
if (isWorkspace) setImgFailed(true);
|
||||
}}
|
||||
{...rest}
|
||||
/>
|
||||
);
|
||||
}
|
||||
@@ -195,12 +197,12 @@ export const ChatMessagesContainer = ({
|
||||
"group-[.is-assistant]:bg-transparent group-[.is-assistant]:text-slate-900"
|
||||
}
|
||||
>
|
||||
{message.parts.map((part, i) => {
|
||||
{message.parts.map((part) => {
|
||||
switch (part.type) {
|
||||
case "text":
|
||||
return (
|
||||
<MessageResponse
|
||||
key={`${message.id}-${i}`}
|
||||
key={`${message.id}-text`}
|
||||
components={STREAMDOWN_COMPONENTS}
|
||||
>
|
||||
{resolveWorkspaceUrls(part.text)}
|
||||
@@ -209,7 +211,7 @@ export const ChatMessagesContainer = ({
|
||||
case "tool-find_block":
|
||||
return (
|
||||
<FindBlocksTool
|
||||
key={`${message.id}-${i}`}
|
||||
key={(part as ToolUIPart).toolCallId}
|
||||
part={part as ToolUIPart}
|
||||
/>
|
||||
);
|
||||
@@ -217,7 +219,7 @@ export const ChatMessagesContainer = ({
|
||||
case "tool-find_library_agent":
|
||||
return (
|
||||
<FindAgentsTool
|
||||
key={`${message.id}-${i}`}
|
||||
key={(part as ToolUIPart).toolCallId}
|
||||
part={part as ToolUIPart}
|
||||
/>
|
||||
);
|
||||
@@ -225,14 +227,14 @@ export const ChatMessagesContainer = ({
|
||||
case "tool-get_doc_page":
|
||||
return (
|
||||
<SearchDocsTool
|
||||
key={`${message.id}-${i}`}
|
||||
key={(part as ToolUIPart).toolCallId}
|
||||
part={part as ToolUIPart}
|
||||
/>
|
||||
);
|
||||
case "tool-run_block":
|
||||
return (
|
||||
<RunBlockTool
|
||||
key={`${message.id}-${i}`}
|
||||
key={(part as ToolUIPart).toolCallId}
|
||||
part={part as ToolUIPart}
|
||||
/>
|
||||
);
|
||||
@@ -240,42 +242,42 @@ export const ChatMessagesContainer = ({
|
||||
case "tool-schedule_agent":
|
||||
return (
|
||||
<RunAgentTool
|
||||
key={`${message.id}-${i}`}
|
||||
key={(part as ToolUIPart).toolCallId}
|
||||
part={part as ToolUIPart}
|
||||
/>
|
||||
);
|
||||
case "tool-create_agent":
|
||||
return (
|
||||
<CreateAgentTool
|
||||
key={`${message.id}-${i}`}
|
||||
key={(part as ToolUIPart).toolCallId}
|
||||
part={part as ToolUIPart}
|
||||
/>
|
||||
);
|
||||
case "tool-edit_agent":
|
||||
return (
|
||||
<EditAgentTool
|
||||
key={`${message.id}-${i}`}
|
||||
key={(part as ToolUIPart).toolCallId}
|
||||
part={part as ToolUIPart}
|
||||
/>
|
||||
);
|
||||
case "tool-view_agent_output":
|
||||
return (
|
||||
<ViewAgentOutputTool
|
||||
key={`${message.id}-${i}`}
|
||||
key={(part as ToolUIPart).toolCallId}
|
||||
part={part as ToolUIPart}
|
||||
/>
|
||||
);
|
||||
case "tool-search_feature_requests":
|
||||
return (
|
||||
<SearchFeatureRequestsTool
|
||||
key={`${message.id}-${i}`}
|
||||
key={(part as ToolUIPart).toolCallId}
|
||||
part={part as ToolUIPart}
|
||||
/>
|
||||
);
|
||||
case "tool-create_feature_request":
|
||||
return (
|
||||
<CreateFeatureRequestTool
|
||||
key={`${message.id}-${i}`}
|
||||
key={(part as ToolUIPart).toolCallId}
|
||||
part={part as ToolUIPart}
|
||||
/>
|
||||
);
|
||||
@@ -285,7 +287,7 @@ export const ChatMessagesContainer = ({
|
||||
if (part.type.startsWith("tool-")) {
|
||||
return (
|
||||
<GenericTool
|
||||
key={`${message.id}-${i}`}
|
||||
key={(part as ToolUIPart).toolCallId}
|
||||
part={part as ToolUIPart}
|
||||
/>
|
||||
);
|
||||
|
||||
@@ -25,7 +25,7 @@ import {
|
||||
import { cn } from "@/lib/utils";
|
||||
import { DotsThree, PlusCircleIcon, PlusIcon } from "@phosphor-icons/react";
|
||||
import { useQueryClient } from "@tanstack/react-query";
|
||||
import { motion } from "framer-motion";
|
||||
import { LazyMotion, domAnimation, m } from "framer-motion";
|
||||
import { parseAsString, useQueryState } from "nuqs";
|
||||
import { useState } from "react";
|
||||
import { DeleteChatDialog } from "../DeleteChatDialog/DeleteChatDialog";
|
||||
@@ -129,7 +129,7 @@ export function ChatSidebar() {
|
||||
}
|
||||
|
||||
return (
|
||||
<>
|
||||
<LazyMotion features={domAnimation}>
|
||||
<Sidebar
|
||||
variant="inset"
|
||||
collapsible="icon"
|
||||
@@ -144,7 +144,7 @@ export function ChatSidebar() {
|
||||
: "flex-row items-center justify-between",
|
||||
)}
|
||||
>
|
||||
<motion.div
|
||||
<m.div
|
||||
key={isCollapsed ? "header-collapsed" : "header-expanded"}
|
||||
className="flex flex-col items-center gap-3 pt-4"
|
||||
initial={{ opacity: 0, filter: "blur(3px)" }}
|
||||
@@ -162,12 +162,12 @@ export function ChatSidebar() {
|
||||
<span className="sr-only">New Chat</span>
|
||||
</Button>
|
||||
</div>
|
||||
</motion.div>
|
||||
</m.div>
|
||||
</SidebarHeader>
|
||||
)}
|
||||
<SidebarContent className="gap-4 overflow-y-auto px-4 py-4 [-ms-overflow-style:none] [scrollbar-width:none] [&::-webkit-scrollbar]:hidden">
|
||||
{!isCollapsed && (
|
||||
<motion.div
|
||||
<m.div
|
||||
initial={{ opacity: 0 }}
|
||||
animate={{ opacity: 1 }}
|
||||
transition={{ duration: 0.2, delay: 0.1 }}
|
||||
@@ -179,11 +179,11 @@ export function ChatSidebar() {
|
||||
<div className="relative left-6">
|
||||
<SidebarTrigger />
|
||||
</div>
|
||||
</motion.div>
|
||||
</m.div>
|
||||
)}
|
||||
|
||||
{!isCollapsed && (
|
||||
<motion.div
|
||||
<m.div
|
||||
initial={{ opacity: 0 }}
|
||||
animate={{ opacity: 1 }}
|
||||
transition={{ duration: 0.2, delay: 0.15 }}
|
||||
@@ -256,12 +256,12 @@ export function ChatSidebar() {
|
||||
</div>
|
||||
))
|
||||
)}
|
||||
</motion.div>
|
||||
</m.div>
|
||||
)}
|
||||
</SidebarContent>
|
||||
{!isCollapsed && sessionId && (
|
||||
<SidebarFooter className="shrink-0 bg-zinc-50 p-3 pb-1 shadow-[0_-4px_6px_-1px_rgba(0,0,0,0.05)]">
|
||||
<motion.div
|
||||
<m.div
|
||||
initial={{ opacity: 0 }}
|
||||
animate={{ opacity: 1 }}
|
||||
transition={{ duration: 0.2, delay: 0.2 }}
|
||||
@@ -275,7 +275,7 @@ export function ChatSidebar() {
|
||||
>
|
||||
New Chat
|
||||
</Button>
|
||||
</motion.div>
|
||||
</m.div>
|
||||
</SidebarFooter>
|
||||
)}
|
||||
</Sidebar>
|
||||
@@ -286,6 +286,6 @@ export function ChatSidebar() {
|
||||
onConfirm={handleConfirmDelete}
|
||||
onCancel={handleCancelDelete}
|
||||
/>
|
||||
</>
|
||||
</LazyMotion>
|
||||
);
|
||||
}
|
||||
|
||||
@@ -5,7 +5,7 @@ import { Button } from "@/components/atoms/Button/Button";
|
||||
import { Text } from "@/components/atoms/Text/Text";
|
||||
import { useSupabase } from "@/lib/supabase/hooks/useSupabase";
|
||||
import { SpinnerGapIcon } from "@phosphor-icons/react";
|
||||
import { motion } from "framer-motion";
|
||||
import { LazyMotion, domAnimation, m } from "framer-motion";
|
||||
import { useEffect, useState } from "react";
|
||||
import {
|
||||
getGreetingName,
|
||||
@@ -29,7 +29,7 @@ export function EmptySession({
|
||||
const greetingName = getGreetingName(user);
|
||||
const quickActions = getQuickActions();
|
||||
const [loadingAction, setLoadingAction] = useState<string | null>(null);
|
||||
const [inputPlaceholder, setInputPlaceholder] = useState(
|
||||
const [inputPlaceholder, setInputPlaceholder] = useState(() =>
|
||||
getInputPlaceholder(),
|
||||
);
|
||||
|
||||
@@ -49,63 +49,65 @@ export function EmptySession({
|
||||
}
|
||||
|
||||
return (
|
||||
<div className="flex h-full flex-1 items-center justify-center overflow-y-auto bg-[#f8f8f9] px-0 py-5 md:px-6 md:py-10">
|
||||
<motion.div
|
||||
className="w-full max-w-3xl text-center"
|
||||
initial={{ opacity: 0 }}
|
||||
animate={{ opacity: 1 }}
|
||||
transition={{ duration: 0.3 }}
|
||||
>
|
||||
<div className="mx-auto max-w-3xl">
|
||||
<Text variant="h3" className="mb-1 !text-[1.375rem] text-zinc-700">
|
||||
Hey, <span className="text-violet-600">{greetingName}</span>
|
||||
</Text>
|
||||
<Text variant="h3" className="mb-8 !font-normal">
|
||||
Tell me about your work — I'll find what to automate.
|
||||
</Text>
|
||||
<LazyMotion features={domAnimation}>
|
||||
<div className="flex h-full flex-1 items-center justify-center overflow-y-auto bg-[#f8f8f9] px-0 py-5 md:px-6 md:py-10">
|
||||
<m.div
|
||||
className="w-full max-w-3xl text-center"
|
||||
initial={{ opacity: 0 }}
|
||||
animate={{ opacity: 1 }}
|
||||
transition={{ duration: 0.3 }}
|
||||
>
|
||||
<div className="mx-auto max-w-3xl">
|
||||
<Text variant="h3" className="mb-1 !text-[1.375rem] text-zinc-700">
|
||||
Hey, <span className="text-violet-600">{greetingName}</span>
|
||||
</Text>
|
||||
<Text variant="h3" className="mb-8 !font-normal">
|
||||
Tell me about your work — I'll find what to automate.
|
||||
</Text>
|
||||
|
||||
<div className="mb-6">
|
||||
<motion.div
|
||||
layoutId={inputLayoutId}
|
||||
transition={{ type: "spring", bounce: 0.2, duration: 0.65 }}
|
||||
className="w-full px-2"
|
||||
>
|
||||
<ChatInput
|
||||
inputId="chat-input-empty"
|
||||
onSend={onSend}
|
||||
disabled={isCreatingSession}
|
||||
placeholder={inputPlaceholder}
|
||||
className="w-full"
|
||||
/>
|
||||
</motion.div>
|
||||
<div className="mb-6">
|
||||
<m.div
|
||||
layoutId={inputLayoutId}
|
||||
transition={{ type: "spring", bounce: 0.2, duration: 0.65 }}
|
||||
className="w-full px-2"
|
||||
>
|
||||
<ChatInput
|
||||
inputId="chat-input-empty"
|
||||
onSend={onSend}
|
||||
disabled={isCreatingSession}
|
||||
placeholder={inputPlaceholder}
|
||||
className="w-full"
|
||||
/>
|
||||
</m.div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div className="flex flex-wrap items-center justify-center gap-3 overflow-x-auto [-ms-overflow-style:none] [scrollbar-width:none] [&::-webkit-scrollbar]:hidden">
|
||||
{quickActions.map((action) => (
|
||||
<Button
|
||||
key={action}
|
||||
type="button"
|
||||
variant="outline"
|
||||
size="small"
|
||||
onClick={() => void handleQuickActionClick(action)}
|
||||
disabled={isCreatingSession || loadingAction !== null}
|
||||
aria-busy={loadingAction === action}
|
||||
leftIcon={
|
||||
loadingAction === action ? (
|
||||
<SpinnerGapIcon
|
||||
className="h-4 w-4 animate-spin"
|
||||
weight="bold"
|
||||
/>
|
||||
) : null
|
||||
}
|
||||
className="h-auto shrink-0 border-zinc-300 px-3 py-2 text-[.9rem] text-zinc-600"
|
||||
>
|
||||
{action}
|
||||
</Button>
|
||||
))}
|
||||
</div>
|
||||
</motion.div>
|
||||
</div>
|
||||
<div className="flex flex-wrap items-center justify-center gap-3 overflow-x-auto [-ms-overflow-style:none] [scrollbar-width:none] [&::-webkit-scrollbar]:hidden">
|
||||
{quickActions.map((action) => (
|
||||
<Button
|
||||
key={action}
|
||||
type="button"
|
||||
variant="outline"
|
||||
size="small"
|
||||
onClick={() => void handleQuickActionClick(action)}
|
||||
disabled={isCreatingSession || loadingAction !== null}
|
||||
aria-busy={loadingAction === action}
|
||||
leftIcon={
|
||||
loadingAction === action ? (
|
||||
<SpinnerGapIcon
|
||||
className="h-4 w-4 animate-spin"
|
||||
weight="bold"
|
||||
/>
|
||||
) : null
|
||||
}
|
||||
className="h-auto shrink-0 border-zinc-300 px-3 py-2 text-[.9rem] text-zinc-600"
|
||||
>
|
||||
{action}
|
||||
</Button>
|
||||
))}
|
||||
</div>
|
||||
</m.div>
|
||||
</div>
|
||||
</LazyMotion>
|
||||
);
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import { cn } from "@/lib/utils";
|
||||
import { AnimatePresence, motion } from "framer-motion";
|
||||
import { AnimatePresence, LazyMotion, domAnimation, m } from "framer-motion";
|
||||
|
||||
interface Props {
|
||||
text: string;
|
||||
@@ -10,45 +10,47 @@ export function MorphingTextAnimation({ text, className }: Props) {
|
||||
const letters = text.split("");
|
||||
|
||||
return (
|
||||
<div className={cn(className)}>
|
||||
<AnimatePresence mode="popLayout" initial={false}>
|
||||
<motion.div key={text} className="whitespace-nowrap">
|
||||
<motion.span className="inline-flex overflow-hidden">
|
||||
{letters.map((char, index) => (
|
||||
<motion.span
|
||||
key={`${text}-${index}`}
|
||||
initial={{
|
||||
opacity: 0,
|
||||
y: 8,
|
||||
rotateX: "80deg",
|
||||
filter: "blur(6px)",
|
||||
}}
|
||||
animate={{
|
||||
opacity: 1,
|
||||
y: 0,
|
||||
rotateX: "0deg",
|
||||
filter: "blur(0px)",
|
||||
}}
|
||||
exit={{
|
||||
opacity: 0,
|
||||
y: -8,
|
||||
rotateX: "-80deg",
|
||||
filter: "blur(6px)",
|
||||
}}
|
||||
style={{ willChange: "transform" }}
|
||||
transition={{
|
||||
delay: 0.015 * index,
|
||||
type: "spring",
|
||||
bounce: 0.5,
|
||||
}}
|
||||
className="inline-block"
|
||||
>
|
||||
{char === " " ? "\u00A0" : char}
|
||||
</motion.span>
|
||||
))}
|
||||
</motion.span>
|
||||
</motion.div>
|
||||
</AnimatePresence>
|
||||
</div>
|
||||
<LazyMotion features={domAnimation}>
|
||||
<div className={cn(className)}>
|
||||
<AnimatePresence mode="popLayout" initial={false}>
|
||||
<m.div key={text} className="whitespace-nowrap">
|
||||
<m.span className="inline-flex overflow-hidden">
|
||||
{letters.map((char, index) => (
|
||||
// eslint-disable-next-line react/no-array-index-key
|
||||
<m.span
|
||||
key={`${text}-${index}`}
|
||||
initial={{
|
||||
opacity: 0,
|
||||
y: 8,
|
||||
rotateX: "80deg",
|
||||
filter: "blur(6px)",
|
||||
}}
|
||||
animate={{
|
||||
opacity: 1,
|
||||
y: 0,
|
||||
rotateX: "0deg",
|
||||
filter: "blur(0px)",
|
||||
}}
|
||||
exit={{
|
||||
opacity: 0,
|
||||
y: -8,
|
||||
rotateX: "-80deg",
|
||||
filter: "blur(6px)",
|
||||
}}
|
||||
transition={{
|
||||
delay: 0.015 * index,
|
||||
type: "spring",
|
||||
bounce: 0.5,
|
||||
}}
|
||||
className="inline-block"
|
||||
>
|
||||
{char === " " ? "\u00A0" : char}
|
||||
</m.span>
|
||||
))}
|
||||
</m.span>
|
||||
</m.div>
|
||||
</AnimatePresence>
|
||||
</div>
|
||||
</LazyMotion>
|
||||
);
|
||||
}
|
||||
|
||||
@@ -2,7 +2,13 @@
|
||||
|
||||
import { cn } from "@/lib/utils";
|
||||
import { CaretDownIcon } from "@phosphor-icons/react";
|
||||
import { AnimatePresence, motion, useReducedMotion } from "framer-motion";
|
||||
import {
|
||||
AnimatePresence,
|
||||
LazyMotion,
|
||||
domAnimation,
|
||||
m,
|
||||
useReducedMotion,
|
||||
} from "framer-motion";
|
||||
import { useId } from "react";
|
||||
import { useToolAccordion } from "./useToolAccordion";
|
||||
|
||||
@@ -38,65 +44,66 @@ export function ToolAccordion({
|
||||
});
|
||||
|
||||
return (
|
||||
<div
|
||||
className={cn(
|
||||
"mt-2 w-full rounded-lg border border-slate-200 bg-slate-100 px-3 py-2",
|
||||
className,
|
||||
)}
|
||||
>
|
||||
<button
|
||||
type="button"
|
||||
aria-expanded={isExpanded}
|
||||
aria-controls={contentId}
|
||||
onClick={toggle}
|
||||
className="flex w-full items-center justify-between gap-3 py-1 text-left"
|
||||
>
|
||||
<div className="flex min-w-0 items-center gap-3">
|
||||
<span className="flex shrink-0 items-center text-gray-800">
|
||||
{icon}
|
||||
</span>
|
||||
<div className="min-w-0">
|
||||
<p
|
||||
className={cn(
|
||||
"truncate text-sm font-medium text-gray-800",
|
||||
titleClassName,
|
||||
)}
|
||||
>
|
||||
{title}
|
||||
</p>
|
||||
{description && (
|
||||
<p className="truncate text-xs text-slate-800">{description}</p>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
<CaretDownIcon
|
||||
className={cn(
|
||||
"h-4 w-4 shrink-0 text-slate-500 transition-transform",
|
||||
isExpanded && "rotate-180",
|
||||
)}
|
||||
weight="bold"
|
||||
/>
|
||||
</button>
|
||||
|
||||
<AnimatePresence initial={false}>
|
||||
{isExpanded && (
|
||||
<motion.div
|
||||
id={contentId}
|
||||
initial={{ height: 0, opacity: 0, filter: "blur(10px)" }}
|
||||
animate={{ height: "auto", opacity: 1, filter: "blur(0px)" }}
|
||||
exit={{ height: 0, opacity: 0, filter: "blur(10px)" }}
|
||||
transition={
|
||||
shouldReduceMotion
|
||||
? { duration: 0 }
|
||||
: { type: "spring", bounce: 0.35, duration: 0.55 }
|
||||
}
|
||||
className="overflow-hidden"
|
||||
style={{ willChange: "height, opacity, filter" }}
|
||||
>
|
||||
<div className="pb-2 pt-3">{children}</div>
|
||||
</motion.div>
|
||||
<LazyMotion features={domAnimation}>
|
||||
<div
|
||||
className={cn(
|
||||
"mt-2 w-full rounded-lg border border-slate-200 bg-slate-100 px-3 py-2",
|
||||
className,
|
||||
)}
|
||||
</AnimatePresence>
|
||||
</div>
|
||||
>
|
||||
<button
|
||||
type="button"
|
||||
aria-expanded={isExpanded}
|
||||
aria-controls={contentId}
|
||||
onClick={toggle}
|
||||
className="flex w-full items-center justify-between gap-3 py-1 text-left"
|
||||
>
|
||||
<div className="flex min-w-0 items-center gap-3">
|
||||
<span className="flex shrink-0 items-center text-gray-800">
|
||||
{icon}
|
||||
</span>
|
||||
<div className="min-w-0">
|
||||
<p
|
||||
className={cn(
|
||||
"truncate text-sm font-medium text-gray-800",
|
||||
titleClassName,
|
||||
)}
|
||||
>
|
||||
{title}
|
||||
</p>
|
||||
{description && (
|
||||
<p className="truncate text-xs text-slate-800">{description}</p>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
<CaretDownIcon
|
||||
className={cn(
|
||||
"h-4 w-4 shrink-0 text-slate-500 transition-transform",
|
||||
isExpanded && "rotate-180",
|
||||
)}
|
||||
weight="bold"
|
||||
/>
|
||||
</button>
|
||||
|
||||
<AnimatePresence initial={false}>
|
||||
{isExpanded && (
|
||||
<m.div
|
||||
id={contentId}
|
||||
initial={{ height: 0, opacity: 0, filter: "blur(10px)" }}
|
||||
animate={{ height: "auto", opacity: 1, filter: "blur(0px)" }}
|
||||
exit={{ height: 0, opacity: 0, filter: "blur(10px)" }}
|
||||
transition={
|
||||
shouldReduceMotion
|
||||
? { duration: 0 }
|
||||
: { type: "spring", bounce: 0.35, duration: 0.55 }
|
||||
}
|
||||
className="overflow-hidden"
|
||||
>
|
||||
<div className="pb-2 pt-3">{children}</div>
|
||||
</m.div>
|
||||
)}
|
||||
</AnimatePresence>
|
||||
</div>
|
||||
</LazyMotion>
|
||||
);
|
||||
}
|
||||
|
||||
@@ -0,0 +1,14 @@
|
||||
import type { Metadata } from "next";
|
||||
|
||||
export const metadata: Metadata = {
|
||||
title: "Copilot",
|
||||
description: "Chat with your AI copilot",
|
||||
};
|
||||
|
||||
export default function CopilotLayout({
|
||||
children,
|
||||
}: {
|
||||
children: React.ReactNode;
|
||||
}) {
|
||||
return children;
|
||||
}
|
||||
@@ -0,0 +1,14 @@
|
||||
import type { Metadata } from "next";
|
||||
|
||||
export const metadata: Metadata = {
|
||||
title: "Copilot Styleguide",
|
||||
description: "Copilot UI component styleguide",
|
||||
};
|
||||
|
||||
export default function StyleguideLayout({
|
||||
children,
|
||||
}: {
|
||||
children: React.ReactNode;
|
||||
}) {
|
||||
return children;
|
||||
}
|
||||
@@ -161,7 +161,7 @@ export function ClarificationQuestionsCard({
|
||||
|
||||
return (
|
||||
<div
|
||||
key={`${q.keyword}-${index}`}
|
||||
key={q.keyword}
|
||||
className={cn(
|
||||
"relative rounded-lg border p-3",
|
||||
isAnswered
|
||||
|
||||
@@ -557,8 +557,11 @@ function getTodoAccordionData(input: unknown): AccordionData {
|
||||
description: `${completed}/${total} completed`,
|
||||
content: (
|
||||
<div className="space-y-1 py-1">
|
||||
{todos.map((todo, i) => (
|
||||
<div key={i} className="flex items-start gap-2 text-xs">
|
||||
{todos.map((todo, idx) => (
|
||||
<div
|
||||
key={`${todo.status}:${todo.content}:${idx}`}
|
||||
className="flex items-start gap-2 text-xs"
|
||||
>
|
||||
<span className="mt-0.5 flex-shrink-0">
|
||||
{todo.status === "completed" ? (
|
||||
<CheckCircleIcon
|
||||
|
||||
@@ -4,7 +4,7 @@ import type { AgentDetailsResponse } from "@/app/api/__generated__/models/agentD
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import { Text } from "@/components/atoms/Text/Text";
|
||||
import { FormRenderer } from "@/components/renderers/InputRenderer/FormRenderer";
|
||||
import { AnimatePresence, motion } from "framer-motion";
|
||||
import { AnimatePresence, LazyMotion, domAnimation, m } from "framer-motion";
|
||||
import { useState } from "react";
|
||||
import { useCopilotChatActions } from "../../../../components/CopilotChatActionsProvider/useCopilotChatActions";
|
||||
import { ContentMessage } from "../../../../components/ToolAccordion/AccordionContent";
|
||||
@@ -39,78 +39,83 @@ export function AgentDetailsCard({ output }: Props) {
|
||||
}
|
||||
|
||||
return (
|
||||
<div className="grid gap-2">
|
||||
<ContentMessage>
|
||||
Run this agent with example values or your own inputs.
|
||||
</ContentMessage>
|
||||
<LazyMotion features={domAnimation}>
|
||||
<div className="grid gap-2">
|
||||
<ContentMessage>
|
||||
Run this agent with example values or your own inputs.
|
||||
</ContentMessage>
|
||||
|
||||
<div className="flex gap-2 pt-4">
|
||||
<Button size="small" className="w-fit" onClick={handleRunWithExamples}>
|
||||
Run with example values
|
||||
</Button>
|
||||
<Button
|
||||
variant="outline"
|
||||
size="small"
|
||||
className="w-fit"
|
||||
onClick={() => setShowInputForm((prev) => !prev)}
|
||||
>
|
||||
Run with my inputs
|
||||
</Button>
|
||||
</div>
|
||||
|
||||
<AnimatePresence initial={false}>
|
||||
{showInputForm && buildInputSchema(output.agent.inputs) && (
|
||||
<motion.div
|
||||
initial={{ height: 0, opacity: 0, filter: "blur(6px)" }}
|
||||
animate={{ height: "auto", opacity: 1, filter: "blur(0px)" }}
|
||||
exit={{ height: 0, opacity: 0, filter: "blur(6px)" }}
|
||||
transition={{
|
||||
height: { type: "spring", bounce: 0.15, duration: 0.5 },
|
||||
opacity: { duration: 0.25 },
|
||||
filter: { duration: 0.2 },
|
||||
}}
|
||||
className="overflow-hidden"
|
||||
style={{ willChange: "height, opacity, filter" }}
|
||||
<div className="flex gap-2 pt-4">
|
||||
<Button
|
||||
size="small"
|
||||
className="w-fit"
|
||||
onClick={handleRunWithExamples}
|
||||
>
|
||||
<div className="mt-4 rounded-2xl border bg-background p-3 pt-4">
|
||||
<Text variant="body-medium">Enter your inputs</Text>
|
||||
<FormRenderer
|
||||
jsonSchema={buildInputSchema(output.agent.inputs)!}
|
||||
handleChange={(v) => setInputValues(v.formData ?? {})}
|
||||
uiSchema={{
|
||||
"ui:submitButtonOptions": { norender: true },
|
||||
}}
|
||||
initialValues={inputValues}
|
||||
formContext={{
|
||||
showHandles: false,
|
||||
size: "small",
|
||||
}}
|
||||
/>
|
||||
<div className="-mt-8 flex gap-2">
|
||||
<Button
|
||||
variant="primary"
|
||||
size="small"
|
||||
className="w-fit"
|
||||
onClick={handleRunWithInputs}
|
||||
>
|
||||
Run
|
||||
</Button>
|
||||
<Button
|
||||
variant="secondary"
|
||||
size="small"
|
||||
className="w-fit"
|
||||
onClick={() => {
|
||||
setShowInputForm(false);
|
||||
setInputValues({});
|
||||
Run with example values
|
||||
</Button>
|
||||
<Button
|
||||
variant="outline"
|
||||
size="small"
|
||||
className="w-fit"
|
||||
onClick={() => setShowInputForm((prev) => !prev)}
|
||||
>
|
||||
Run with my inputs
|
||||
</Button>
|
||||
</div>
|
||||
|
||||
<AnimatePresence initial={false}>
|
||||
{showInputForm && buildInputSchema(output.agent.inputs) && (
|
||||
<m.div
|
||||
initial={{ height: 0, opacity: 0, filter: "blur(6px)" }}
|
||||
animate={{ height: "auto", opacity: 1, filter: "blur(0px)" }}
|
||||
exit={{ height: 0, opacity: 0, filter: "blur(6px)" }}
|
||||
transition={{
|
||||
height: { type: "spring", bounce: 0.15, duration: 0.5 },
|
||||
opacity: { duration: 0.25 },
|
||||
filter: { duration: 0.2 },
|
||||
}}
|
||||
className="overflow-hidden"
|
||||
>
|
||||
<div className="mt-4 rounded-2xl border bg-background p-3 pt-4">
|
||||
<Text variant="body-medium">Enter your inputs</Text>
|
||||
<FormRenderer
|
||||
jsonSchema={buildInputSchema(output.agent.inputs)!}
|
||||
handleChange={(v) => setInputValues(v.formData ?? {})}
|
||||
uiSchema={{
|
||||
"ui:submitButtonOptions": { norender: true },
|
||||
}}
|
||||
>
|
||||
Cancel
|
||||
</Button>
|
||||
initialValues={inputValues}
|
||||
formContext={{
|
||||
showHandles: false,
|
||||
size: "small",
|
||||
}}
|
||||
/>
|
||||
<div className="-mt-8 flex gap-2">
|
||||
<Button
|
||||
variant="primary"
|
||||
size="small"
|
||||
className="w-fit"
|
||||
onClick={handleRunWithInputs}
|
||||
>
|
||||
Run
|
||||
</Button>
|
||||
<Button
|
||||
variant="secondary"
|
||||
size="small"
|
||||
className="w-fit"
|
||||
onClick={() => {
|
||||
setShowInputForm(false);
|
||||
setInputValues({});
|
||||
}}
|
||||
>
|
||||
Cancel
|
||||
</Button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</motion.div>
|
||||
)}
|
||||
</AnimatePresence>
|
||||
</div>
|
||||
</m.div>
|
||||
)}
|
||||
</AnimatePresence>
|
||||
</div>
|
||||
</LazyMotion>
|
||||
);
|
||||
}
|
||||
|
||||
@@ -103,7 +103,7 @@ function OutputKeySection({
|
||||
</div>
|
||||
<div className="mt-2">
|
||||
{visibleItems.map((item, i) => (
|
||||
<RenderOutputValue key={i} value={item} />
|
||||
<RenderOutputValue key={`${outputKey}-${i}`} value={item} />
|
||||
))}
|
||||
</div>
|
||||
{hasMoreItems && (
|
||||
|
||||
@@ -209,7 +209,10 @@ export function ViewAgentOutputTool({ part }: Props) {
|
||||
</div>
|
||||
<div className="mt-2">
|
||||
{items.slice(0, 3).map((item, i) => (
|
||||
<RenderOutputValue key={i} value={item} />
|
||||
<RenderOutputValue
|
||||
key={`${key}-${i}`}
|
||||
value={item}
|
||||
/>
|
||||
))}
|
||||
</div>
|
||||
</ContentCard>
|
||||
|
||||
@@ -23,13 +23,23 @@ export function SidebarItemCard({
|
||||
onClick,
|
||||
actions,
|
||||
}: Props) {
|
||||
function handleKeyDown(e: React.KeyboardEvent<HTMLDivElement>) {
|
||||
if (e.key === "Enter" || e.key === " ") {
|
||||
e.preventDefault();
|
||||
onClick?.();
|
||||
}
|
||||
}
|
||||
|
||||
return (
|
||||
<div
|
||||
role="button"
|
||||
tabIndex={0}
|
||||
className={cn(
|
||||
"w-full cursor-pointer rounded-large border border-zinc-200 bg-white p-3 text-left ring-1 ring-transparent transition-all duration-150 hover:scale-[1.01] hover:bg-slate-50/50",
|
||||
selected ? "border-slate-800 ring-slate-800" : undefined,
|
||||
)}
|
||||
onClick={onClick}
|
||||
onKeyDown={handleKeyDown}
|
||||
>
|
||||
<div className="flex min-w-0 items-center justify-start gap-3">
|
||||
{icon}
|
||||
@@ -49,7 +59,13 @@ export function SidebarItemCard({
|
||||
</Text>
|
||||
</div>
|
||||
{actions ? (
|
||||
<div onClick={(e) => e.stopPropagation()}>{actions}</div>
|
||||
<div
|
||||
role="presentation"
|
||||
onClick={(e) => e.stopPropagation()}
|
||||
onKeyDown={(e) => e.stopPropagation()}
|
||||
>
|
||||
{actions}
|
||||
</div>
|
||||
) : null}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -1,15 +1,12 @@
|
||||
"use client";
|
||||
import { redirect } from "next/navigation";
|
||||
|
||||
import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner";
|
||||
import { useRouter } from "next/navigation";
|
||||
import { useEffect } from "react";
|
||||
import type { Metadata } from "next";
|
||||
|
||||
export const metadata: Metadata = {
|
||||
title: "AutoGPT Platform",
|
||||
description: "AutoGPT Platform",
|
||||
};
|
||||
|
||||
export default function Page() {
|
||||
const router = useRouter();
|
||||
|
||||
useEffect(() => {
|
||||
router.replace("/copilot");
|
||||
}, [router]);
|
||||
|
||||
return <LoadingSpinner size="large" cover />;
|
||||
redirect("/copilot");
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
"use client";
|
||||
|
||||
import * as React from "react";
|
||||
import Image from "next/image";
|
||||
|
||||
const getYouTubeVideoId = (url: string) => {
|
||||
const regExp =
|
||||
@@ -76,6 +77,7 @@ const VideoRenderer: React.FC<{ videoUrl: string }> = ({ videoUrl }) => {
|
||||
width="100%"
|
||||
height="315"
|
||||
src={`https://www.youtube.com/embed/${videoId}`}
|
||||
title="Embedded content"
|
||||
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture"
|
||||
allowFullScreen
|
||||
></iframe>
|
||||
@@ -92,15 +94,15 @@ const VideoRenderer: React.FC<{ videoUrl: string }> = ({ videoUrl }) => {
|
||||
const ImageRenderer: React.FC<{ imageUrl: string }> = ({ imageUrl }) => {
|
||||
return (
|
||||
<div className="w-full p-2">
|
||||
<picture>
|
||||
<img
|
||||
src={imageUrl}
|
||||
alt="Image"
|
||||
className="h-auto max-w-full"
|
||||
width="100%"
|
||||
height="auto"
|
||||
/>
|
||||
</picture>
|
||||
<Image
|
||||
src={imageUrl}
|
||||
alt="Image"
|
||||
width={0}
|
||||
height={0}
|
||||
sizes="100vw"
|
||||
className="h-auto w-full"
|
||||
unoptimized
|
||||
/>
|
||||
</div>
|
||||
);
|
||||
};
|
||||
|
||||
@@ -93,7 +93,7 @@ export function APIKeyCredentialsModal({
        <FormDescription>
          Required scope(s) for this block:{" "}
          {schema.credentials_scopes?.map((s, i, a) => (
            <span key={i}>
            <span key={s}>
              <code className="text-xs font-bold">{s}</code>
              {i < a.length - 1 && ", "}
            </span>

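The key change above, like the toolCallId and content-derived keys elsewhere in this compare, replaces an array index with a value that stays attached to the item. A small sketch of the difference, using made-up data:

// Sketch: keying list items by a stable identifier instead of the array
// index, as in the scope list and tool-part changes in this compare.
// The type and component here are illustrative.
type Part = { toolCallId: string; label: string };

export function ToolPartList({ parts }: { parts: Part[] }) {
  return (
    <ul>
      {parts.map((part) => (
        // A stable key survives reordering and removal; `key={i}` would
        // remount items or mis-associate their state when the list shifts.
        <li key={part.toolCallId}>{part.label}</li>
      ))}
    </ul>
  );
}
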
@@ -1,4 +1,4 @@
|
||||
import { useEffect, useState } from "react";
|
||||
import { useEffect, useRef, useState } from "react";
|
||||
import { z } from "zod";
|
||||
import { useForm } from "react-hook-form";
|
||||
import { zodResolver } from "@hookform/resolvers/zod";
|
||||
@@ -66,16 +66,18 @@ export function HostScopedCredentialsModal({
|
||||
});
|
||||
|
||||
const [headerPairs, setHeaderPairs] = useState<
|
||||
Array<{ key: string; value: string }>
|
||||
>([{ key: "", value: "" }]);
|
||||
Array<{ id: string; key: string; value: string }>
|
||||
>([{ id: crypto.randomUUID(), key: "", value: "" }]);
|
||||
|
||||
// Update form values when siblingInputs change
|
||||
const prevHostRef = useRef(currentHost);
|
||||
useEffect(() => {
|
||||
if (currentHost === prevHostRef.current) return;
|
||||
prevHostRef.current = currentHost;
|
||||
if (currentHost) {
|
||||
form.setValue("host", currentHost);
|
||||
form.setValue("title", currentHost);
|
||||
} else {
|
||||
// Reset to empty when no current host
|
||||
form.setValue("host", "");
|
||||
form.setValue("title", "Manual Entry");
|
||||
}
|
||||
@@ -91,9 +93,12 @@ export function HostScopedCredentialsModal({
|
||||
|
||||
const { provider, providerName, createHostScopedCredentials } = credentials;
|
||||
|
||||
const addHeaderPair = () => {
|
||||
setHeaderPairs([...headerPairs, { key: "", value: "" }]);
|
||||
};
|
||||
function addHeaderPair() {
|
||||
setHeaderPairs((prev) => [
|
||||
...prev,
|
||||
{ id: crypto.randomUUID(), key: "", value: "" },
|
||||
]);
|
||||
}
|
||||
|
||||
const removeHeaderPair = (index: number) => {
|
||||
if (headerPairs.length > 1) {
|
||||
@@ -192,7 +197,7 @@ export function HostScopedCredentialsModal({
|
||||
</FormDescription>
|
||||
|
||||
{headerPairs.map((pair, index) => (
|
||||
<div key={index} className="flex w-full items-center gap-4">
|
||||
<div key={pair.id} className="flex w-full items-center gap-4">
|
||||
<Input
|
||||
id={`header-${index}-key`}
|
||||
label="Header Name"
|
||||
|
||||
@@ -1,4 +1,4 @@
import { useEffect, useState } from "react";
import { useRef, useState } from "react";
import { Input } from "@/components/__legacy__/ui/input";
import { Button } from "@/components/__legacy__/ui/button";
import { useToast } from "@/components/molecules/Toast/use-toast";

@@ -7,6 +7,7 @@ import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { getTimezoneDisplayName } from "@/lib/timezone-utils";
import { useUserTimezone } from "@/lib/hooks/useUserTimezone";
import { InfoIcon } from "lucide-react";
import Link from "next/link";

// Base type for cron expression only
type CronOnlyCallback = (cronExpression: string) => void;

@@ -53,15 +54,15 @@ export function CronSchedulerDialog(props: CronSchedulerDialogProps) {
  const userTimezone = useUserTimezone();
  const timezoneDisplay = getTimezoneDisplayName(userTimezone || "UTC");

  // Reset state when dialog opens
  useEffect(() => {
    if (open) {
      const defaultName =
        props.mode === "with-name" ? props.defaultScheduleName || "" : "";
      setScheduleName(defaultName);
      setCronExpression(defaultCronExpression);
    }
  }, [open, props, defaultCronExpression]);
  // Reset state when dialog opens (render-time sync instead of useEffect)
  const prevOpenRef = useRef(open);
  if (open && !prevOpenRef.current) {
    const defaultName =
      props.mode === "with-name" ? props.defaultScheduleName || "" : "";
    setScheduleName(defaultName);
    setCronExpression(defaultCronExpression);
  }
  prevOpenRef.current = open;

  const handleDone = () => {
    if (props.mode === "with-name" && !scheduleName.trim()) {

@@ -100,8 +101,11 @@ export function CronSchedulerDialog(props: CronSchedulerDialogProps) {
        <div className="flex flex-col gap-4">
          {props.mode === "with-name" && (
            <div className="flex max-w-[448px] flex-col space-y-2">
              <label className="text-sm font-medium">Schedule Name</label>
              <label htmlFor="schedule-name" className="text-sm font-medium">
                Schedule Name
              </label>
              <Input
                id="schedule-name"
                value={scheduleName}
                onChange={(e) => setScheduleName(e.target.value)}
                placeholder="Enter a name for this schedule"

@@ -121,9 +125,9 @@ export function CronSchedulerDialog(props: CronSchedulerDialogProps) {
              <InfoIcon className="h-4 w-4 text-amber-600" />
              <p className="text-sm text-amber-800">
                No timezone set. Schedule will run in UTC.
                <a href="/profile/settings" className="ml-1 underline">
                <Link href="/profile/settings" className="ml-1 underline">
                  Set your timezone
                </a>
                </Link>
              </p>
            </div>
          ) : (

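The CronSchedulerDialog hunk above replaces an open-triggered useEffect with a reset performed during render. A stripped-down sketch of that pattern (the dialog name, props, and state below are placeholders): comparing the current open prop against a ref holding its previous value lets the reset happen in the same render pass as the prop change, avoiding the extra re-render an effect would cause.

// Sketch of the render-time reset pattern from the hunk above.
import { useRef, useState } from "react";

export function ExampleDialog({
  open,
  defaultName,
}: {
  open: boolean;
  defaultName: string;
}) {
  const [name, setName] = useState(defaultName);

  // Reset local state exactly once on the closed -> open transition,
  // during render, instead of in a useEffect that fires after render.
  const prevOpenRef = useRef(open);
  if (open && !prevOpenRef.current) {
    setName(defaultName);
  }
  prevOpenRef.current = open;

  if (!open) return null;
  return <input value={name} onChange={(e) => setName(e.target.value)} />;
}
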
@@ -452,7 +452,7 @@ export function CronScheduler({
|
||||
const monthNumber = i + 1;
|
||||
return (
|
||||
<Button
|
||||
key={i}
|
||||
key={month.label}
|
||||
variant={
|
||||
selectedMonths.includes(monthNumber) ? "default" : "outline"
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
"use client";
|
||||
|
||||
import React from "react";
|
||||
import Image from "next/image";
|
||||
import ReactMarkdown from "react-markdown";
|
||||
import remarkGfm from "remark-gfm";
|
||||
import remarkMath from "remark-math";
|
||||
@@ -359,20 +360,23 @@ function renderMarkdown(
|
||||
</del>
|
||||
),
|
||||
// Image handling
|
||||
img: ({ src, alt, ...props }) => {
|
||||
img: ({ src, alt }) => {
|
||||
// Check if it's a video URL pattern
|
||||
if (src && isVideoUrl(src)) {
|
||||
return renderVideoEmbed(src);
|
||||
}
|
||||
|
||||
if (!src) return null;
|
||||
|
||||
return (
|
||||
// eslint-disable-next-line @next/next/no-img-element
|
||||
<img
|
||||
<Image
|
||||
src={src}
|
||||
alt={alt}
|
||||
className="my-4 h-auto max-w-full rounded-lg shadow-md"
|
||||
loading="lazy"
|
||||
{...props}
|
||||
alt={alt || "Image"}
|
||||
width={0}
|
||||
height={0}
|
||||
sizes="100vw"
|
||||
className="my-4 h-auto w-full rounded-lg shadow-md"
|
||||
unoptimized
|
||||
/>
|
||||
);
|
||||
},
|
||||
|
||||
@@ -89,7 +89,6 @@ export function ActivityDropdown({
|
||||
className="!focus:border-1 w-full pr-10"
|
||||
wrapperClassName="!mb-0"
|
||||
autoComplete="off"
|
||||
autoFocus
|
||||
/>
|
||||
<button
|
||||
onClick={handleClearSearch}
|
||||
|
||||
@@ -65,7 +65,7 @@ The result routes data to yes_output or no_output, enabling intelligent branchin
|
||||
| condition | A plaintext English description of the condition to evaluate | str | Yes |
|
||||
| yes_value | (Optional) Value to output if the condition is true. If not provided, input_value will be used. | Yes Value | No |
|
||||
| no_value | (Optional) Value to output if the condition is false. If not provided, input_value will be used. | No Value | No |
|
||||
| model | The language model to use for evaluating the condition. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
|
||||
| model | The language model to use for evaluating the condition. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
|
||||
|
||||
### Outputs
|
||||
|
||||
@@ -103,7 +103,7 @@ The block sends the entire conversation history to the chosen LLM, including sys
|
||||
|-------|-------------|------|----------|
|
||||
| prompt | The prompt to send to the language model. | str | No |
|
||||
| messages | List of messages in the conversation. | List[Any] | Yes |
|
||||
| model | The language model to use for the conversation. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| model | The language model to use for the conversation. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| max_tokens | The maximum number of tokens to generate in the chat completion. | int | No |
| ollama_host | Ollama host for local models | str | No |
@@ -257,7 +257,7 @@ The block formulates a prompt based on the given focus or source data, sends it
|-------|-------------|------|----------|
| focus | The focus of the list to generate. | str | No |
| source_data | The data to generate the list from. | str | No |
| model | The language model to use for generating the list. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| model | The language model to use for generating the list. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| max_retries | Maximum number of retries for generating a valid list. | int | No |
| force_json_output | Whether to force the LLM to produce a JSON-only response. This can increase the block's reliability, but may also reduce the quality of the response because it prohibits the LLM from reasoning before providing its JSON response. | bool | No |
| max_tokens | The maximum number of tokens to generate in the chat completion. | int | No |
@@ -424,7 +424,7 @@ The block sends the input prompt to a chosen LLM, along with any system prompts
| prompt | The prompt to send to the language model. | str | Yes |
| expected_format | Expected format of the response. If provided, the response will be validated against this format. The keys should be the expected fields in the response, and the values should be the description of the field. | Dict[str, str] | Yes |
| list_result | Whether the response should be a list of objects in the expected format. | bool | No |
| model | The language model to use for answering the prompt. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| model | The language model to use for answering the prompt. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| force_json_output | Whether to force the LLM to produce a JSON-only response. This can increase the block's reliability, but may also reduce the quality of the response because it prohibits the LLM from reasoning before providing its JSON response. | bool | No |
| sys_prompt | The system prompt to provide additional context to the model. | str | No |
| conversation_history | The conversation history to provide context for the prompt. | List[Dict[str, Any]] | No |
@@ -464,7 +464,7 @@ The block sends the input prompt to a chosen LLM, processes the response, and re
| Input | Description | Type | Required |
|-------|-------------|------|----------|
| prompt | The prompt to send to the language model. You can use any of the {keys} from Prompt Values to fill in the prompt with values from the prompt values dictionary by putting them in curly braces. | str | Yes |
| model | The language model to use for answering the prompt. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| model | The language model to use for answering the prompt. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| sys_prompt | The system prompt to provide additional context to the model. | str | No |
| retry | Number of times to retry the LLM call if the response does not match the expected format. | int | No |
| prompt_values | Values used to fill in the prompt. The values can be used in the prompt by putting them in double curly braces, e.g. {{variable_name}}. | Dict[str, str] | No |
@@ -501,7 +501,7 @@ The block splits the input text into smaller chunks, sends each chunk to an LLM
| Input | Description | Type | Required |
|-------|-------------|------|----------|
| text | The text to summarize. | str | Yes |
| model | The language model to use for summarizing the text. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| model | The language model to use for summarizing the text. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| focus | The topic to focus on in the summary | str | No |
| style | The style of the summary to generate. | "concise" \| "detailed" \| "bullet points" \| "numbered list" | No |
| max_tokens | The maximum number of tokens to generate in the chat completion. | int | No |
@@ -763,7 +763,7 @@ Configure agent_mode_max_iterations to control loop behavior: 0 for single decis
| Input | Description | Type | Required |
|-------|-------------|------|----------|
| prompt | The prompt to send to the language model. | str | Yes |
| model | The language model to use for answering the prompt. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| model | The language model to use for answering the prompt. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| multiple_tool_calls | Whether to allow multiple tool calls in a single response. | bool | No |
| sys_prompt | The system prompt to provide additional context to the model. | str | No |
| conversation_history | The conversation history to provide context for the prompt. | List[Dict[str, Any]] | No |
@@ -218,17 +218,6 @@ If you initially installed Docker with Hyper-V, you **don’t need to reinstall*
For more details, refer to [Docker's official documentation](https://docs.docker.com/desktop/windows/wsl/).
### ⚠️ Podman Not Supported
AutoGPT requires **Docker** (Docker Desktop or Docker Engine). **Podman and podman-compose are not supported** and may cause path resolution issues, particularly on Windows.
If you see errors like:
```text
Error: the specified Containerfile or Dockerfile does not exist, ..\..\autogpt_platform\backend\Dockerfile
```
This indicates you're using Podman instead of Docker. Please install [Docker Desktop](https://docs.docker.com/desktop/) and use `docker compose` instead of `podman-compose`.
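For reference, here is a minimal sketch of the Docker-based flow once you have switched away from Podman. The `autogpt_platform` directory is assumed from the error path above; adjust it to match your checkout.

```shell
# Minimal sketch, assuming the compose setup lives under autogpt_platform/
cd autogpt_platform

# Build and start the stack with Docker Compose v2 (not podman-compose)
docker compose up -d --build

# Verify the containers are running
docker compose ps
```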
## Development