mirror of
https://github.com/Significant-Gravitas/AutoGPT.git
synced 2026-01-26 07:28:44 -05:00
Compare commits
4 Commits
abhi/show-
...
remove-cla
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
1e0ffb4211 | ||
|
|
3a9a0997f6 | ||
|
|
ee71866ed6 | ||
|
|
168e4ed0ec |
@@ -115,7 +115,6 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta):
|
||||
CLAUDE_4_5_OPUS = "claude-opus-4-5-20251101"
|
||||
CLAUDE_4_5_SONNET = "claude-sonnet-4-5-20250929"
|
||||
CLAUDE_4_5_HAIKU = "claude-haiku-4-5-20251001"
|
||||
CLAUDE_3_7_SONNET = "claude-3-7-sonnet-20250219"
|
||||
CLAUDE_3_HAIKU = "claude-3-haiku-20240307"
|
||||
# AI/ML API models
|
||||
AIML_API_QWEN2_5_72B = "Qwen/Qwen2.5-72B-Instruct-Turbo"
|
||||
@@ -280,9 +279,6 @@ MODEL_METADATA = {
|
||||
LlmModel.CLAUDE_4_5_HAIKU: ModelMetadata(
|
||||
"anthropic", 200000, 64000, "Claude Haiku 4.5", "Anthropic", "Anthropic", 2
|
||||
), # claude-haiku-4-5-20251001
|
||||
LlmModel.CLAUDE_3_7_SONNET: ModelMetadata(
|
||||
"anthropic", 200000, 64000, "Claude 3.7 Sonnet", "Anthropic", "Anthropic", 2
|
||||
), # claude-3-7-sonnet-20250219
|
||||
LlmModel.CLAUDE_3_HAIKU: ModelMetadata(
|
||||
"anthropic", 200000, 4096, "Claude 3 Haiku", "Anthropic", "Anthropic", 1
|
||||
), # claude-3-haiku-20240307
|
||||
|
||||
@@ -82,9 +82,6 @@ class StagehandRecommendedLlmModel(str, Enum):
|
||||
GPT41 = "gpt-4.1-2025-04-14"
|
||||
GPT41_MINI = "gpt-4.1-mini-2025-04-14"
|
||||
|
||||
# Anthropic
|
||||
CLAUDE_3_7_SONNET = "claude-3-7-sonnet-20250219"
|
||||
|
||||
@property
|
||||
def provider_name(self) -> str:
|
||||
"""
|
||||
@@ -137,7 +134,7 @@ class StagehandObserveBlock(Block):
|
||||
model: StagehandRecommendedLlmModel = SchemaField(
|
||||
title="LLM Model",
|
||||
description="LLM to use for Stagehand (provider is inferred)",
|
||||
default=StagehandRecommendedLlmModel.CLAUDE_3_7_SONNET,
|
||||
default=StagehandRecommendedLlmModel.GPT41,
|
||||
advanced=False,
|
||||
)
|
||||
model_credentials: AICredentials = AICredentialsField()
|
||||
@@ -230,7 +227,7 @@ class StagehandActBlock(Block):
|
||||
model: StagehandRecommendedLlmModel = SchemaField(
|
||||
title="LLM Model",
|
||||
description="LLM to use for Stagehand (provider is inferred)",
|
||||
default=StagehandRecommendedLlmModel.CLAUDE_3_7_SONNET,
|
||||
default=StagehandRecommendedLlmModel.GPT41,
|
||||
advanced=False,
|
||||
)
|
||||
model_credentials: AICredentials = AICredentialsField()
|
||||
@@ -330,7 +327,7 @@ class StagehandExtractBlock(Block):
|
||||
model: StagehandRecommendedLlmModel = SchemaField(
|
||||
title="LLM Model",
|
||||
description="LLM to use for Stagehand (provider is inferred)",
|
||||
default=StagehandRecommendedLlmModel.CLAUDE_3_7_SONNET,
|
||||
default=StagehandRecommendedLlmModel.GPT41,
|
||||
advanced=False,
|
||||
)
|
||||
model_credentials: AICredentials = AICredentialsField()
|
||||
|
||||
@@ -81,7 +81,6 @@ MODEL_COST: dict[LlmModel, int] = {
|
||||
LlmModel.CLAUDE_4_5_HAIKU: 4,
|
||||
LlmModel.CLAUDE_4_5_OPUS: 14,
|
||||
LlmModel.CLAUDE_4_5_SONNET: 9,
|
||||
LlmModel.CLAUDE_3_7_SONNET: 5,
|
||||
LlmModel.CLAUDE_3_HAIKU: 1,
|
||||
LlmModel.AIML_API_QWEN2_5_72B: 1,
|
||||
LlmModel.AIML_API_LLAMA3_1_70B: 1,
|
||||
|
||||
@@ -0,0 +1,11 @@
|
||||
-- Migrate Claude 3.7 Sonnet to Claude 4.5 Sonnet
|
||||
-- This updates all AgentNode blocks that use the deprecated Claude 3.7 Sonnet model
|
||||
-- Anthropic is retiring claude-3-7-sonnet-20250219 on February 19, 2026
|
||||
|
||||
UPDATE "AgentNode"
|
||||
SET "constantInput" = JSONB_SET(
|
||||
"constantInput"::jsonb,
|
||||
'{model}',
|
||||
'"claude-sonnet-4-5-20250929"'::jsonb
|
||||
)
|
||||
WHERE "constantInput"::jsonb->>'model' = 'claude-3-7-sonnet-20250219';
|
||||
@@ -38,12 +38,8 @@ export const AgentOutputs = ({ flowID }: { flowID: string | null }) => {
|
||||
|
||||
return outputNodes
|
||||
.map((node) => {
|
||||
const executionResults = node.data.nodeExecutionResults || [];
|
||||
const latestResult =
|
||||
executionResults.length > 0
|
||||
? executionResults[executionResults.length - 1]
|
||||
: undefined;
|
||||
const outputData = latestResult?.output_data?.output;
|
||||
const executionResult = node.data.nodeExecutionResult;
|
||||
const outputData = executionResult?.output_data?.output;
|
||||
|
||||
const renderer = globalRegistry.getRenderer(outputData);
|
||||
|
||||
|
||||
@@ -153,9 +153,6 @@ export const useRunInputDialog = ({
|
||||
Object.entries(credentialValues).filter(([_, cred]) => cred && cred.id),
|
||||
);
|
||||
|
||||
useNodeStore.getState().clearAllNodeExecutionResults();
|
||||
useNodeStore.getState().cleanNodesStatuses();
|
||||
|
||||
await executeGraph({
|
||||
graphId: flowID ?? "",
|
||||
graphVersion: flowVersion || null,
|
||||
|
||||
@@ -34,7 +34,7 @@ export type CustomNodeData = {
|
||||
uiType: BlockUIType;
|
||||
block_id: string;
|
||||
status?: AgentExecutionStatus;
|
||||
nodeExecutionResults?: NodeExecutionResult[];
|
||||
nodeExecutionResult?: NodeExecutionResult;
|
||||
staticOutput?: boolean;
|
||||
// TODO : We need better type safety for the following backend fields.
|
||||
costs: BlockCost[];
|
||||
@@ -75,11 +75,7 @@ export const CustomNode: React.FC<NodeProps<CustomNode>> = React.memo(
|
||||
(value) => value !== null && value !== undefined && value !== "",
|
||||
);
|
||||
|
||||
const latestResult =
|
||||
data.nodeExecutionResults && data.nodeExecutionResults.length > 0
|
||||
? data.nodeExecutionResults[data.nodeExecutionResults.length - 1]
|
||||
: undefined;
|
||||
const outputData = latestResult?.output_data;
|
||||
const outputData = data.nodeExecutionResult?.output_data;
|
||||
const hasOutputError =
|
||||
typeof outputData === "object" &&
|
||||
outputData !== null &&
|
||||
|
||||
@@ -14,15 +14,10 @@ import { useNodeOutput } from "./useNodeOutput";
|
||||
import { ViewMoreData } from "./components/ViewMoreData";
|
||||
|
||||
export const NodeDataRenderer = ({ nodeId }: { nodeId: string }) => {
|
||||
const {
|
||||
latestOutputData,
|
||||
copiedKey,
|
||||
handleCopy,
|
||||
executionResultId,
|
||||
latestInputData,
|
||||
} = useNodeOutput(nodeId);
|
||||
const { outputData, copiedKey, handleCopy, executionResultId, inputData } =
|
||||
useNodeOutput(nodeId);
|
||||
|
||||
if (Object.keys(latestOutputData).length === 0) {
|
||||
if (Object.keys(outputData).length === 0) {
|
||||
return null;
|
||||
}
|
||||
|
||||
@@ -46,19 +41,18 @@ export const NodeDataRenderer = ({ nodeId }: { nodeId: string }) => {
|
||||
<div className="space-y-2">
|
||||
<Text variant="small-medium">Input</Text>
|
||||
|
||||
<ContentRenderer value={latestInputData} shortContent={false} />
|
||||
<ContentRenderer value={inputData} shortContent={false} />
|
||||
|
||||
<div className="mt-1 flex justify-end gap-1">
|
||||
<NodeDataViewer
|
||||
data={inputData}
|
||||
pinName="Input"
|
||||
nodeId={nodeId}
|
||||
execId={executionResultId}
|
||||
dataType="input"
|
||||
/>
|
||||
<Button
|
||||
variant="secondary"
|
||||
size="small"
|
||||
onClick={() => handleCopy("input", latestInputData)}
|
||||
onClick={() => handleCopy("input", inputData)}
|
||||
className={cn(
|
||||
"h-fit min-w-0 gap-1.5 border border-zinc-200 p-2 text-black hover:text-slate-900",
|
||||
copiedKey === "input" &&
|
||||
@@ -74,72 +68,70 @@ export const NodeDataRenderer = ({ nodeId }: { nodeId: string }) => {
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{Object.entries(latestOutputData)
|
||||
{Object.entries(outputData)
|
||||
.slice(0, 2)
|
||||
.map(([key, value]) => {
|
||||
return (
|
||||
<div key={key} className="flex flex-col gap-2">
|
||||
<div className="flex items-center gap-2">
|
||||
<Text
|
||||
variant="small-medium"
|
||||
className="!font-semibold text-slate-600"
|
||||
>
|
||||
Pin:
|
||||
</Text>
|
||||
<Text variant="small" className="text-slate-700">
|
||||
{beautifyString(key)}
|
||||
</Text>
|
||||
</div>
|
||||
<div className="w-full space-y-2">
|
||||
<Text
|
||||
variant="small"
|
||||
className="!font-semibold text-slate-600"
|
||||
>
|
||||
Data:
|
||||
</Text>
|
||||
<div className="relative space-y-2">
|
||||
{value.map((item, index) => (
|
||||
<div key={index}>
|
||||
<ContentRenderer
|
||||
value={item}
|
||||
shortContent={true}
|
||||
/>
|
||||
</div>
|
||||
))}
|
||||
|
||||
<div className="mt-1 flex justify-end gap-1">
|
||||
<NodeDataViewer
|
||||
pinName={key}
|
||||
nodeId={nodeId}
|
||||
execId={executionResultId}
|
||||
/>
|
||||
<Button
|
||||
variant="secondary"
|
||||
size="small"
|
||||
onClick={() => handleCopy(key, value)}
|
||||
className={cn(
|
||||
"h-fit min-w-0 gap-1.5 border border-zinc-200 p-2 text-black hover:text-slate-900",
|
||||
copiedKey === key &&
|
||||
"border-green-400 bg-green-100 hover:border-green-400 hover:bg-green-200",
|
||||
)}
|
||||
>
|
||||
{copiedKey === key ? (
|
||||
<CheckIcon
|
||||
size={12}
|
||||
className="text-green-600"
|
||||
/>
|
||||
) : (
|
||||
<CopyIcon size={12} />
|
||||
)}
|
||||
</Button>
|
||||
.map(([key, value]) => (
|
||||
<div key={key} className="flex flex-col gap-2">
|
||||
<div className="flex items-center gap-2">
|
||||
<Text
|
||||
variant="small-medium"
|
||||
className="!font-semibold text-slate-600"
|
||||
>
|
||||
Pin:
|
||||
</Text>
|
||||
<Text variant="small" className="text-slate-700">
|
||||
{beautifyString(key)}
|
||||
</Text>
|
||||
</div>
|
||||
<div className="w-full space-y-2">
|
||||
<Text
|
||||
variant="small"
|
||||
className="!font-semibold text-slate-600"
|
||||
>
|
||||
Data:
|
||||
</Text>
|
||||
<div className="relative space-y-2">
|
||||
{value.map((item, index) => (
|
||||
<div key={index}>
|
||||
<ContentRenderer value={item} shortContent={true} />
|
||||
</div>
|
||||
))}
|
||||
|
||||
<div className="mt-1 flex justify-end gap-1">
|
||||
<NodeDataViewer
|
||||
data={value}
|
||||
pinName={key}
|
||||
execId={executionResultId}
|
||||
/>
|
||||
<Button
|
||||
variant="secondary"
|
||||
size="small"
|
||||
onClick={() => handleCopy(key, value)}
|
||||
className={cn(
|
||||
"h-fit min-w-0 gap-1.5 border border-zinc-200 p-2 text-black hover:text-slate-900",
|
||||
copiedKey === key &&
|
||||
"border-green-400 bg-green-100 hover:border-green-400 hover:bg-green-200",
|
||||
)}
|
||||
>
|
||||
{copiedKey === key ? (
|
||||
<CheckIcon size={12} className="text-green-600" />
|
||||
) : (
|
||||
<CopyIcon size={12} />
|
||||
)}
|
||||
</Button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
})}
|
||||
</div>
|
||||
))}
|
||||
</div>
|
||||
<ViewMoreData nodeId={nodeId} />
|
||||
|
||||
{Object.keys(outputData).length > 2 && (
|
||||
<ViewMoreData
|
||||
outputData={outputData}
|
||||
execId={executionResultId}
|
||||
/>
|
||||
)}
|
||||
</AccordionContent>
|
||||
</AccordionItem>
|
||||
</Accordion>
|
||||
|
||||
@@ -19,51 +19,22 @@ import {
|
||||
CopyIcon,
|
||||
DownloadIcon,
|
||||
} from "@phosphor-icons/react";
|
||||
import React, { FC } from "react";
|
||||
import { FC } from "react";
|
||||
import { useNodeDataViewer } from "./useNodeDataViewer";
|
||||
import { useNodeStore } from "@/app/(platform)/build/stores/nodeStore";
|
||||
import { useShallow } from "zustand/react/shallow";
|
||||
import { NodeDataType } from "../../helpers";
|
||||
|
||||
export interface NodeDataViewerProps {
|
||||
data?: any;
|
||||
interface NodeDataViewerProps {
|
||||
data: any;
|
||||
pinName: string;
|
||||
nodeId?: string;
|
||||
execId?: string;
|
||||
isViewMoreData?: boolean;
|
||||
dataType?: NodeDataType;
|
||||
}
|
||||
|
||||
export const NodeDataViewer: FC<NodeDataViewerProps> = ({
|
||||
data,
|
||||
pinName,
|
||||
nodeId,
|
||||
execId = "N/A",
|
||||
isViewMoreData = false,
|
||||
dataType = "output",
|
||||
}) => {
|
||||
const executionResults = useNodeStore(
|
||||
useShallow((state) =>
|
||||
nodeId ? state.getNodeExecutionResults(nodeId) : [],
|
||||
),
|
||||
);
|
||||
const latestInputData = useNodeStore(
|
||||
useShallow((state) =>
|
||||
nodeId ? state.getLatestNodeInputData(nodeId) : undefined,
|
||||
),
|
||||
);
|
||||
const accumulatedOutputData = useNodeStore(
|
||||
useShallow((state) =>
|
||||
nodeId ? state.getAccumulatedNodeOutputData(nodeId) : {},
|
||||
),
|
||||
);
|
||||
|
||||
const resolvedData =
|
||||
data ??
|
||||
(dataType === "input"
|
||||
? (latestInputData ?? {})
|
||||
: (accumulatedOutputData[pinName] ?? []));
|
||||
|
||||
const {
|
||||
outputItems,
|
||||
copyExecutionId,
|
||||
@@ -71,20 +42,7 @@ export const NodeDataViewer: FC<NodeDataViewerProps> = ({
|
||||
handleDownloadItem,
|
||||
dataArray,
|
||||
copiedIndex,
|
||||
groupedExecutions,
|
||||
totalGroupedItems,
|
||||
handleCopyGroupedItem,
|
||||
handleDownloadGroupedItem,
|
||||
copiedKey,
|
||||
} = useNodeDataViewer(
|
||||
resolvedData,
|
||||
pinName,
|
||||
execId,
|
||||
executionResults,
|
||||
dataType,
|
||||
);
|
||||
|
||||
const shouldGroupExecutions = groupedExecutions.length > 0;
|
||||
} = useNodeDataViewer(data, pinName, execId);
|
||||
return (
|
||||
<Dialog styling={{ width: "600px" }}>
|
||||
<TooltipProvider>
|
||||
@@ -110,141 +68,44 @@ export const NodeDataViewer: FC<NodeDataViewerProps> = ({
|
||||
<div className="flex items-center gap-4">
|
||||
<div className="flex items-center gap-2">
|
||||
<Text variant="large-medium" className="text-slate-900">
|
||||
Full {dataType === "input" ? "Input" : "Output"} Preview
|
||||
Full Output Preview
|
||||
</Text>
|
||||
</div>
|
||||
<div className="rounded-full border border-slate-300 bg-slate-100 px-3 py-1.5 text-xs font-medium text-black">
|
||||
{shouldGroupExecutions ? totalGroupedItems : dataArray.length}{" "}
|
||||
item
|
||||
{shouldGroupExecutions
|
||||
? totalGroupedItems !== 1
|
||||
? "s"
|
||||
: ""
|
||||
: dataArray.length !== 1
|
||||
? "s"
|
||||
: ""}{" "}
|
||||
total
|
||||
{dataArray.length} item{dataArray.length !== 1 ? "s" : ""} total
|
||||
</div>
|
||||
</div>
|
||||
<div className="text-sm text-gray-600">
|
||||
{shouldGroupExecutions ? (
|
||||
<div>
|
||||
Pin:{" "}
|
||||
<span className="font-semibold">{beautifyString(pinName)}</span>
|
||||
</div>
|
||||
) : (
|
||||
<>
|
||||
<div className="flex items-center gap-2">
|
||||
<Text variant="body" className="text-slate-600">
|
||||
Execution ID:
|
||||
</Text>
|
||||
<Text
|
||||
variant="body-medium"
|
||||
className="rounded-full border border-gray-300 bg-gray-50 px-2 py-1 font-mono text-xs"
|
||||
>
|
||||
{execId}
|
||||
</Text>
|
||||
<Button
|
||||
variant="ghost"
|
||||
size="small"
|
||||
onClick={copyExecutionId}
|
||||
className="h-6 w-6 min-w-0 p-0"
|
||||
>
|
||||
<CopyIcon size={14} />
|
||||
</Button>
|
||||
</div>
|
||||
<div className="mt-2">
|
||||
Pin:{" "}
|
||||
<span className="font-semibold">
|
||||
{beautifyString(pinName)}
|
||||
</span>
|
||||
</div>
|
||||
</>
|
||||
)}
|
||||
<div className="flex items-center gap-2">
|
||||
<Text variant="body" className="text-slate-600">
|
||||
Execution ID:
|
||||
</Text>
|
||||
<Text
|
||||
variant="body-medium"
|
||||
className="rounded-full border border-gray-300 bg-gray-50 px-2 py-1 font-mono text-xs"
|
||||
>
|
||||
{execId}
|
||||
</Text>
|
||||
<Button
|
||||
variant="ghost"
|
||||
size="small"
|
||||
onClick={copyExecutionId}
|
||||
className="h-6 w-6 min-w-0 p-0"
|
||||
>
|
||||
<CopyIcon size={14} />
|
||||
</Button>
|
||||
</div>
|
||||
<div className="mt-2">
|
||||
Pin:{" "}
|
||||
<span className="font-semibold">{beautifyString(pinName)}</span>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div className="flex-1 overflow-hidden">
|
||||
<ScrollArea className="h-full">
|
||||
<div className="my-4">
|
||||
{shouldGroupExecutions ? (
|
||||
<div className="space-y-4">
|
||||
{groupedExecutions.map((execution) => (
|
||||
<div
|
||||
key={execution.execId}
|
||||
className="rounded-3xl border border-slate-200 bg-white p-4 shadow-sm"
|
||||
>
|
||||
<div className="flex items-center gap-2">
|
||||
<Text variant="body" className="text-slate-600">
|
||||
Execution ID:
|
||||
</Text>
|
||||
<Text
|
||||
variant="body-medium"
|
||||
className="rounded-full border border-gray-300 bg-gray-50 px-2 py-1 font-mono text-xs"
|
||||
>
|
||||
{execution.execId}
|
||||
</Text>
|
||||
</div>
|
||||
<div className="mt-2 space-y-4">
|
||||
{execution.outputItems.length > 0 ? (
|
||||
execution.outputItems.map((item, index) => (
|
||||
<div
|
||||
key={item.key}
|
||||
className="group flex items-start gap-4"
|
||||
>
|
||||
<div className="w-full flex-1">
|
||||
<OutputItem
|
||||
value={item.value}
|
||||
metadata={item.metadata}
|
||||
renderer={item.renderer}
|
||||
/>
|
||||
</div>
|
||||
|
||||
<div className="flex w-fit gap-3">
|
||||
<Button
|
||||
variant="secondary"
|
||||
className="min-w-0 p-1"
|
||||
size="icon"
|
||||
onClick={() =>
|
||||
handleCopyGroupedItem(
|
||||
execution.execId,
|
||||
index,
|
||||
item,
|
||||
)
|
||||
}
|
||||
aria-label="Copy item"
|
||||
>
|
||||
{copiedKey ===
|
||||
`${execution.execId}-${index}` ? (
|
||||
<CheckIcon className="size-4 text-green-600" />
|
||||
) : (
|
||||
<CopyIcon className="size-4 text-black" />
|
||||
)}
|
||||
</Button>
|
||||
<Button
|
||||
variant="secondary"
|
||||
size="icon"
|
||||
className="min-w-0 p-1"
|
||||
onClick={() =>
|
||||
handleDownloadGroupedItem(item)
|
||||
}
|
||||
aria-label="Download item"
|
||||
>
|
||||
<DownloadIcon className="size-4 text-black" />
|
||||
</Button>
|
||||
</div>
|
||||
</div>
|
||||
))
|
||||
) : (
|
||||
<div className="py-4 text-center text-gray-500">
|
||||
No data available
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
))}
|
||||
</div>
|
||||
) : dataArray.length > 0 ? (
|
||||
{dataArray.length > 0 ? (
|
||||
<div className="space-y-4">
|
||||
{outputItems.map((item, index) => (
|
||||
<div key={item.key} className="group relative">
|
||||
|
||||
@@ -1,70 +1,82 @@
|
||||
import type { OutputMetadata } from "@/components/contextual/OutputRenderers";
|
||||
import { globalRegistry } from "@/components/contextual/OutputRenderers";
|
||||
import { downloadOutputs } from "@/components/contextual/OutputRenderers/utils/download";
|
||||
import { useToast } from "@/components/molecules/Toast/use-toast";
|
||||
import { beautifyString } from "@/lib/utils";
|
||||
import { useState } from "react";
|
||||
import type { NodeExecutionResult } from "@/app/api/__generated__/models/nodeExecutionResult";
|
||||
import {
|
||||
NodeDataType,
|
||||
createOutputItems,
|
||||
getExecutionData,
|
||||
normalizeToArray,
|
||||
type OutputItem,
|
||||
} from "../../helpers";
|
||||
|
||||
export type GroupedExecution = {
|
||||
execId: string;
|
||||
outputItems: Array<OutputItem>;
|
||||
};
|
||||
import React, { useMemo, useState } from "react";
|
||||
|
||||
export const useNodeDataViewer = (
|
||||
data: any,
|
||||
pinName: string,
|
||||
execId: string,
|
||||
executionResults?: NodeExecutionResult[],
|
||||
dataType?: NodeDataType,
|
||||
) => {
|
||||
const { toast } = useToast();
|
||||
const [copiedIndex, setCopiedIndex] = useState<number | null>(null);
|
||||
const [copiedKey, setCopiedKey] = useState<string | null>(null);
|
||||
|
||||
const dataArray = Array.isArray(data) ? data : [data];
|
||||
// Normalize data to array format
|
||||
const dataArray = useMemo(() => {
|
||||
return Array.isArray(data) ? data : [data];
|
||||
}, [data]);
|
||||
|
||||
const outputItems =
|
||||
!dataArray || dataArray.length === 0
|
||||
? []
|
||||
: createOutputItems(dataArray).map((item, index) => ({
|
||||
...item,
|
||||
// Prepare items for the enhanced renderer system
|
||||
const outputItems = useMemo(() => {
|
||||
if (!dataArray) return [];
|
||||
|
||||
const items: Array<{
|
||||
key: string;
|
||||
label: string;
|
||||
value: unknown;
|
||||
metadata?: OutputMetadata;
|
||||
renderer: any;
|
||||
}> = [];
|
||||
|
||||
dataArray.forEach((value, index) => {
|
||||
const metadata: OutputMetadata = {};
|
||||
|
||||
// Extract metadata from the value if it's an object
|
||||
if (
|
||||
typeof value === "object" &&
|
||||
value !== null &&
|
||||
!React.isValidElement(value)
|
||||
) {
|
||||
const objValue = value as any;
|
||||
if (objValue.type) metadata.type = objValue.type;
|
||||
if (objValue.mimeType) metadata.mimeType = objValue.mimeType;
|
||||
if (objValue.filename) metadata.filename = objValue.filename;
|
||||
if (objValue.language) metadata.language = objValue.language;
|
||||
}
|
||||
|
||||
const renderer = globalRegistry.getRenderer(value, metadata);
|
||||
if (renderer) {
|
||||
items.push({
|
||||
key: `item-${index}`,
|
||||
label: index === 0 ? beautifyString(pinName) : "",
|
||||
}));
|
||||
|
||||
const groupedExecutions =
|
||||
!executionResults || executionResults.length === 0
|
||||
? []
|
||||
: [...executionResults].reverse().map((result) => {
|
||||
const rawData = getExecutionData(
|
||||
result,
|
||||
dataType || "output",
|
||||
pinName,
|
||||
);
|
||||
let dataArray: unknown[];
|
||||
if (dataType === "input") {
|
||||
dataArray =
|
||||
rawData !== undefined && rawData !== null ? [rawData] : [];
|
||||
} else {
|
||||
dataArray = normalizeToArray(rawData);
|
||||
}
|
||||
|
||||
const outputItems = createOutputItems(dataArray);
|
||||
return {
|
||||
execId: result.node_exec_id,
|
||||
outputItems,
|
||||
};
|
||||
value,
|
||||
metadata,
|
||||
renderer,
|
||||
});
|
||||
} else {
|
||||
// Fallback to text renderer
|
||||
const textRenderer = globalRegistry
|
||||
.getAllRenderers()
|
||||
.find((r) => r.name === "TextRenderer");
|
||||
if (textRenderer) {
|
||||
items.push({
|
||||
key: `item-${index}`,
|
||||
label: index === 0 ? beautifyString(pinName) : "",
|
||||
value:
|
||||
typeof value === "string"
|
||||
? value
|
||||
: JSON.stringify(value, null, 2),
|
||||
metadata,
|
||||
renderer: textRenderer,
|
||||
});
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
const totalGroupedItems = groupedExecutions.reduce(
|
||||
(total, execution) => total + execution.outputItems.length,
|
||||
0,
|
||||
);
|
||||
return items;
|
||||
}, [dataArray, pinName]);
|
||||
|
||||
const copyExecutionId = () => {
|
||||
navigator.clipboard.writeText(execId).then(() => {
|
||||
@@ -110,45 +122,6 @@ export const useNodeDataViewer = (
|
||||
]);
|
||||
};
|
||||
|
||||
const handleCopyGroupedItem = async (
|
||||
execId: string,
|
||||
index: number,
|
||||
item: OutputItem,
|
||||
) => {
|
||||
const copyContent = item.renderer.getCopyContent(item.value, item.metadata);
|
||||
|
||||
if (!copyContent) {
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
let text: string;
|
||||
if (typeof copyContent.data === "string") {
|
||||
text = copyContent.data;
|
||||
} else if (copyContent.fallbackText) {
|
||||
text = copyContent.fallbackText;
|
||||
} else {
|
||||
return;
|
||||
}
|
||||
|
||||
await navigator.clipboard.writeText(text);
|
||||
setCopiedKey(`${execId}-${index}`);
|
||||
setTimeout(() => setCopiedKey(null), 2000);
|
||||
} catch (error) {
|
||||
console.error("Failed to copy:", error);
|
||||
}
|
||||
};
|
||||
|
||||
const handleDownloadGroupedItem = (item: OutputItem) => {
|
||||
downloadOutputs([
|
||||
{
|
||||
value: item.value,
|
||||
metadata: item.metadata,
|
||||
renderer: item.renderer,
|
||||
},
|
||||
]);
|
||||
};
|
||||
|
||||
return {
|
||||
outputItems,
|
||||
dataArray,
|
||||
@@ -156,10 +129,5 @@ export const useNodeDataViewer = (
|
||||
handleCopyItem,
|
||||
handleDownloadItem,
|
||||
copiedIndex,
|
||||
groupedExecutions,
|
||||
totalGroupedItems,
|
||||
handleCopyGroupedItem,
|
||||
handleDownloadGroupedItem,
|
||||
copiedKey,
|
||||
};
|
||||
};
|
||||
|
||||
@@ -8,28 +8,16 @@ import { useState } from "react";
|
||||
import { NodeDataViewer } from "./NodeDataViewer/NodeDataViewer";
|
||||
import { useToast } from "@/components/molecules/Toast/use-toast";
|
||||
import { CheckIcon, CopyIcon } from "@phosphor-icons/react";
|
||||
import { useNodeStore } from "@/app/(platform)/build/stores/nodeStore";
|
||||
import { useShallow } from "zustand/react/shallow";
|
||||
import {
|
||||
NodeDataType,
|
||||
getExecutionEntries,
|
||||
normalizeToArray,
|
||||
} from "../helpers";
|
||||
|
||||
export const ViewMoreData = ({
|
||||
nodeId,
|
||||
dataType = "output",
|
||||
outputData,
|
||||
execId,
|
||||
}: {
|
||||
nodeId: string;
|
||||
dataType?: NodeDataType;
|
||||
outputData: Record<string, Array<any>>;
|
||||
execId?: string;
|
||||
}) => {
|
||||
const [copiedKey, setCopiedKey] = useState<string | null>(null);
|
||||
const { toast } = useToast();
|
||||
const executionResults = useNodeStore(
|
||||
useShallow((state) => state.getNodeExecutionResults(nodeId)),
|
||||
);
|
||||
|
||||
const reversedExecutionResults = [...executionResults].reverse();
|
||||
|
||||
const handleCopy = (key: string, value: any) => {
|
||||
const textToCopy =
|
||||
@@ -41,8 +29,8 @@ export const ViewMoreData = ({
|
||||
setTimeout(() => setCopiedKey(null), 2000);
|
||||
};
|
||||
|
||||
const copyExecutionId = (executionId: string) => {
|
||||
navigator.clipboard.writeText(executionId || "N/A").then(() => {
|
||||
const copyExecutionId = () => {
|
||||
navigator.clipboard.writeText(execId || "N/A").then(() => {
|
||||
toast({
|
||||
title: "Execution ID copied to clipboard!",
|
||||
duration: 2000,
|
||||
@@ -54,7 +42,7 @@ export const ViewMoreData = ({
|
||||
<Dialog styling={{ width: "600px", paddingRight: "16px" }}>
|
||||
<Dialog.Trigger>
|
||||
<Button
|
||||
variant="secondary"
|
||||
variant="primary"
|
||||
size="small"
|
||||
className="h-fit w-fit min-w-0 !text-xs"
|
||||
>
|
||||
@@ -64,114 +52,83 @@ export const ViewMoreData = ({
|
||||
<Dialog.Content>
|
||||
<div className="flex flex-col gap-4">
|
||||
<Text variant="h4" className="text-slate-900">
|
||||
Complete {dataType === "input" ? "Input" : "Output"} Data
|
||||
Complete Output Data
|
||||
</Text>
|
||||
|
||||
<div className="flex items-center gap-2">
|
||||
<Text variant="body" className="text-slate-600">
|
||||
Execution ID:
|
||||
</Text>
|
||||
<Text
|
||||
variant="body-medium"
|
||||
className="rounded-full border border-gray-300 bg-gray-50 px-2 py-1 font-mono text-xs"
|
||||
>
|
||||
{execId}
|
||||
</Text>
|
||||
<Button
|
||||
variant="ghost"
|
||||
size="small"
|
||||
onClick={copyExecutionId}
|
||||
className="h-6 w-6 min-w-0 p-0"
|
||||
>
|
||||
<CopyIcon size={14} />
|
||||
</Button>
|
||||
</div>
|
||||
|
||||
<ScrollArea className="h-full">
|
||||
<div className="flex flex-col gap-4">
|
||||
{reversedExecutionResults.map((result) => (
|
||||
<div
|
||||
key={result.node_exec_id}
|
||||
className="rounded-3xl border border-slate-200 bg-white p-4 shadow-sm"
|
||||
>
|
||||
{Object.entries(outputData).map(([key, value]) => (
|
||||
<div key={key} className="flex flex-col gap-2">
|
||||
<div className="flex items-center gap-2">
|
||||
<Text variant="body" className="text-slate-600">
|
||||
Execution ID:
|
||||
</Text>
|
||||
<Text
|
||||
variant="body-medium"
|
||||
className="rounded-full border border-gray-300 bg-gray-50 px-2 py-1 font-mono text-xs"
|
||||
className="!font-semibold text-slate-600"
|
||||
>
|
||||
{result.node_exec_id}
|
||||
Pin:
|
||||
</Text>
|
||||
<Text variant="body-medium" className="text-slate-700">
|
||||
{beautifyString(key)}
|
||||
</Text>
|
||||
<Button
|
||||
variant="ghost"
|
||||
size="small"
|
||||
onClick={() => copyExecutionId(result.node_exec_id)}
|
||||
className="h-6 w-6 min-w-0 p-0"
|
||||
>
|
||||
<CopyIcon size={14} />
|
||||
</Button>
|
||||
</div>
|
||||
<div className="w-full space-y-2">
|
||||
<Text
|
||||
variant="body-medium"
|
||||
className="!font-semibold text-slate-600"
|
||||
>
|
||||
Data:
|
||||
</Text>
|
||||
<div className="relative space-y-2">
|
||||
{value.map((item, index) => (
|
||||
<div key={index}>
|
||||
<ContentRenderer value={item} shortContent={false} />
|
||||
</div>
|
||||
))}
|
||||
|
||||
<div className="mt-4 flex flex-col gap-4">
|
||||
{getExecutionEntries(result, dataType).map(
|
||||
([key, value]) => {
|
||||
const normalizedValue = normalizeToArray(value);
|
||||
return (
|
||||
<div key={key} className="flex flex-col gap-2">
|
||||
<div className="flex items-center gap-2">
|
||||
<Text
|
||||
variant="body-medium"
|
||||
className="!font-semibold text-slate-600"
|
||||
>
|
||||
Pin:
|
||||
</Text>
|
||||
<Text
|
||||
variant="body-medium"
|
||||
className="text-slate-700"
|
||||
>
|
||||
{beautifyString(key)}
|
||||
</Text>
|
||||
</div>
|
||||
<div className="w-full space-y-2">
|
||||
<Text
|
||||
variant="body-medium"
|
||||
className="!font-semibold text-slate-600"
|
||||
>
|
||||
Data:
|
||||
</Text>
|
||||
<div className="relative space-y-2">
|
||||
{normalizedValue.map((item, index) => (
|
||||
<div key={index}>
|
||||
<ContentRenderer
|
||||
value={item}
|
||||
shortContent={false}
|
||||
/>
|
||||
</div>
|
||||
))}
|
||||
|
||||
<div className="mt-1 flex justify-end gap-1">
|
||||
<NodeDataViewer
|
||||
data={normalizedValue}
|
||||
pinName={key}
|
||||
execId={result.node_exec_id}
|
||||
isViewMoreData={true}
|
||||
dataType={dataType}
|
||||
/>
|
||||
<Button
|
||||
variant="secondary"
|
||||
size="small"
|
||||
onClick={() =>
|
||||
handleCopy(
|
||||
`${result.node_exec_id}-${key}`,
|
||||
normalizedValue,
|
||||
)
|
||||
}
|
||||
className={cn(
|
||||
"h-fit min-w-0 gap-1.5 border border-zinc-200 p-2 text-black hover:text-slate-900",
|
||||
copiedKey ===
|
||||
`${result.node_exec_id}-${key}` &&
|
||||
"border-green-400 bg-green-100 hover:border-green-400 hover:bg-green-200",
|
||||
)}
|
||||
>
|
||||
{copiedKey ===
|
||||
`${result.node_exec_id}-${key}` ? (
|
||||
<CheckIcon
|
||||
size={16}
|
||||
className="text-green-600"
|
||||
/>
|
||||
) : (
|
||||
<CopyIcon size={16} />
|
||||
)}
|
||||
</Button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
},
|
||||
)}
|
||||
<div className="mt-1 flex justify-end gap-1">
|
||||
<NodeDataViewer
|
||||
data={value}
|
||||
pinName={key}
|
||||
execId={execId}
|
||||
isViewMoreData={true}
|
||||
/>
|
||||
<Button
|
||||
variant="secondary"
|
||||
size="small"
|
||||
onClick={() => handleCopy(key, value)}
|
||||
className={cn(
|
||||
"h-fit min-w-0 gap-1.5 border border-zinc-200 p-2 text-black hover:text-slate-900",
|
||||
copiedKey === key &&
|
||||
"border-green-400 bg-green-100 hover:border-green-400 hover:bg-green-200",
|
||||
)}
|
||||
>
|
||||
{copiedKey === key ? (
|
||||
<CheckIcon size={16} className="text-green-600" />
|
||||
) : (
|
||||
<CopyIcon size={16} />
|
||||
)}
|
||||
</Button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
))}
|
||||
|
||||
@@ -1,83 +0,0 @@
|
||||
import type { NodeExecutionResult } from "@/app/api/__generated__/models/nodeExecutionResult";
|
||||
import type { OutputMetadata } from "@/components/contextual/OutputRenderers";
|
||||
import { globalRegistry } from "@/components/contextual/OutputRenderers";
|
||||
import React from "react";
|
||||
|
||||
export type NodeDataType = "input" | "output";
|
||||
|
||||
export type OutputItem = {
|
||||
key: string;
|
||||
value: unknown;
|
||||
metadata?: OutputMetadata;
|
||||
renderer: any;
|
||||
};
|
||||
|
||||
export const normalizeToArray = (value: unknown) => {
|
||||
if (value === undefined) return [];
|
||||
return Array.isArray(value) ? value : [value];
|
||||
};
|
||||
|
||||
export const getExecutionData = (
|
||||
result: NodeExecutionResult,
|
||||
dataType: NodeDataType,
|
||||
pinName: string,
|
||||
) => {
|
||||
if (dataType === "input") {
|
||||
return result.input_data;
|
||||
}
|
||||
|
||||
return result.output_data?.[pinName];
|
||||
};
|
||||
|
||||
export const createOutputItems = (dataArray: unknown[]): Array<OutputItem> => {
|
||||
const items: Array<OutputItem> = [];
|
||||
|
||||
dataArray.forEach((value, index) => {
|
||||
const metadata: OutputMetadata = {};
|
||||
|
||||
if (
|
||||
typeof value === "object" &&
|
||||
value !== null &&
|
||||
!React.isValidElement(value)
|
||||
) {
|
||||
const objValue = value as any;
|
||||
if (objValue.type) metadata.type = objValue.type;
|
||||
if (objValue.mimeType) metadata.mimeType = objValue.mimeType;
|
||||
if (objValue.filename) metadata.filename = objValue.filename;
|
||||
if (objValue.language) metadata.language = objValue.language;
|
||||
}
|
||||
|
||||
const renderer = globalRegistry.getRenderer(value, metadata);
|
||||
if (renderer) {
|
||||
items.push({
|
||||
key: `item-${index}`,
|
||||
value,
|
||||
metadata,
|
||||
renderer,
|
||||
});
|
||||
} else {
|
||||
const textRenderer = globalRegistry
|
||||
.getAllRenderers()
|
||||
.find((r) => r.name === "TextRenderer");
|
||||
if (textRenderer) {
|
||||
items.push({
|
||||
key: `item-${index}`,
|
||||
value:
|
||||
typeof value === "string" ? value : JSON.stringify(value, null, 2),
|
||||
metadata,
|
||||
renderer: textRenderer,
|
||||
});
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
return items;
|
||||
};
|
||||
|
||||
export const getExecutionEntries = (
|
||||
result: NodeExecutionResult,
|
||||
dataType: NodeDataType,
|
||||
) => {
|
||||
const data = dataType === "input" ? result.input_data : result.output_data;
|
||||
return Object.entries(data || {});
|
||||
};
|
||||
@@ -7,18 +7,15 @@ export const useNodeOutput = (nodeId: string) => {
|
||||
const [copiedKey, setCopiedKey] = useState<string | null>(null);
|
||||
const { toast } = useToast();
|
||||
|
||||
const latestResult = useNodeStore(
|
||||
useShallow((state) => state.getLatestNodeExecutionResult(nodeId)),
|
||||
const nodeExecutionResult = useNodeStore(
|
||||
useShallow((state) => state.getNodeExecutionResult(nodeId)),
|
||||
);
|
||||
|
||||
const latestInputData = useNodeStore(
|
||||
useShallow((state) => state.getLatestNodeInputData(nodeId)),
|
||||
);
|
||||
|
||||
const latestOutputData: Record<string, Array<any>> = useNodeStore(
|
||||
useShallow((state) => state.getLatestNodeOutputData(nodeId) || {}),
|
||||
);
|
||||
const inputData = nodeExecutionResult?.input_data;
|
||||
|
||||
const outputData: Record<string, Array<any>> = {
|
||||
...nodeExecutionResult?.output_data,
|
||||
};
|
||||
const handleCopy = async (key: string, value: any) => {
|
||||
try {
|
||||
const text = JSON.stringify(value, null, 2);
|
||||
@@ -38,12 +35,11 @@ export const useNodeOutput = (nodeId: string) => {
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
return {
|
||||
latestOutputData,
|
||||
latestInputData,
|
||||
outputData,
|
||||
inputData,
|
||||
copiedKey,
|
||||
handleCopy,
|
||||
executionResultId: latestResult?.node_exec_id,
|
||||
executionResultId: nodeExecutionResult?.node_exec_id,
|
||||
};
|
||||
};
|
||||
|
||||
@@ -1,7 +1,10 @@
|
||||
import { useState, useCallback, useEffect } from "react";
|
||||
import { useShallow } from "zustand/react/shallow";
|
||||
import { useGraphStore } from "@/app/(platform)/build/stores/graphStore";
|
||||
import { useNodeStore } from "@/app/(platform)/build/stores/nodeStore";
|
||||
import {
|
||||
useNodeStore,
|
||||
NodeResolutionData,
|
||||
} from "@/app/(platform)/build/stores/nodeStore";
|
||||
import { useEdgeStore } from "@/app/(platform)/build/stores/edgeStore";
|
||||
import {
|
||||
useSubAgentUpdate,
|
||||
@@ -10,7 +13,6 @@ import {
|
||||
} from "@/app/(platform)/build/hooks/useSubAgentUpdate";
|
||||
import { GraphInputSchema, GraphOutputSchema } from "@/lib/autogpt-server-api";
|
||||
import { CustomNodeData } from "../../CustomNode";
|
||||
import { NodeResolutionData } from "@/app/(platform)/build/stores/types";
|
||||
|
||||
// Stable empty set to avoid creating new references in selectors
|
||||
const EMPTY_SET: Set<string> = new Set();
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import { AgentExecutionStatus } from "@/app/api/__generated__/models/agentExecutionStatus";
|
||||
import { NodeResolutionData } from "@/app/(platform)/build/stores/types";
|
||||
import { NodeResolutionData } from "@/app/(platform)/build/stores/nodeStore";
|
||||
import { RJSFSchema } from "@rjsf/utils";
|
||||
|
||||
export const nodeStyleBasedOnStatus: Record<AgentExecutionStatus, string> = {
|
||||
|
||||
@@ -1,16 +0,0 @@
|
||||
export const accumulateExecutionData = (
|
||||
accumulated: Record<string, unknown[]>,
|
||||
data: Record<string, unknown> | undefined,
|
||||
) => {
|
||||
if (!data) return { ...accumulated };
|
||||
const next = { ...accumulated };
|
||||
Object.entries(data).forEach(([key, values]) => {
|
||||
const nextValues = Array.isArray(values) ? values : [values];
|
||||
if (next[key]) {
|
||||
next[key] = [...next[key], ...nextValues];
|
||||
} else {
|
||||
next[key] = [...nextValues];
|
||||
}
|
||||
});
|
||||
return next;
|
||||
};
|
||||
@@ -10,8 +10,6 @@ import {
|
||||
import { Node } from "@/app/api/__generated__/models/node";
|
||||
import { AgentExecutionStatus } from "@/app/api/__generated__/models/agentExecutionStatus";
|
||||
import { NodeExecutionResult } from "@/app/api/__generated__/models/nodeExecutionResult";
|
||||
import { NodeExecutionResultInputData } from "@/app/api/__generated__/models/nodeExecutionResultInputData";
|
||||
import { NodeExecutionResultOutputData } from "@/app/api/__generated__/models/nodeExecutionResultOutputData";
|
||||
import { useHistoryStore } from "./historyStore";
|
||||
import { useEdgeStore } from "./edgeStore";
|
||||
import { BlockUIType } from "../components/types";
|
||||
@@ -20,10 +18,31 @@ import {
|
||||
ensurePathExists,
|
||||
parseHandleIdToPath,
|
||||
} from "@/components/renderers/InputRenderer/helpers";
|
||||
import { accumulateExecutionData } from "./helpers";
|
||||
import { NodeResolutionData } from "./types";
|
||||
import { IncompatibilityInfo } from "../hooks/useSubAgentUpdate/types";
|
||||
|
||||
// Resolution mode data stored per node
|
||||
export type NodeResolutionData = {
|
||||
incompatibilities: IncompatibilityInfo;
|
||||
// The NEW schema from the update (what we're updating TO)
|
||||
pendingUpdate: {
|
||||
input_schema: Record<string, unknown>;
|
||||
output_schema: Record<string, unknown>;
|
||||
};
|
||||
// The OLD schema before the update (what we're updating FROM)
|
||||
// Needed to merge and show removed inputs during resolution
|
||||
currentSchema: {
|
||||
input_schema: Record<string, unknown>;
|
||||
output_schema: Record<string, unknown>;
|
||||
};
|
||||
// The full updated hardcoded values to apply when resolution completes
|
||||
pendingHardcodedValues: Record<string, unknown>;
|
||||
};
|
||||
|
||||
// Minimum movement (in pixels) required before logging position change to history
|
||||
// Prevents spamming history with small movements when clicking on inputs inside blocks
|
||||
const MINIMUM_MOVE_BEFORE_LOG = 50;
|
||||
|
||||
// Track initial positions when drag starts (outside store to avoid re-renders)
|
||||
const dragStartPositions: Record<string, XYPosition> = {};
|
||||
|
||||
let dragStartState: { nodes: CustomNode[]; edges: CustomEdge[] } | null = null;
|
||||
@@ -33,15 +52,6 @@ type NodeStore = {
|
||||
nodeCounter: number;
|
||||
setNodeCounter: (nodeCounter: number) => void;
|
||||
nodeAdvancedStates: Record<string, boolean>;
|
||||
|
||||
latestNodeInputData: Record<string, NodeExecutionResultInputData | undefined>;
|
||||
latestNodeOutputData: Record<
|
||||
string,
|
||||
NodeExecutionResultOutputData | undefined
|
||||
>;
|
||||
accumulatedNodeInputData: Record<string, Record<string, unknown[]>>;
|
||||
accumulatedNodeOutputData: Record<string, Record<string, unknown[]>>;
|
||||
|
||||
setNodes: (nodes: CustomNode[]) => void;
|
||||
onNodesChange: (changes: NodeChange<CustomNode>[]) => void;
|
||||
addNode: (node: CustomNode) => void;
|
||||
@@ -62,26 +72,12 @@ type NodeStore = {
|
||||
|
||||
updateNodeStatus: (nodeId: string, status: AgentExecutionStatus) => void;
|
||||
getNodeStatus: (nodeId: string) => AgentExecutionStatus | undefined;
|
||||
cleanNodesStatuses: () => void;
|
||||
|
||||
updateNodeExecutionResult: (
|
||||
nodeId: string,
|
||||
result: NodeExecutionResult,
|
||||
) => void;
|
||||
getNodeExecutionResults: (nodeId: string) => NodeExecutionResult[];
|
||||
getLatestNodeInputData: (
|
||||
nodeId: string,
|
||||
) => NodeExecutionResultInputData | undefined;
|
||||
getLatestNodeOutputData: (
|
||||
nodeId: string,
|
||||
) => NodeExecutionResultOutputData | undefined;
|
||||
getAccumulatedNodeInputData: (nodeId: string) => Record<string, unknown[]>;
|
||||
getAccumulatedNodeOutputData: (nodeId: string) => Record<string, unknown[]>;
|
||||
getLatestNodeExecutionResult: (
|
||||
nodeId: string,
|
||||
) => NodeExecutionResult | undefined;
|
||||
clearAllNodeExecutionResults: () => void;
|
||||
|
||||
getNodeExecutionResult: (nodeId: string) => NodeExecutionResult | undefined;
|
||||
getNodeBlockUIType: (nodeId: string) => BlockUIType;
|
||||
hasWebhookNodes: () => boolean;
|
||||
|
||||
@@ -126,10 +122,6 @@ export const useNodeStore = create<NodeStore>((set, get) => ({
|
||||
nodeCounter: 0,
|
||||
setNodeCounter: (nodeCounter) => set({ nodeCounter }),
|
||||
nodeAdvancedStates: {},
|
||||
latestNodeInputData: {},
|
||||
latestNodeOutputData: {},
|
||||
accumulatedNodeInputData: {},
|
||||
accumulatedNodeOutputData: {},
|
||||
incrementNodeCounter: () =>
|
||||
set((state) => ({
|
||||
nodeCounter: state.nodeCounter + 1,
|
||||
@@ -325,163 +317,18 @@ export const useNodeStore = create<NodeStore>((set, get) => ({
|
||||
return get().nodes.find((n) => n.id === nodeId)?.data?.status;
|
||||
},
|
||||
|
||||
cleanNodesStatuses: () => {
|
||||
set((state) => ({
|
||||
nodes: state.nodes.map((n) => ({
|
||||
...n,
|
||||
data: { ...n.data, status: undefined },
|
||||
})),
|
||||
}));
|
||||
},
|
||||
|
||||
updateNodeExecutionResult: (nodeId: string, result: NodeExecutionResult) => {
|
||||
set((state) => {
|
||||
let latestNodeInputData = state.latestNodeInputData;
|
||||
let latestNodeOutputData = state.latestNodeOutputData;
|
||||
let accumulatedNodeInputData = state.accumulatedNodeInputData;
|
||||
let accumulatedNodeOutputData = state.accumulatedNodeOutputData;
|
||||
|
||||
const nodes = state.nodes.map((n) => {
|
||||
if (n.id !== nodeId) return n;
|
||||
|
||||
const existingResults = n.data.nodeExecutionResults || [];
|
||||
const duplicateIndex = existingResults.findIndex(
|
||||
(r) => r.node_exec_id === result.node_exec_id,
|
||||
);
|
||||
|
||||
if (duplicateIndex !== -1) {
|
||||
const oldResult = existingResults[duplicateIndex];
|
||||
const inputDataChanged =
|
||||
JSON.stringify(oldResult.input_data) !==
|
||||
JSON.stringify(result.input_data);
|
||||
const outputDataChanged =
|
||||
JSON.stringify(oldResult.output_data) !==
|
||||
JSON.stringify(result.output_data);
|
||||
|
||||
if (!inputDataChanged && !outputDataChanged) {
|
||||
return n;
|
||||
}
|
||||
|
||||
const updatedResults = [...existingResults];
|
||||
updatedResults[duplicateIndex] = result;
|
||||
|
||||
const recomputedAccumulatedInput = updatedResults.reduce(
|
||||
(acc, r) => accumulateExecutionData(acc, r.input_data),
|
||||
{} as Record<string, unknown[]>,
|
||||
);
|
||||
const recomputedAccumulatedOutput = updatedResults.reduce(
|
||||
(acc, r) => accumulateExecutionData(acc, r.output_data),
|
||||
{} as Record<string, unknown[]>,
|
||||
);
|
||||
|
||||
const mostRecentResult = updatedResults[updatedResults.length - 1];
|
||||
latestNodeInputData = {
|
||||
...latestNodeInputData,
|
||||
[nodeId]: mostRecentResult.input_data,
|
||||
};
|
||||
latestNodeOutputData = {
|
||||
...latestNodeOutputData,
|
||||
[nodeId]: mostRecentResult.output_data,
|
||||
};
|
||||
|
||||
accumulatedNodeInputData = {
|
||||
...accumulatedNodeInputData,
|
||||
[nodeId]: recomputedAccumulatedInput,
|
||||
};
|
||||
accumulatedNodeOutputData = {
|
||||
...accumulatedNodeOutputData,
|
||||
[nodeId]: recomputedAccumulatedOutput,
|
||||
};
|
||||
|
||||
return {
|
||||
...n,
|
||||
data: {
|
||||
...n.data,
|
||||
nodeExecutionResults: updatedResults,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
accumulatedNodeInputData = {
|
||||
...accumulatedNodeInputData,
|
||||
[nodeId]: accumulateExecutionData(
|
||||
accumulatedNodeInputData[nodeId] || {},
|
||||
result.input_data,
|
||||
),
|
||||
};
|
||||
accumulatedNodeOutputData = {
|
||||
...accumulatedNodeOutputData,
|
||||
[nodeId]: accumulateExecutionData(
|
||||
accumulatedNodeOutputData[nodeId] || {},
|
||||
result.output_data,
|
||||
),
|
||||
};
|
||||
|
||||
latestNodeInputData = {
|
||||
...latestNodeInputData,
|
||||
[nodeId]: result.input_data,
|
||||
};
|
||||
latestNodeOutputData = {
|
||||
...latestNodeOutputData,
|
||||
[nodeId]: result.output_data,
|
||||
};
|
||||
|
||||
return {
|
||||
...n,
|
||||
data: {
|
||||
...n.data,
|
||||
nodeExecutionResults: [...existingResults, result],
|
||||
},
|
||||
};
|
||||
});
|
||||
|
||||
return {
|
||||
nodes,
|
||||
latestNodeInputData,
|
||||
latestNodeOutputData,
|
||||
accumulatedNodeInputData,
|
||||
accumulatedNodeOutputData,
|
||||
};
|
||||
});
|
||||
},
|
||||
getNodeExecutionResults: (nodeId: string) => {
|
||||
return (
|
||||
get().nodes.find((n) => n.id === nodeId)?.data?.nodeExecutionResults || []
|
||||
);
|
||||
},
|
||||
getLatestNodeInputData: (nodeId: string) => {
|
||||
return get().latestNodeInputData[nodeId];
|
||||
},
|
||||
getLatestNodeOutputData: (nodeId: string) => {
|
||||
return get().latestNodeOutputData[nodeId];
|
||||
},
|
||||
getAccumulatedNodeInputData: (nodeId: string) => {
|
||||
return get().accumulatedNodeInputData[nodeId] || {};
|
||||
},
|
||||
getAccumulatedNodeOutputData: (nodeId: string) => {
|
||||
return get().accumulatedNodeOutputData[nodeId] || {};
|
||||
},
|
||||
getLatestNodeExecutionResult: (nodeId: string) => {
|
||||
const results =
|
||||
get().nodes.find((n) => n.id === nodeId)?.data?.nodeExecutionResults ||
|
||||
[];
|
||||
return results.length > 0 ? results[results.length - 1] : undefined;
|
||||
},
|
||||
clearAllNodeExecutionResults: () => {
|
||||
set((state) => ({
|
||||
nodes: state.nodes.map((n) => ({
|
||||
...n,
|
||||
data: {
|
||||
...n.data,
|
||||
nodeExecutionResults: [],
|
||||
},
|
||||
})),
|
||||
latestNodeInputData: {},
|
||||
latestNodeOutputData: {},
|
||||
accumulatedNodeInputData: {},
|
||||
accumulatedNodeOutputData: {},
|
||||
nodes: state.nodes.map((n) =>
|
||||
n.id === nodeId
|
||||
? { ...n, data: { ...n.data, nodeExecutionResult: result } }
|
||||
: n,
|
||||
),
|
||||
}));
|
||||
},
|
||||
getNodeExecutionResult: (nodeId: string) => {
|
||||
return get().nodes.find((n) => n.id === nodeId)?.data?.nodeExecutionResult;
|
||||
},
|
||||
getNodeBlockUIType: (nodeId: string) => {
|
||||
return (
|
||||
get().nodes.find((n) => n.id === nodeId)?.data?.uiType ??
|
||||
|
||||
@@ -1,14 +0,0 @@
|
||||
import { IncompatibilityInfo } from "../hooks/useSubAgentUpdate/types";
|
||||
|
||||
export type NodeResolutionData = {
|
||||
incompatibilities: IncompatibilityInfo;
|
||||
pendingUpdate: {
|
||||
input_schema: Record<string, unknown>;
|
||||
output_schema: Record<string, unknown>;
|
||||
};
|
||||
currentSchema: {
|
||||
input_schema: Record<string, unknown>;
|
||||
output_schema: Record<string, unknown>;
|
||||
};
|
||||
pendingHardcodedValues: Record<string, unknown>;
|
||||
};
|
||||
@@ -65,7 +65,7 @@ The result routes data to yes_output or no_output, enabling intelligent branchin
|
||||
| condition | A plaintext English description of the condition to evaluate | str | Yes |
|
||||
| yes_value | (Optional) Value to output if the condition is true. If not provided, input_value will be used. | Yes Value | No |
|
||||
| no_value | (Optional) Value to output if the condition is false. If not provided, input_value will be used. | No Value | No |
|
||||
| model | The language model to use for evaluating the condition. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-3-7-sonnet-20250219" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| 
"Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
|
||||
| model | The language model to use for evaluating the condition. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| 
"Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
|
||||
|
||||
### Outputs
|
||||
|
||||
@@ -103,7 +103,7 @@ The block sends the entire conversation history to the chosen LLM, including sys
|
||||
|-------|-------------|------|----------|
|
||||
| prompt | The prompt to send to the language model. | str | No |
|
||||
| messages | List of messages in the conversation. | List[Any] | Yes |
|
||||
| model | The language model to use for the conversation. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-3-7-sonnet-20250219" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| 
"Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
|
||||
| model | The language model to use for the conversation. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| 
"Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
|
||||
| max_tokens | The maximum number of tokens to generate in the chat completion. | int | No |
|
||||
| ollama_host | Ollama host for local models | str | No |
|
||||
|
||||
@@ -257,7 +257,7 @@ The block formulates a prompt based on the given focus or source data, sends it
|
||||
|-------|-------------|------|----------|
|
||||
| focus | The focus of the list to generate. | str | No |
|
||||
| source_data | The data to generate the list from. | str | No |
|
||||
| model | The language model to use for generating the list. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-3-7-sonnet-20250219" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| 
"Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
|
||||
| model | The language model to use for generating the list. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" 
\| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
|
||||
| max_retries | Maximum number of retries for generating a valid list. | int | No |
|
||||
| force_json_output | Whether to force the LLM to produce a JSON-only response. This can increase the block's reliability, but may also reduce the quality of the response because it prohibits the LLM from reasoning before providing its JSON response. | bool | No |
|
||||
| max_tokens | The maximum number of tokens to generate in the chat completion. | int | No |
|
||||
@@ -424,7 +424,7 @@ The block sends the input prompt to a chosen LLM, along with any system prompts
|
||||
| prompt | The prompt to send to the language model. | str | Yes |
|
||||
| expected_format | Expected format of the response. If provided, the response will be validated against this format. The keys should be the expected fields in the response, and the values should be the description of the field. | Dict[str, str] | Yes |
|
||||
| list_result | Whether the response should be a list of objects in the expected format. | bool | No |
|
||||
| model | The language model to use for answering the prompt. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
|
||||
| force_json_output | Whether to force the LLM to produce a JSON-only response. This can increase the block's reliability, but may also reduce the quality of the response because it prohibits the LLM from reasoning before providing its JSON response. | bool | No |
|
||||
| sys_prompt | The system prompt to provide additional context to the model. | str | No |
|
||||
| conversation_history | The conversation history to provide context for the prompt. | List[Dict[str, Any]] | No |
|
||||
@@ -464,7 +464,7 @@ The block sends the input prompt to a chosen LLM, processes the response, and re
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| prompt | The prompt to send to the language model. You can use any of the {keys} from Prompt Values to fill in the prompt with values from the prompt values dictionary by putting them in curly braces. | str | Yes |
|
||||
| model | The language model to use for answering the prompt. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
|
||||
| sys_prompt | The system prompt to provide additional context to the model. | str | No |
|
||||
| retry | Number of times to retry the LLM call if the response does not match the expected format. | int | No |
|
||||
| prompt_values | Values used to fill in the prompt. The values can be used in the prompt by putting them in a double curly braces, e.g. {{variable_name}}. | Dict[str, str] | No |
|
||||
@@ -501,7 +501,7 @@ The block splits the input text into smaller chunks, sends each chunk to an LLM
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| text | The text to summarize. | str | Yes |
|
||||
| model | The language model to use for summarizing the text. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
|
||||
| focus | The topic to focus on in the summary | str | No |
|
||||
| style | The style of the summary to generate. | "concise" \| "detailed" \| "bullet points" \| "numbered list" | No |
|
||||
| max_tokens | The maximum number of tokens to generate in the chat completion. | int | No |
|
||||
@@ -763,7 +763,7 @@ Configure agent_mode_max_iterations to control loop behavior: 0 for single decis
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| prompt | The prompt to send to the language model. | str | Yes |
|
||||
| model | The language model to use for answering the prompt. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
|
||||
| multiple_tool_calls | Whether to allow multiple tool calls in a single response. | bool | No |
|
||||
| sys_prompt | The system prompt to provide additional context to the model. | str | No |
|
||||
| conversation_history | The conversation history to provide context for the prompt. | List[Dict[str, Any]] | No |
|
||||
|
||||
@@ -20,7 +20,7 @@ Configure timeouts for DOM settlement and page loading. Variables can be passed
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| browserbase_project_id | Browserbase project ID (required if using Browserbase) | str | Yes |
|
||||
| model | LLM to use for Stagehand (provider is inferred) | "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" | No |
|
||||
| url | URL to navigate to. | str | Yes |
|
||||
| action | Action to perform. Suggested actions are: click, fill, type, press, scroll, select from dropdown. For multi-step actions, add an entry for each step. | List[str] | Yes |
|
||||
| variables | Variables to use in the action. Variables contains data you want the action to use. | Dict[str, str] | No |
|
||||
@@ -65,7 +65,7 @@ Supports searching within iframes and configurable timeouts for dynamic content
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| browserbase_project_id | Browserbase project ID (required if using Browserbase) | str | Yes |
|
||||
| model | LLM to use for Stagehand (provider is inferred) | "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" | No |
|
||||
| url | URL to navigate to. | str | Yes |
|
||||
| instruction | Natural language description of elements or actions to discover. | str | Yes |
|
||||
| iframes | Whether to search within iframes. If True, Stagehand will search for actions within iframes. | bool | No |
|
||||
@@ -106,7 +106,7 @@ Use this to explore a page's interactive elements before building automated work
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| browserbase_project_id | Browserbase project ID (required if using Browserbase) | str | Yes |
|
||||
| model | LLM to use for Stagehand (provider is inferred) | "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" | No |
|
||||
| url | URL to navigate to. | str | Yes |
|
||||
| instruction | Natural language description of elements or actions to discover. | str | Yes |
|
||||
| iframes | Whether to search within iframes. If True, Stagehand will search for actions within iframes. | bool | No |
|
||||
|
||||
Reference in New Issue
Block a user