| undefined;
if (outputs && Object.keys(outputs).length > 0) {
- const outputKeys = Object.keys(outputs);
- return `${blockName} executed successfully. Outputs: ${outputKeys.join(", ")}`;
+ const formattedOutputs: string[] = [];
+
+ for (const [key, values] of Object.entries(outputs)) {
+ if (!Array.isArray(values) || values.length === 0) continue;
+
+ // Format each value in the output array
+ for (const value of values) {
+ const formatted = formatOutputValue(value, key);
+ if (formatted) {
+ formattedOutputs.push(formatted);
+ }
+ }
+ }
+
+ if (formattedOutputs.length > 0) {
+ return `${blockName} executed successfully.\n\n${formattedOutputs.join("\n\n")}`;
+ }
+ return `${blockName} executed successfully.`;
}
return `${blockName} executed successfully.`;
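The hunk above delegates per-value rendering to `formatOutputValue`, which is defined elsewhere in the file and not shown in this diff. A minimal sketch of what such a helper might look like (name and behavior are assumptions for illustration, not the actual implementation):

```typescript
// Hypothetical sketch of the helper the hunk above calls; the real
// formatOutputValue may differ.
function formatOutputValue(value: unknown, key: string): string | null {
  if (value === null || value === undefined) return null;
  // Pretty-print objects; stringify primitives directly.
  const rendered =
    typeof value === "object" ? JSON.stringify(value, null, 2) : String(value);
  if (!rendered || !rendered.trim()) return null;
  return `${key}: ${rendered}`;
}
```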
@@ -266,8 +363,8 @@ export function formatToolResponse(result: unknown, toolName: string): string {
case "error":
const errorMsg =
- (response.error as string) || response.message || "An error occurred";
- return `Error: ${errorMsg}`;
+ (response.message as string) || response.error || "An error occurred";
+ return stripInternalReasoning(String(errorMsg));
case "no_results":
const suggestions = (response.suggestions as string[]) || [];
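`stripInternalReasoning` is likewise referenced but not defined in this hunk. A hedged sketch of what a helper with that name might do — the tag names here are pure assumptions:

```typescript
// Hypothetical: remove model "thinking" markup before surfacing an error
// message to the user. The real helper may strip different markers.
function stripInternalReasoning(text: string): string {
  return text
    .replace(/<thinking>[\s\S]*?<\/thinking>/g, "")
    .replace(/<reasoning>[\s\S]*?<\/reasoning>/g, "")
    .trim();
}
```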
diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/UserChatBubble/UserChatBubble.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/UserChatBubble/UserChatBubble.tsx
index 46459ff894..39a6cb36ad 100644
--- a/autogpt_platform/frontend/src/components/contextual/Chat/components/UserChatBubble/UserChatBubble.tsx
+++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/UserChatBubble/UserChatBubble.tsx
@@ -10,7 +10,7 @@ export function UserChatBubble({ children, className }: UserChatBubbleProps) {
return (
+ const hasPendingOperationsFromServer = useMemo(() => {
+ if (!messages || messages.length === 0) return false;
+ const pendingTypes = new Set([
+ "operation_pending",
+ "operation_in_progress",
+ "operation_started",
+ ]);
+ return messages.some((msg) => {
+ if (msg.role !== "tool" || !msg.content) return false;
+ try {
+ const content =
+ typeof msg.content === "string"
+ ? JSON.parse(msg.content)
+ : msg.content;
+ return pendingTypes.has(content?.type);
+ } catch {
+ return false;
+ }
+ });
+ }, [messages]);
+
+ // Track when server has acknowledged the pending operation
+ useEffect(() => {
+ if (hasPendingOperationsFromServer) {
+ hasSeenServerPendingRef.current = true;
+ }
+ }, [hasPendingOperationsFromServer]);
+
+ // Combined: poll if server has pending ops OR if we received operation_started via SSE
+ const hasPendingOperations = hasPendingOperationsFromServer || forcePolling;
+
+ // Clear forcePolling only after server has acknowledged AND completed the operation
+ useEffect(() => {
+ if (
+ forcePolling &&
+ !hasPendingOperationsFromServer &&
+ hasSeenServerPendingRef.current
+ ) {
+ // Server acknowledged the operation and it's now complete
+ setForcePolling(false);
+ hasSeenServerPendingRef.current = false;
+ }
+ }, [forcePolling, hasPendingOperationsFromServer]);
+
+ // Function to trigger polling (called when operation_started is received via SSE)
+ function startPollingForOperation() {
+ setForcePolling(true);
+ hasSeenServerPendingRef.current = false; // Reset for new operation
+ }
+
+ // Refresh sessions list when a pending operation completes
+ // (hasPendingOperations transitions from true to false)
+ const prevHasPendingOperationsRef = useRef(hasPendingOperations);
useEffect(
- function refreshSessionsListOnLoad() {
- if (sessionId && sessionData && !isLoadingSession) {
+ function refreshSessionsListOnOperationComplete() {
+ const wasHasPending = prevHasPendingOperationsRef.current;
+ prevHasPendingOperationsRef.current = hasPendingOperations;
+
+ // Only invalidate when transitioning from pending to not pending
+ if (wasHasPending && !hasPendingOperations && sessionId) {
queryClient.invalidateQueries({
queryKey: getGetV2ListSessionsQueryKey(),
});
}
},
- [sessionId, sessionData, isLoadingSession, queryClient],
+ [hasPendingOperations, sessionId, queryClient],
+ );
+
+ // Poll for updates when there are pending operations
+ // Backoff: 2s, 4s, 6s, 8s, 10s, ... up to 30s max
+ const pollAttemptRef = useRef(0);
+ const hasPendingOperationsRef = useRef(hasPendingOperations);
+ hasPendingOperationsRef.current = hasPendingOperations;
+
+ useEffect(
+ function pollForPendingOperations() {
+ if (!sessionId || !hasPendingOperations) {
+ pollAttemptRef.current = 0;
+ return;
+ }
+
+ let cancelled = false;
+ let timeoutId: ReturnType<typeof setTimeout> | null = null;
+
+ function schedule() {
+ // 2s, 4s, 6s, 8s, 10s, ... 30s (max)
+ const delay = Math.min((pollAttemptRef.current + 1) * 2000, 30000);
+ timeoutId = setTimeout(async () => {
+ if (cancelled) return;
+ pollAttemptRef.current += 1;
+ try {
+ await refetch();
+ } catch (err) {
+ console.error("[useChatSession] Poll failed:", err);
+ } finally {
+ if (!cancelled && hasPendingOperationsRef.current) {
+ schedule();
+ }
+ }
+ }, delay);
+ }
+
+ schedule();
+
+ return () => {
+ cancelled = true;
+ if (timeoutId) clearTimeout(timeoutId);
+ };
+ },
+ [sessionId, hasPendingOperations, refetch],
);
async function createSession() {
@@ -239,11 +348,13 @@ export function useChatSession({
isCreating,
error,
isSessionNotFound: isNotFoundError(loadError),
+ hasPendingOperations,
createSession,
loadSession,
refreshSession,
claimSession,
clearSession,
+ startPollingForOperation,
};
}
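Taken together, the hook now exposes `hasPendingOperations` and `startPollingForOperation`: call the latter when an `operation_started` event arrives over SSE, and the linear backoff (delays of 2s, 4s, 6s, …, capped at 30s) drives refetches until the server reports the operation complete. A rough consumer sketch — the hook options, SSE endpoint, and event shape are assumptions based on the pending types checked above:

```typescript
import { useEffect } from "react";
// Import path assumed; the hook is the one modified in the diff above.
import { useChatSession } from "./useChatSession";

// Sketch only: wire an SSE "operation_started" event into the hook's new API.
function useOperationPolling(sessionId: string) {
  const { hasPendingOperations, startPollingForOperation } = useChatSession({
    sessionId,
  });

  useEffect(() => {
    const source = new EventSource(`/api/chat/${sessionId}/stream`);
    source.onmessage = (e) => {
      try {
        const event = JSON.parse(e.data) as { type?: string };
        if (event.type === "operation_started") {
          // Forces polling (2s, 4s, ... capped at 30s) until the server
          // confirms the operation is complete.
          startPollingForOperation();
        }
      } catch {
        // Ignore malformed SSE payloads.
      }
    };
    return () => source.close();
  }, [sessionId, startPollingForOperation]);

  return hasPendingOperations;
}
```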
diff --git a/autogpt_platform/frontend/src/components/layout/Navbar/components/Wallet/Wallet.tsx b/autogpt_platform/frontend/src/components/layout/Navbar/components/Wallet/Wallet.tsx
index 0a3c7de6c8..4a25c84f92 100644
--- a/autogpt_platform/frontend/src/components/layout/Navbar/components/Wallet/Wallet.tsx
+++ b/autogpt_platform/frontend/src/components/layout/Navbar/components/Wallet/Wallet.tsx
@@ -255,13 +255,18 @@ export function Wallet() {
(notification: WebSocketNotification) => {
if (
notification.type !== "onboarding" ||
- notification.event !== "step_completed" ||
- !walletRef.current
+ notification.event !== "step_completed"
) {
return;
}
- // Only trigger confetti for tasks that are in groups
+ // Always refresh credits when any onboarding step completes
+ fetchCredits();
+
+ // Only trigger confetti for tasks that are in displayed groups
+ if (!walletRef.current) {
+ return;
+ }
const taskIds = groups
.flatMap((group) => group.tasks)
.map((task) => task.id);
@@ -274,7 +279,6 @@ export function Wallet() {
return;
}
- fetchCredits();
party.confetti(walletRef.current, {
count: 30,
spread: 120,
@@ -284,7 +288,7 @@ export function Wallet() {
modules: [fadeOut],
});
},
- [fetchCredits, fadeOut],
+ [fetchCredits, fadeOut, groups],
);
// WebSocket setup for onboarding notifications
diff --git a/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts b/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts
index 82c03bc9f1..74855f5e28 100644
--- a/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts
+++ b/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts
@@ -516,7 +516,7 @@ export type GraphValidationErrorResponse = {
/* *** LIBRARY *** */
-/* Mirror of backend/server/v2/library/model.py:LibraryAgent */
+/* Mirror of backend/api/features/library/model.py:LibraryAgent */
export type LibraryAgent = {
id: LibraryAgentID;
graph_id: GraphID;
@@ -616,7 +616,7 @@ export enum LibraryAgentSortEnum {
/* *** CREDENTIALS *** */
-/* Mirror of backend/server/integrations/router.py:CredentialsMetaResponse */
+/* Mirror of backend/api/features/integrations/router.py:CredentialsMetaResponse */
export type CredentialsMetaResponse = {
id: string;
provider: CredentialsProviderName;
@@ -628,13 +628,13 @@ export type CredentialsMetaResponse = {
is_system?: boolean;
};
-/* Mirror of backend/server/integrations/router.py:CredentialsDeletionResponse */
+/* Mirror of backend/api/features/integrations/router.py:CredentialsDeletionResponse */
export type CredentialsDeleteResponse = {
deleted: true;
revoked: boolean | null;
};
-/* Mirror of backend/server/integrations/router.py:CredentialsDeletionNeedsConfirmationResponse */
+/* Mirror of backend/api/features/integrations/router.py:CredentialsDeletionNeedsConfirmationResponse */
export type CredentialsDeleteNeedConfirmationResponse = {
deleted: false;
need_confirmation: true;
@@ -888,7 +888,7 @@ export type Schedule = {
export type ScheduleID = Brand<string, "ScheduleID">;
-/* Mirror of backend/server/routers/v1.py:ScheduleCreationRequest */
+/* Mirror of backend/api/features/v1.py:ScheduleCreationRequest */
export type ScheduleCreatable = {
graph_id: GraphID;
graph_version: number;
@@ -1003,6 +1003,7 @@ export type OnboardingStep =
| "AGENT_INPUT"
| "CONGRATS"
// First Wins
+ | "VISIT_COPILOT"
| "GET_RESULTS"
| "MARKETPLACE_VISIT"
| "MARKETPLACE_ADD_AGENT"
diff --git a/autogpt_platform/frontend/src/services/storage/session-storage.ts b/autogpt_platform/frontend/src/services/storage/session-storage.ts
index 8404da571c..1be82c98fb 100644
--- a/autogpt_platform/frontend/src/services/storage/session-storage.ts
+++ b/autogpt_platform/frontend/src/services/storage/session-storage.ts
@@ -3,6 +3,7 @@ import { environment } from "../environment";
export enum SessionKey {
CHAT_SENT_INITIAL_PROMPTS = "chat_sent_initial_prompts",
+ CHAT_INITIAL_PROMPTS = "chat_initial_prompts",
}
function get(key: SessionKey) {
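The new `CHAT_INITIAL_PROMPTS` key sits alongside the existing sent-prompts key. A plausible usage — stashing a prompt before navigating into a chat session — might look like this, going straight to `window.sessionStorage` (the `@/` import alias is an assumption; the module's own get/set wrappers would work equally well):

```typescript
import { SessionKey } from "@/services/storage/session-storage";

// Stash a prompt so the chat page can pick it up after navigation.
function stashInitialPrompt(prompt: string) {
  window.sessionStorage.setItem(SessionKey.CHAT_INITIAL_PROMPTS, prompt);
}

// Read and clear the stashed prompt (one-shot semantics).
function popInitialPrompt(): string | null {
  const value = window.sessionStorage.getItem(SessionKey.CHAT_INITIAL_PROMPTS);
  if (value !== null) {
    window.sessionStorage.removeItem(SessionKey.CHAT_INITIAL_PROMPTS);
  }
  return value;
}
```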
diff --git a/autogpt_platform/frontend/src/tests/library.spec.ts b/autogpt_platform/frontend/src/tests/library.spec.ts
index 1972e94522..52941785e3 100644
--- a/autogpt_platform/frontend/src/tests/library.spec.ts
+++ b/autogpt_platform/frontend/src/tests/library.spec.ts
@@ -59,12 +59,13 @@ test.describe("Library", () => {
});
test("pagination works correctly", async ({ page }, testInfo) => {
- test.setTimeout(testInfo.timeout * 3); // Increase timeout for pagination operations
+ test.setTimeout(testInfo.timeout * 3);
await page.goto("/library");
+ const PAGE_SIZE = 20;
const paginationResult = await libraryPage.testPagination();
- if (paginationResult.initialCount >= 10) {
+ if (paginationResult.initialCount >= PAGE_SIZE) {
expect(paginationResult.finalCount).toBeGreaterThanOrEqual(
paginationResult.initialCount,
);
@@ -133,7 +134,10 @@ test.describe("Library", () => {
test.expect(clearedSearchValue).toBe("");
});
- test("pagination while searching works correctly", async ({ page }) => {
+ test("pagination while searching works correctly", async ({
+ page,
+ }, testInfo) => {
+ test.setTimeout(testInfo.timeout * 3);
await page.goto("/library");
const allAgents = await libraryPage.getAgents();
@@ -152,9 +156,10 @@ test.describe("Library", () => {
);
expect(matchingResults.length).toEqual(initialSearchResults.length);
+ const PAGE_SIZE = 20;
const searchPaginationResult = await libraryPage.testPagination();
- if (searchPaginationResult.initialCount >= 10) {
+ if (searchPaginationResult.initialCount >= PAGE_SIZE) {
expect(searchPaginationResult.finalCount).toBeGreaterThanOrEqual(
searchPaginationResult.initialCount,
);
diff --git a/autogpt_platform/frontend/src/tests/marketplace-creator.spec.ts b/autogpt_platform/frontend/src/tests/marketplace-creator.spec.ts
index 3558f0672c..a41b652afb 100644
--- a/autogpt_platform/frontend/src/tests/marketplace-creator.spec.ts
+++ b/autogpt_platform/frontend/src/tests/marketplace-creator.spec.ts
@@ -69,9 +69,12 @@ test.describe("Marketplace Creator Page – Basic Functionality", () => {
await marketplacePage.getFirstCreatorProfile(page);
await firstCreatorProfile.click();
await page.waitForURL("**/marketplace/creator/**");
+ await page.waitForLoadState("networkidle").catch(() => {});
+
const firstAgent = page
.locator('[data-testid="store-card"]:visible')
.first();
+ await firstAgent.waitFor({ state: "visible", timeout: 30000 });
await firstAgent.click();
await page.waitForURL("**/marketplace/agent/**");
diff --git a/autogpt_platform/frontend/src/tests/marketplace.spec.ts b/autogpt_platform/frontend/src/tests/marketplace.spec.ts
index 774713dc82..44d89bf351 100644
--- a/autogpt_platform/frontend/src/tests/marketplace.spec.ts
+++ b/autogpt_platform/frontend/src/tests/marketplace.spec.ts
@@ -77,7 +77,6 @@ test.describe("Marketplace – Basic Functionality", () => {
const firstFeaturedAgent =
await marketplacePage.getFirstFeaturedAgent(page);
- await firstFeaturedAgent.waitFor({ state: "visible" });
await firstFeaturedAgent.click();
await page.waitForURL("**/marketplace/agent/**");
await matchesUrl(page, /\/marketplace\/agent\/.+/);
@@ -116,7 +115,15 @@ test.describe("Marketplace – Basic Functionality", () => {
const searchTerm = page.getByText("DummyInput").first();
await isVisible(searchTerm);
- await page.waitForTimeout(10000);
+ await page.waitForLoadState("networkidle").catch(() => {});
+
+ await page
+ .waitForFunction(
+ () =>
+ document.querySelectorAll('[data-testid="store-card"]').length > 0,
+ { timeout: 15000 },
+ )
+ .catch(() => console.log("No search results appeared within timeout"));
const results = await marketplacePage.getSearchResultsCount(page);
expect(results).toBeGreaterThan(0);
diff --git a/autogpt_platform/frontend/src/tests/pages/library.page.ts b/autogpt_platform/frontend/src/tests/pages/library.page.ts
index 3a7695ec3a..03e98598b4 100644
--- a/autogpt_platform/frontend/src/tests/pages/library.page.ts
+++ b/autogpt_platform/frontend/src/tests/pages/library.page.ts
@@ -300,21 +300,27 @@ export class LibraryPage extends BasePage {
async scrollToLoadMore(): Promise<void> {
console.log(`scrolling to load more agents`);
- // Get initial agent count
- const initialCount = await this.getAgentCount();
- console.log(`Initial agent count: ${initialCount}`);
+ const initialCount = await this.getAgentCountByListLength();
+ console.log(`Initial agent count (DOM cards): ${initialCount}`);
- // Scroll down to trigger pagination
await this.scrollToBottom();
- // Wait for potential new agents to load
- await this.page.waitForTimeout(2000);
+ await this.page
+ .waitForLoadState("networkidle", { timeout: 10000 })
+ .catch(() => console.log("Network idle timeout, continuing..."));
- // Check if more agents loaded
- const newCount = await this.getAgentCount();
- console.log(`New agent count after scroll: ${newCount}`);
+ await this.page
+ .waitForFunction(
+ (prevCount) =>
+ document.querySelectorAll('[data-testid="library-agent-card"]')
+ .length > prevCount,
+ initialCount,
+ { timeout: 5000 },
+ )
+ .catch(() => {});
- return;
+ const newCount = await this.getAgentCountByListLength();
+ console.log(`New agent count after scroll (DOM cards): ${newCount}`);
}
async testPagination(): Promise<{
diff --git a/autogpt_platform/frontend/src/tests/pages/login.page.ts b/autogpt_platform/frontend/src/tests/pages/login.page.ts
index 9082cc6219..adcb8d908b 100644
--- a/autogpt_platform/frontend/src/tests/pages/login.page.ts
+++ b/autogpt_platform/frontend/src/tests/pages/login.page.ts
@@ -37,9 +37,13 @@ export class LoginPage {
this.page.on("load", (page) => console.log(`ℹ️ Now at URL: ${page.url()}`));
// Start waiting for navigation before clicking
+ // Wait for redirect to marketplace, onboarding, library, or copilot (new landing pages)
const leaveLoginPage = this.page
.waitForURL(
- (url) => /^\/(marketplace|onboarding(\/.*)?)?$/.test(url.pathname),
+ (url: URL) =>
+ /^\/(marketplace|onboarding(\/.*)?|library|copilot)?$/.test(
+ url.pathname,
+ ),
{ timeout: 10_000 },
)
.catch((reason) => {
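For reference, the updated pattern accepts exactly the post-login landing paths (onboarding may nest) and nothing deeper. A quick check of its behavior, with sample paths chosen for illustration:

```typescript
const landing = /^\/(marketplace|onboarding(\/.*)?|library|copilot)?$/;

// Matches the root and the four landing pages.
["/", "/marketplace", "/onboarding", "/onboarding/step-2", "/library", "/copilot"]
  .forEach((p) => console.assert(landing.test(p), `expected match: ${p}`));

// Rejects deeper routes, so navigation within the app won't false-positive.
console.assert(!landing.test("/library/agents"));
console.assert(!landing.test("/marketplace/agent/some-agent"));
```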
diff --git a/autogpt_platform/frontend/src/tests/pages/marketplace.page.ts b/autogpt_platform/frontend/src/tests/pages/marketplace.page.ts
index 20f60c371a..115a7b2f12 100644
--- a/autogpt_platform/frontend/src/tests/pages/marketplace.page.ts
+++ b/autogpt_platform/frontend/src/tests/pages/marketplace.page.ts
@@ -9,6 +9,7 @@ export class MarketplacePage extends BasePage {
async goto(page: Page) {
await page.goto("/marketplace");
+ await page.waitForLoadState("networkidle").catch(() => {});
}
async getMarketplaceTitle(page: Page) {
@@ -109,16 +110,24 @@ export class MarketplacePage extends BasePage {
async getFirstFeaturedAgent(page: Page) {
const { getId } = getSelectors(page);
- return getId("featured-store-card").first();
+ const card = getId("featured-store-card").first();
+ await card.waitFor({ state: "visible", timeout: 30000 });
+ return card;
}
async getFirstTopAgent() {
- return this.page.locator('[data-testid="store-card"]:visible').first();
+ const card = this.page
+ .locator('[data-testid="store-card"]:visible')
+ .first();
+ await card.waitFor({ state: "visible", timeout: 30000 });
+ return card;
}
async getFirstCreatorProfile(page: Page) {
const { getId } = getSelectors(page);
- return getId("creator-card").first();
+ const card = getId("creator-card").first();
+ await card.waitFor({ state: "visible", timeout: 30000 });
+ return card;
}
async getSearchResultsCount(page: Page) {
diff --git a/autogpt_platform/frontend/src/tests/utils/signup.ts b/autogpt_platform/frontend/src/tests/utils/signup.ts
index 7c8fdbe01b..192a9129b9 100644
--- a/autogpt_platform/frontend/src/tests/utils/signup.ts
+++ b/autogpt_platform/frontend/src/tests/utils/signup.ts
@@ -36,14 +36,16 @@ export async function signupTestUser(
const signupButton = getButton("Sign up");
await signupButton.click();
- // Wait for successful signup - could redirect to onboarding or marketplace
+ // Wait for successful signup - could redirect to various pages depending on onboarding state
try {
- // Wait for either onboarding or marketplace redirect
- await Promise.race([
- page.waitForURL(/\/onboarding/, { timeout: 15000 }),
- page.waitForURL(/\/marketplace/, { timeout: 15000 }),
- ]);
+ // Wait for redirect to onboarding, marketplace, copilot, or library
+ // Use a single waitForURL with a callback instead of racing two
+ // separate waitForURL calls, which is flaky
+ await page.waitForURL(
+ (url: URL) =>
+ /\/(onboarding|marketplace|copilot|library)/.test(url.pathname),
+ { timeout: 15000 },
+ );
} catch (error) {
console.error(
"❌ Timeout waiting for redirect, current URL:",
@@ -54,14 +56,19 @@ export async function signupTestUser(
const currentUrl = page.url();
- // Handle onboarding or marketplace redirect
+ // Handle onboarding redirect if needed
if (currentUrl.includes("/onboarding") && ignoreOnboarding) {
await page.goto("http://localhost:3000/marketplace");
await page.waitForLoadState("domcontentloaded", { timeout: 10000 });
}
- // Verify we're on the expected final page
- if (ignoreOnboarding || currentUrl.includes("/marketplace")) {
+ // Verify we're on an expected final page and user is authenticated
+ if (currentUrl.includes("/copilot") || currentUrl.includes("/library")) {
+ // For copilot/library landing pages, just verify user is authenticated
+ await page
+ .getByTestId("profile-popout-menu-trigger")
+ .waitFor({ state: "visible", timeout: 10000 });
+ } else if (ignoreOnboarding || currentUrl.includes("/marketplace")) {
// Verify we're on marketplace
await page
.getByText(
diff --git a/backend/blocks/video/__init__.py b/backend/blocks/video/__init__.py
deleted file mode 100644
index fd95ef9a58..0000000000
--- a/backend/blocks/video/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# Video editing blocks
diff --git a/docs/integrations/README.md b/docs/integrations/README.md
index b91f4e2f14..27d1f756ac 100644
--- a/docs/integrations/README.md
+++ b/docs/integrations/README.md
@@ -53,7 +53,7 @@ Below is a comprehensive list of all available blocks, categorized by their prim
| [Block Installation](block-integrations/basic.md#block-installation) | Given a code string, this block allows the verification and installation of a block code into the system |
| [Concatenate Lists](block-integrations/basic.md#concatenate-lists) | Concatenates multiple lists into a single list |
| [Dictionary Is Empty](block-integrations/basic.md#dictionary-is-empty) | Checks if a dictionary is empty |
-| [File Store](block-integrations/basic.md#file-store) | Stores the input file in the temporary directory |
+| [File Store](block-integrations/basic.md#file-store) | Downloads and stores a file from a URL, data URI, or local path |
| [Find In Dictionary](block-integrations/basic.md#find-in-dictionary) | A block that looks up a value in a dictionary, list, or object by key or index and returns the corresponding value |
| [Find In List](block-integrations/basic.md#find-in-list) | Finds the index of the value in the list |
| [Get All Memories](block-integrations/basic.md#get-all-memories) | Retrieve all memories from Mem0 with optional conversation filtering |
diff --git a/docs/integrations/block-integrations/basic.md b/docs/integrations/block-integrations/basic.md
index f92d19002f..5a73fd5a03 100644
--- a/docs/integrations/block-integrations/basic.md
+++ b/docs/integrations/block-integrations/basic.md
@@ -709,7 +709,7 @@ This is useful for conditional logic where you need to verify if data was return
## File Store
### What it is
-Stores the input file in the temporary directory.
+Downloads and stores a file from a URL, data URI, or local path. Use this to fetch images, documents, or other files for processing. In CoPilot: saves to workspace (use list_workspace_files to see it). In graphs: outputs a data URI to pass to other blocks.
### How it works
@@ -722,15 +722,15 @@ The block outputs a file path that other blocks can use to access the stored fil
| Input | Description | Type | Required |
|-------|-------------|------|----------|
-| file_in | The file to store in the temporary directory, it can be a URL, data URI, or local path. | str (file) | Yes |
-| base_64 | Whether produce an output in base64 format (not recommended, you can pass the string path just fine accross blocks). | bool | No |
+| file_in | The file to download and store. Can be a URL (https://...), data URI, or local path. | str (file) | Yes |
+| base_64 | Whether to produce output in base64 format (not recommended, you can pass the file reference across blocks). | bool | No |
### Outputs
| Output | Description | Type |
|--------|-------------|------|
| error | Error message if the operation failed | str |
-| file_out | The relative path to the stored file in the temporary directory. | str (file) |
+| file_out | Reference to the stored file. In CoPilot: workspace:// URI (visible in list_workspace_files). In graphs: data URI for passing to other blocks. | str (file) |
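Since `file_out` can now be either a `workspace://` URI (CoPilot) or a data URI (graph executions), downstream code that consumes it should branch on the scheme. A hedged sketch — the function and return shape are illustrative, not part of the platform API:

```typescript
// Dispatch on the two file_out forms described in the table above.
function handleFileOut(fileOut: string) {
  if (fileOut.startsWith("workspace://")) {
    // CoPilot: the file lives in the session workspace
    // (discoverable via list_workspace_files).
    return { kind: "workspace" as const, uri: fileOut };
  }
  if (fileOut.startsWith("data:")) {
    // Graph execution: inline data URI, pass straight to the next block.
    return { kind: "data-uri" as const, uri: fileOut };
  }
  throw new Error(`Unexpected file_out format: ${fileOut}`);
}
```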
### Possible use case
diff --git a/docs/integrations/block-integrations/llm.md b/docs/integrations/block-integrations/llm.md
index f4d69b912b..6a0a9e0987 100644
--- a/docs/integrations/block-integrations/llm.md
+++ b/docs/integrations/block-integrations/llm.md
@@ -65,7 +65,7 @@ The result routes data to yes_output or no_output, enabling intelligent branchin
| condition | A plaintext English description of the condition to evaluate | str | Yes |
| yes_value | (Optional) Value to output if the condition is true. If not provided, input_value will be used. | Yes Value | No |
| no_value | (Optional) Value to output if the condition is false. If not provided, input_value will be used. | No Value | No |
-| model | The language model to use for evaluating the condition. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-3-7-sonnet-20250219" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
+| model | The language model to use for evaluating the condition. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
### Outputs
@@ -103,7 +103,7 @@ The block sends the entire conversation history to the chosen LLM, including sys
|-------|-------------|------|----------|
| prompt | The prompt to send to the language model. | str | No |
| messages | List of messages in the conversation. | List[Any] | Yes |
-| model | The language model to use for the conversation. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-3-7-sonnet-20250219" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
+| model | The language model to use for the conversation. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| max_tokens | The maximum number of tokens to generate in the chat completion. | int | No |
| ollama_host | Ollama host for local models | str | No |
@@ -257,7 +257,7 @@ The block formulates a prompt based on the given focus or source data, sends it
|-------|-------------|------|----------|
| focus | The focus of the list to generate. | str | No |
| source_data | The data to generate the list from. | str | No |
-| model | The language model to use for generating the list. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-3-7-sonnet-20250219" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
+| model | The language model to use for generating the list. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| max_retries | Maximum number of retries for generating a valid list. | int | No |
| force_json_output | Whether to force the LLM to produce a JSON-only response. This can increase the block's reliability, but may also reduce the quality of the response because it prohibits the LLM from reasoning before providing its JSON response. | bool | No |
| max_tokens | The maximum number of tokens to generate in the chat completion. | int | No |
@@ -424,7 +424,7 @@ The block sends the input prompt to a chosen LLM, along with any system prompts
| prompt | The prompt to send to the language model. | str | Yes |
| expected_format | Expected format of the response. If provided, the response will be validated against this format. The keys should be the expected fields in the response, and the values should be the description of the field. | Dict[str, str] | Yes |
| list_result | Whether the response should be a list of objects in the expected format. | bool | No |
-| model | The language model to use for answering the prompt. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-3-7-sonnet-20250219" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
+| model | The language model to use for answering the prompt. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| force_json_output | Whether to force the LLM to produce a JSON-only response. This can increase the block's reliability, but may also reduce the quality of the response because it prohibits the LLM from reasoning before providing its JSON response. | bool | No |
| sys_prompt | The system prompt to provide additional context to the model. | str | No |
| conversation_history | The conversation history to provide context for the prompt. | List[Dict[str, Any]] | No |
@@ -464,7 +464,7 @@ The block sends the input prompt to a chosen LLM, processes the response, and re
| Input | Description | Type | Required |
|-------|-------------|------|----------|
| prompt | The prompt to send to the language model. You can use any of the {keys} from Prompt Values to fill in the prompt with values from the prompt values dictionary by putting them in curly braces. | str | Yes |
-| model | The language model to use for answering the prompt. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-3-7-sonnet-20250219" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
+| model | The language model to use for answering the prompt. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| sys_prompt | The system prompt to provide additional context to the model. | str | No |
| retry | Number of times to retry the LLM call if the response does not match the expected format. | int | No |
| prompt_values | Values used to fill in the prompt. The values can be used in the prompt by putting them in a double curly braces, e.g. {{variable_name}}. | Dict[str, str] | No |
@@ -501,7 +501,7 @@ The block splits the input text into smaller chunks, sends each chunk to an LLM
| Input | Description | Type | Required |
|-------|-------------|------|----------|
| text | The text to summarize. | str | Yes |
-| model | The language model to use for summarizing the text. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-3-7-sonnet-20250219" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
+| model | The language model to use for summarizing the text. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| focus | The topic to focus on in the summary | str | No |
| style | The style of the summary to generate. | "concise" \| "detailed" \| "bullet points" \| "numbered list" | No |
| max_tokens | The maximum number of tokens to generate in the chat completion. | int | No |
@@ -763,7 +763,7 @@ Configure agent_mode_max_iterations to control loop behavior: 0 for single decis
| Input | Description | Type | Required |
|-------|-------------|------|----------|
| prompt | The prompt to send to the language model. | str | Yes |
-| model | The language model to use for answering the prompt. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-3-7-sonnet-20250219" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
+| model | The language model to use for answering the prompt. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| multiple_tool_calls | Whether to allow multiple tool calls in a single response. | bool | No |
| sys_prompt | The system prompt to provide additional context to the model. | str | No |
| conversation_history | The conversation history to provide context for the prompt. | List[Dict[str, Any]] | No |
diff --git a/docs/integrations/block-integrations/multimedia.md b/docs/integrations/block-integrations/multimedia.md
index e2d11cfbf7..6b8f261346 100644
--- a/docs/integrations/block-integrations/multimedia.md
+++ b/docs/integrations/block-integrations/multimedia.md
@@ -12,7 +12,7 @@ Block to attach an audio file to a video file using moviepy.
This block combines a video file with an audio file using the moviepy library. The audio track is attached to the video, optionally with volume adjustment via the volume parameter (1.0 = original volume).
-Input files can be URLs, data URIs, or local paths. The output can be returned as either a file path or base64 data URI.
+Input files can be URLs, data URIs, or local paths. The output format is automatically determined: `workspace://` URLs in CoPilot, data URIs in graph executions.
### Inputs
@@ -22,7 +22,6 @@ Input files can be URLs, data URIs, or local paths. The output can be returned a
| video_in | Video input (URL, data URI, or local path). | str (file) | Yes |
| audio_in | Audio input (URL, data URI, or local path). | str (file) | Yes |
| volume | Volume scale for the newly attached audio track (1.0 = original). | float | No |
-| output_return_type | Return the final output as a relative path or base64 data URI. | "file_path" \| "data_uri" | No |
### Outputs
@@ -51,7 +50,7 @@ Block to loop a video to a given duration or number of repeats.
This block extends a video by repeating it to reach a target duration or number of loops. Set duration to specify the total length in seconds, or use n_loops to repeat the video a specific number of times.
-The looped video is seamlessly concatenated and can be output as a file path or base64 data URI.
+The looped video is seamlessly concatenated. The output format is automatically determined: `workspace://` URLs in CoPilot, data URIs in graph executions.
### Inputs
@@ -61,7 +60,6 @@ The looped video is seamlessly concatenated and can be output as a file path or
| video_in | The input video (can be a URL, data URI, or local path). | str (file) | Yes |
| duration | Target duration (in seconds) to loop the video to. If omitted, defaults to no looping. | float | No |
| n_loops | Number of times to repeat the video. If omitted, defaults to 1 (no repeat). | int | No |
-| output_return_type | How to return the output video. Either a relative path or base64 data URI. | "file_path" \| "data_uri" | No |
### Outputs
diff --git a/docs/integrations/block-integrations/stagehand/blocks.md b/docs/integrations/block-integrations/stagehand/blocks.md
index dac0586fa2..cc201d092b 100644
--- a/docs/integrations/block-integrations/stagehand/blocks.md
+++ b/docs/integrations/block-integrations/stagehand/blocks.md
@@ -20,7 +20,7 @@ Configure timeouts for DOM settlement and page loading. Variables can be passed
| Input | Description | Type | Required |
|-------|-------------|------|----------|
| browserbase_project_id | Browserbase project ID (required if using Browserbase) | str | Yes |
-| model | LLM to use for Stagehand (provider is inferred) | "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "claude-3-7-sonnet-20250219" | No |
+| model | LLM to use for Stagehand (provider is inferred) | "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "claude-sonnet-4-5-20250929" | No |
| url | URL to navigate to. | str | Yes |
| action | Action to perform. Suggested actions are: click, fill, type, press, scroll, select from dropdown. For multi-step actions, add an entry for each step. | List[str] | Yes |
| variables | Variables to use in the action. Variables contains data you want the action to use. | Dict[str, str] | No |
@@ -65,7 +65,7 @@ Supports searching within iframes and configurable timeouts for dynamic content
| Input | Description | Type | Required |
|-------|-------------|------|----------|
| browserbase_project_id | Browserbase project ID (required if using Browserbase) | str | Yes |
-| model | LLM to use for Stagehand (provider is inferred) | "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "claude-3-7-sonnet-20250219" | No |
+| model | LLM to use for Stagehand (provider is inferred) | "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "claude-sonnet-4-5-20250929" | No |
| url | URL to navigate to. | str | Yes |
| instruction | Natural language description of elements or actions to discover. | str | Yes |
| iframes | Whether to search within iframes. If True, Stagehand will search for actions within iframes. | bool | No |
@@ -106,7 +106,7 @@ Use this to explore a page's interactive elements before building automated work
| Input | Description | Type | Required |
|-------|-------------|------|----------|
| browserbase_project_id | Browserbase project ID (required if using Browserbase) | str | Yes |
-| model | LLM to use for Stagehand (provider is inferred) | "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "claude-3-7-sonnet-20250219" | No |
+| model | LLM to use for Stagehand (provider is inferred) | "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "claude-sonnet-4-5-20250929" | No |
| url | URL to navigate to. | str | Yes |
| instruction | Natural language description of elements or actions to discover. | str | Yes |
| iframes | Whether to search within iframes. If True, Stagehand will search for actions within iframes. | bool | No |
diff --git a/docs/platform/block-sdk-guide.md b/docs/platform/block-sdk-guide.md
index 5b3eda5184..42fd883251 100644
--- a/docs/platform/block-sdk-guide.md
+++ b/docs/platform/block-sdk-guide.md
@@ -277,6 +277,50 @@ async def run(
token = credentials.api_key.get_secret_value()
```
+### Handling Files
+
+When your block works with files (images, videos, documents), use `store_media_file()`:
+
+```python
+from backend.data.execution import ExecutionContext
+from backend.util.file import store_media_file
+from backend.util.type import MediaFileType
+
+async def run(
+ self,
+ input_data: Input,
+ *,
+ execution_context: ExecutionContext,
+ **kwargs,
+):
+ # PROCESSING: Need local file path for tools like ffmpeg, MoviePy, PIL
+ local_path = await store_media_file(
+ file=input_data.video,
+ execution_context=execution_context,
+ return_format="for_local_processing",
+ )
+
+ # EXTERNAL API: Need base64 content for APIs like Replicate, OpenAI
+ image_b64 = await store_media_file(
+ file=input_data.image,
+ execution_context=execution_context,
+ return_format="for_external_api",
+ )
+
+ # OUTPUT: Return to user/next block (auto-adapts to context)
+ result = await store_media_file(
+ file=generated_url,
+ execution_context=execution_context,
+ return_format="for_block_output", # workspace:// in CoPilot, data URI in graphs
+ )
+ yield "image_url", result
+```
+
+**Return format options:**
+- `"for_local_processing"` - Local file path for processing tools
+- `"for_external_api"` - Data URI for external APIs needing base64 (see the sketch below)
+- `"for_block_output"` - **Always use for outputs** - automatically picks the best format for the execution context
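+
+As a usage sketch, the `for_external_api` result can go straight into a vision-capable request. This assumes an OpenAI-style async client named `client` (adapt to your provider's SDK):
+
+```python
+# `image_b64` is the data URI returned above; vision endpoints accept it directly.
+response = await client.chat.completions.create(
+    model="gpt-4o",
+    messages=[
+        {
+            "role": "user",
+            "content": [
+                {"type": "text", "text": "Describe this image."},
+                {"type": "image_url", "image_url": {"url": image_b64}},
+            ],
+        }
+    ],
+)
+```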
+
## Testing Your Block
```bash
diff --git a/docs/platform/contributing/oauth-integration-flow.md b/docs/platform/contributing/oauth-integration-flow.md
index dbc7a54be5..f6c3f7fd17 100644
--- a/docs/platform/contributing/oauth-integration-flow.md
+++ b/docs/platform/contributing/oauth-integration-flow.md
@@ -25,7 +25,7 @@ This document focuses on the **API Integration OAuth flow** used for connecting
### 2. Backend API Trust Boundary
- **Location**: Server-side FastAPI application
- **Components**:
- - Integration router (`/backend/backend/server/integrations/router.py`)
+ - Integration router (`/backend/backend/api/features/integrations/router.py`)
- OAuth handlers (`/backend/backend/integrations/oauth/`)
- Credentials store (`/backend/backend/integrations/credentials_store.py`)
- **Trust Level**: Trusted - server-controlled environment
diff --git a/docs/platform/create-basic-agent.md b/docs/platform/create-basic-agent.md
index 7721fb9b9c..ffe654ba99 100644
--- a/docs/platform/create-basic-agent.md
+++ b/docs/platform/create-basic-agent.md
@@ -4,6 +4,28 @@
This guide walks through creating a simple question-answer AI agent using AutoGPT's visual builder. This is a basic example that can be expanded into more complex agents.
+## **Prerequisites**
+
+### **Cloud-Hosted AutoGPT**
+If you're using the cloud-hosted version at [agpt.co](https://agpt.co), you're ready to go! AI blocks come with **built-in credits** — no API keys required to get started. If you'd prefer to use your own API keys, you can add them via **Profile → Integrations**.
+
+### **Self-Hosted (Docker)**
+If you're running AutoGPT locally with Docker, you'll need to add your own API keys to `autogpt_platform/backend/.env`:
+
+```bash
+# Create or edit autogpt_platform/backend/.env
+OPENAI_API_KEY=sk-your-key-here
+ANTHROPIC_API_KEY=sk-ant-your-key-here
+# Add other provider keys as needed
+```
+
+After adding keys, restart the services:
+```bash
+docker compose down && docker compose up -d
+```
+
+**Note:** The Calculator example below doesn't require any API credentials — it's a good way to test your setup before adding AI blocks.
+
## **Example Agent: Q&A (with AI)**
A step-by-step guide to creating a simple Q&A agent using input and output blocks.
diff --git a/docs/platform/new_blocks.md b/docs/platform/new_blocks.md
index d9d329ff51..114ff8d9a4 100644
--- a/docs/platform/new_blocks.md
+++ b/docs/platform/new_blocks.md
@@ -111,6 +111,71 @@ Follow these steps to create and test a new block:
- `graph_exec_id`: The ID of the execution of the agent. This changes every time the agent has a new "run"
- `node_exec_id`: The ID of the execution of the node. This changes every time the node is executed
- `node_id`: The ID of the node that is being executed. It changes every version of the graph, but not every time the node is executed.
+ - `execution_context`: An `ExecutionContext` object containing user_id, graph_exec_id, workspace_id, and session_id. Required for file handling.
+
+### Handling Files in Blocks
+
+When your block needs to work with files (images, videos, documents), use `store_media_file()` from `backend.util.file`. This function handles downloading, validation, virus scanning, and storage.
+
+**Import:**
+```python
+from backend.data.execution import ExecutionContext
+from backend.util.file import get_exec_file_path, store_media_file
+from backend.util.type import MediaFileType
+```
+
+**The `return_format` parameter determines what you get back:**
+
+| Format | Use When | Returns |
+|--------|----------|---------|
+| `"for_local_processing"` | Processing with local tools (ffmpeg, MoviePy, PIL) | Local file path (e.g., `"image.png"`) |
+| `"for_external_api"` | Sending content to external APIs (Replicate, OpenAI) | Data URI (e.g., `"data:image/png;base64,..."`) |
+| `"for_block_output"` | Returning output from your block | Smart: `workspace://` in CoPilot, data URI in graphs |
+
+**Examples:**
+
+```python
+async def run(
+ self,
+ input_data: Input,
+ *,
+ execution_context: ExecutionContext,
+ **kwargs,
+) -> BlockOutput:
+ # PROCESSING: Need to work with file locally (ffmpeg, MoviePy, PIL)
+ local_path = await store_media_file(
+ file=input_data.video,
+ execution_context=execution_context,
+ return_format="for_local_processing",
+ )
+ # local_path = "video.mp4" - use with Path, ffmpeg, subprocess, etc.
+ full_path = get_exec_file_path(execution_context.graph_exec_id, local_path)
+
+ # EXTERNAL API: Need to send content to an API like Replicate
+ image_b64 = await store_media_file(
+ file=input_data.image,
+ execution_context=execution_context,
+ return_format="for_external_api",
+ )
+ # image_b64 = "data:image/png;base64,iVBORw0..." - send to external API
+
+ # OUTPUT: Returning result from block to user/next block
+ result_url = await store_media_file(
+ file=generated_image_url,
+ execution_context=execution_context,
+ return_format="for_block_output",
+ )
+ yield "image_url", result_url
+ # In CoPilot: result_url = "workspace://abc123" (persistent, context-efficient)
+ # In graphs: result_url = "data:image/png;base64,..." (for next block/display)
+```
+
+**Key points:**
+
+- `for_block_output` is the **only** format that auto-adapts to the execution context
+- Always use `for_block_output` for block outputs unless you have a specific reason not to
+- Never manually check for `workspace_id` - let `for_block_output` handle the logic (see the sketch below)
+- The function handles URLs, data URIs, `workspace://` references, and local paths as input
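+
+To make the last point concrete, here is a sketch of the pattern to avoid (the branch and helper in the comments are hypothetical):
+
+```python
+# Anti-pattern (hypothetical): re-implementing the context branching yourself.
+# if execution_context.workspace_id:
+#     yield "image_url", f"workspace://{file_id}"       # CoPilot path
+# else:
+#     yield "image_url", encode_as_data_uri(raw_bytes)  # graph path
+
+# Preferred: one call that adapts automatically.
+result_url = await store_media_file(
+    file=generated_image_url,
+    execution_context=execution_context,
+    return_format="for_block_output",
+)
+yield "image_url", result_url
+```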
### Field Types
diff --git a/docs/platform/ollama.md b/docs/platform/ollama.md
index 392bfabfe8..ecab9b8ae1 100644
--- a/docs/platform/ollama.md
+++ b/docs/platform/ollama.md
@@ -246,7 +246,7 @@ If you encounter any issues, verify that:
```bash
ollama pull llama3.2
```
-- If using a custom model, ensure it's added to the model list in `backend/server/model.py`
+- If using a custom model, ensure it's added to the model list in `backend/api/model.py`
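+
+As an illustrative sketch only (the actual enum name and layout in `backend/api/model.py` may differ), registering a custom model usually means adding an entry whose value matches the tag you pulled:
+
+```python
+# Hypothetical sketch - check backend/api/model.py for the real structure.
+from enum import Enum
+
+class LlmModel(str, Enum):
+    # ... existing models ...
+    OLLAMA_LLAMA3_2 = "llama3.2"
+    # Add your custom model, using the exact tag from `ollama pull`:
+    DOLPHIN_MISTRAL = "dolphin-mistral:latest"
+```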
#### Docker Issues
- Ensure Docker daemon is running: