+
+ );
+}
+
+export function BlockDetailsCard({ output }: Props) {
+ const inputs = output.block.inputs as {
+ properties?: Record;
+ required?: string[];
+ } | null;
+ const outputs = output.block.outputs as {
+ properties?: Record;
+ required?: string[];
+ } | null;
+
+ return (
+
+ {output.message}
+
+ {inputs?.properties && Object.keys(inputs.properties).length > 0 && (
+
+ )}
+
+ {outputs?.properties && Object.keys(outputs.properties).length > 0 && (
+
+ )}
+
+ );
+}
diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunBlock/helpers.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunBlock/helpers.tsx
index b8625988cd..6e56154a5e 100644
--- a/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunBlock/helpers.tsx
+++ b/autogpt_platform/frontend/src/app/(platform)/copilot/tools/RunBlock/helpers.tsx
@@ -10,18 +10,37 @@ import {
import type { ToolUIPart } from "ai";
import { OrbitLoader } from "../../components/OrbitLoader/OrbitLoader";
+/** Block details returned on first run_block attempt (before input_data provided). */
+export interface BlockDetailsResponse {
+ type: typeof ResponseType.block_details;
+ message: string;
+ session_id?: string | null;
+ block: {
+ id: string;
+ name: string;
+ description: string;
+ inputs: Record;
+ outputs: Record;
+ credentials: unknown[];
+ };
+ user_authenticated: boolean;
+}
+
export interface RunBlockInput {
block_id?: string;
+ block_name?: string;
input_data?: Record;
}
export type RunBlockToolOutput =
| SetupRequirementsResponse
+ | BlockDetailsResponse
| BlockOutputResponse
| ErrorResponse;
const RUN_BLOCK_OUTPUT_TYPES = new Set([
ResponseType.setup_requirements,
+ ResponseType.block_details,
ResponseType.block_output,
ResponseType.error,
]);
@@ -35,6 +54,15 @@ export function isRunBlockSetupRequirementsOutput(
);
}
+export function isRunBlockDetailsOutput(
+ output: RunBlockToolOutput,
+): output is BlockDetailsResponse {
+ return (
+ output.type === ResponseType.block_details ||
+ ("block" in output && typeof output.block === "object")
+ );
+}
+
export function isRunBlockBlockOutput(
output: RunBlockToolOutput,
): output is BlockOutputResponse {
@@ -64,6 +92,7 @@ function parseOutput(output: unknown): RunBlockToolOutput | null {
return output as RunBlockToolOutput;
}
if ("block_id" in output) return output as BlockOutputResponse;
+ if ("block" in output) return output as BlockDetailsResponse;
if ("setup_info" in output) return output as SetupRequirementsResponse;
if ("error" in output || "details" in output)
return output as ErrorResponse;
@@ -84,17 +113,25 @@ export function getAnimationText(part: {
output?: unknown;
}): string {
const input = part.input as RunBlockInput | undefined;
+ const blockName = input?.block_name?.trim();
const blockId = input?.block_id?.trim();
- const blockText = blockId ? ` "${blockId}"` : "";
+ // Prefer block_name if available, otherwise fall back to block_id
+ const blockText = blockName
+ ? ` "${blockName}"`
+ : blockId
+ ? ` "${blockId}"`
+ : "";
switch (part.state) {
case "input-streaming":
case "input-available":
- return `Running the block${blockText}`;
+ return `Running${blockText}`;
case "output-available": {
const output = parseOutput(part.output);
- if (!output) return `Running the block${blockText}`;
+ if (!output) return `Running${blockText}`;
if (isRunBlockBlockOutput(output)) return `Ran "${output.block_name}"`;
+ if (isRunBlockDetailsOutput(output))
+ return `Details for "${output.block.name}"`;
if (isRunBlockSetupRequirementsOutput(output)) {
return `Setup needed for "${output.setup_info.agent_name}"`;
}
@@ -158,6 +195,21 @@ export function getAccordionMeta(output: RunBlockToolOutput): {
};
}
+ if (isRunBlockDetailsOutput(output)) {
+ const inputKeys = Object.keys(
+ (output.block.inputs as { properties?: Record })
+ ?.properties ?? {},
+ );
+ return {
+ icon,
+ title: output.block.name,
+ description:
+ inputKeys.length > 0
+ ? `${inputKeys.length} input field${inputKeys.length === 1 ? "" : "s"} available`
+ : output.message,
+ };
+ }
+
if (isRunBlockSetupRequirementsOutput(output)) {
const missingCredsCount = Object.keys(
(output.setup_info.user_readiness?.missing_credentials ?? {}) as Record<
diff --git a/autogpt_platform/frontend/src/app/api/openapi.json b/autogpt_platform/frontend/src/app/api/openapi.json
index 1f975ff575..5d91f67981 100644
--- a/autogpt_platform/frontend/src/app/api/openapi.json
+++ b/autogpt_platform/frontend/src/app/api/openapi.json
@@ -1053,6 +1053,7 @@
"$ref": "#/components/schemas/ClarificationNeededResponse"
},
{ "$ref": "#/components/schemas/BlockListResponse" },
+ { "$ref": "#/components/schemas/BlockDetailsResponse" },
{ "$ref": "#/components/schemas/BlockOutputResponse" },
{ "$ref": "#/components/schemas/DocSearchResultsResponse" },
{ "$ref": "#/components/schemas/DocPageResponse" },
@@ -4625,6 +4626,128 @@
}
}
},
+ "/api/mcp/discover-tools": {
+ "post": {
+ "tags": ["v2", "mcp", "mcp"],
+ "summary": "Discover available tools on an MCP server",
+ "description": "Connect to an MCP server and return its available tools.\n\nIf the user has a stored MCP credential for this server URL, it will be\nused automatically — no need to pass an explicit auth token.",
+ "operationId": "postV2Discover available tools on an mcp server",
+ "requestBody": {
+ "content": {
+ "application/json": {
+ "schema": { "$ref": "#/components/schemas/DiscoverToolsRequest" }
+ }
+ },
+ "required": true
+ },
+ "responses": {
+ "200": {
+ "description": "Successful Response",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/DiscoverToolsResponse"
+ }
+ }
+ }
+ },
+ "401": {
+ "$ref": "#/components/responses/HTTP401NotAuthenticatedError"
+ },
+ "422": {
+ "description": "Validation Error",
+ "content": {
+ "application/json": {
+ "schema": { "$ref": "#/components/schemas/HTTPValidationError" }
+ }
+ }
+ }
+ },
+ "security": [{ "HTTPBearerJWT": [] }]
+ }
+ },
+ "/api/mcp/oauth/callback": {
+ "post": {
+ "tags": ["v2", "mcp", "mcp"],
+ "summary": "Exchange OAuth code for MCP tokens",
+ "description": "Exchange the authorization code for tokens and store the credential.\n\nThe frontend calls this after receiving the OAuth code from the popup.\nOn success, subsequent ``/discover-tools`` calls for the same server URL\nwill automatically use the stored credential.",
+ "operationId": "postV2Exchange oauth code for mcp tokens",
+ "requestBody": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/MCPOAuthCallbackRequest"
+ }
+ }
+ },
+ "required": true
+ },
+ "responses": {
+ "200": {
+ "description": "Successful Response",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/CredentialsMetaResponse"
+ }
+ }
+ }
+ },
+ "401": {
+ "$ref": "#/components/responses/HTTP401NotAuthenticatedError"
+ },
+ "422": {
+ "description": "Validation Error",
+ "content": {
+ "application/json": {
+ "schema": { "$ref": "#/components/schemas/HTTPValidationError" }
+ }
+ }
+ }
+ },
+ "security": [{ "HTTPBearerJWT": [] }]
+ }
+ },
+ "/api/mcp/oauth/login": {
+ "post": {
+ "tags": ["v2", "mcp", "mcp"],
+ "summary": "Initiate OAuth login for an MCP server",
+ "description": "Discover OAuth metadata from the MCP server and return a login URL.\n\n1. Discovers the protected-resource metadata (RFC 9728)\n2. Fetches the authorization server metadata (RFC 8414)\n3. Performs Dynamic Client Registration (RFC 7591) if available\n4. Returns the authorization URL for the frontend to open in a popup",
+ "operationId": "postV2Initiate oauth login for an mcp server",
+ "requestBody": {
+ "content": {
+ "application/json": {
+ "schema": { "$ref": "#/components/schemas/MCPOAuthLoginRequest" }
+ }
+ },
+ "required": true
+ },
+ "responses": {
+ "200": {
+ "description": "Successful Response",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/MCPOAuthLoginResponse"
+ }
+ }
+ }
+ },
+ "401": {
+ "$ref": "#/components/responses/HTTP401NotAuthenticatedError"
+ },
+ "422": {
+ "description": "Validation Error",
+ "content": {
+ "application/json": {
+ "schema": { "$ref": "#/components/schemas/HTTPValidationError" }
+ }
+ }
+ }
+ },
+ "security": [{ "HTTPBearerJWT": [] }]
+ }
+ },
"/api/oauth/app/{client_id}": {
"get": {
"tags": ["oauth"],
@@ -7315,6 +7438,58 @@
"enum": ["run", "byte", "second"],
"title": "BlockCostType"
},
+ "BlockDetails": {
+ "properties": {
+ "id": { "type": "string", "title": "Id" },
+ "name": { "type": "string", "title": "Name" },
+ "description": { "type": "string", "title": "Description" },
+ "inputs": {
+ "additionalProperties": true,
+ "type": "object",
+ "title": "Inputs",
+ "default": {}
+ },
+ "outputs": {
+ "additionalProperties": true,
+ "type": "object",
+ "title": "Outputs",
+ "default": {}
+ },
+ "credentials": {
+ "items": { "$ref": "#/components/schemas/CredentialsMetaInput" },
+ "type": "array",
+ "title": "Credentials",
+ "default": []
+ }
+ },
+ "type": "object",
+ "required": ["id", "name", "description"],
+ "title": "BlockDetails",
+ "description": "Detailed block information."
+ },
+ "BlockDetailsResponse": {
+ "properties": {
+ "type": {
+ "$ref": "#/components/schemas/ResponseType",
+ "default": "block_details"
+ },
+ "message": { "type": "string", "title": "Message" },
+ "session_id": {
+ "anyOf": [{ "type": "string" }, { "type": "null" }],
+ "title": "Session Id"
+ },
+ "block": { "$ref": "#/components/schemas/BlockDetails" },
+ "user_authenticated": {
+ "type": "boolean",
+ "title": "User Authenticated",
+ "default": false
+ }
+ },
+ "type": "object",
+ "required": ["message", "block"],
+ "title": "BlockDetailsResponse",
+ "description": "Response for block details (first run_block attempt)."
+ },
"BlockInfo": {
"properties": {
"id": { "type": "string", "title": "Id" },
@@ -7379,29 +7554,24 @@
"input_schema": {
"additionalProperties": true,
"type": "object",
- "title": "Input Schema"
+ "title": "Input Schema",
+ "description": "Full JSON schema for block inputs"
},
"output_schema": {
"additionalProperties": true,
"type": "object",
- "title": "Output Schema"
+ "title": "Output Schema",
+ "description": "Full JSON schema for block outputs"
},
"required_inputs": {
"items": { "$ref": "#/components/schemas/BlockInputFieldInfo" },
"type": "array",
"title": "Required Inputs",
- "description": "List of required input fields for this block"
+ "description": "List of input fields for this block"
}
},
"type": "object",
- "required": [
- "id",
- "name",
- "description",
- "categories",
- "input_schema",
- "output_schema"
- ],
+ "required": ["id", "name", "description", "categories"],
"title": "BlockInfoSummary",
"description": "Summary of a block for search results."
},
@@ -7447,7 +7617,7 @@
"usage_hint": {
"type": "string",
"title": "Usage Hint",
- "default": "To execute a block, call run_block with block_id set to the block's 'id' field and input_data containing the required fields from input_schema."
+ "default": "To execute a block, call run_block with block_id set to the block's 'id' field and input_data containing the fields listed in required_inputs."
}
},
"type": "object",
@@ -8017,7 +8187,7 @@
"host": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Host",
- "description": "Host pattern for host-scoped credentials"
+ "description": "Host pattern for host-scoped or MCP server URL for MCP credentials"
}
},
"type": "object",
@@ -8037,6 +8207,45 @@
"required": ["version_counts"],
"title": "DeleteGraphResponse"
},
+ "DiscoverToolsRequest": {
+ "properties": {
+ "server_url": {
+ "type": "string",
+ "title": "Server Url",
+ "description": "URL of the MCP server"
+ },
+ "auth_token": {
+ "anyOf": [{ "type": "string" }, { "type": "null" }],
+ "title": "Auth Token",
+ "description": "Optional Bearer token for authenticated MCP servers"
+ }
+ },
+ "type": "object",
+ "required": ["server_url"],
+ "title": "DiscoverToolsRequest",
+ "description": "Request to discover tools on an MCP server."
+ },
+ "DiscoverToolsResponse": {
+ "properties": {
+ "tools": {
+ "items": { "$ref": "#/components/schemas/MCPToolResponse" },
+ "type": "array",
+ "title": "Tools"
+ },
+ "server_name": {
+ "anyOf": [{ "type": "string" }, { "type": "null" }],
+ "title": "Server Name"
+ },
+ "protocol_version": {
+ "anyOf": [{ "type": "string" }, { "type": "null" }],
+ "title": "Protocol Version"
+ }
+ },
+ "type": "object",
+ "required": ["tools"],
+ "title": "DiscoverToolsResponse",
+ "description": "Response containing the list of tools available on an MCP server."
+ },
"DocPageResponse": {
"properties": {
"type": {
@@ -9808,6 +10017,62 @@
"required": ["login_url", "state_token"],
"title": "LoginResponse"
},
+ "MCPOAuthCallbackRequest": {
+ "properties": {
+ "code": {
+ "type": "string",
+ "title": "Code",
+ "description": "Authorization code from OAuth callback"
+ },
+ "state_token": {
+ "type": "string",
+ "title": "State Token",
+ "description": "State token for CSRF verification"
+ }
+ },
+ "type": "object",
+ "required": ["code", "state_token"],
+ "title": "MCPOAuthCallbackRequest",
+ "description": "Request to exchange an OAuth code for tokens."
+ },
+ "MCPOAuthLoginRequest": {
+ "properties": {
+ "server_url": {
+ "type": "string",
+ "title": "Server Url",
+ "description": "URL of the MCP server that requires OAuth"
+ }
+ },
+ "type": "object",
+ "required": ["server_url"],
+ "title": "MCPOAuthLoginRequest",
+ "description": "Request to start an OAuth flow for an MCP server."
+ },
+ "MCPOAuthLoginResponse": {
+ "properties": {
+ "login_url": { "type": "string", "title": "Login Url" },
+ "state_token": { "type": "string", "title": "State Token" }
+ },
+ "type": "object",
+ "required": ["login_url", "state_token"],
+ "title": "MCPOAuthLoginResponse",
+ "description": "Response with the OAuth login URL for the user to authenticate."
+ },
+ "MCPToolResponse": {
+ "properties": {
+ "name": { "type": "string", "title": "Name" },
+ "description": { "type": "string", "title": "Description" },
+ "input_schema": {
+ "additionalProperties": true,
+ "type": "object",
+ "title": "Input Schema"
+ }
+ },
+ "type": "object",
+ "required": ["name", "description", "input_schema"],
+ "title": "MCPToolResponse",
+ "description": "A single MCP tool returned by discovery."
+ },
"MarketplaceListing": {
"properties": {
"id": { "type": "string", "title": "Id" },
@@ -11053,6 +11318,7 @@
"agent_saved",
"clarification_needed",
"block_list",
+ "block_details",
"block_output",
"doc_search_results",
"doc_page",
@@ -11064,7 +11330,12 @@
"operation_started",
"operation_pending",
"operation_in_progress",
- "input_validation_error"
+ "input_validation_error",
+ "web_fetch",
+ "bash_exec",
+ "operation_status",
+ "feature_request_search",
+ "feature_request_created"
],
"title": "ResponseType",
"description": "Types of tool responses."
diff --git a/autogpt_platform/frontend/src/components/contextual/CredentialsInput/components/CredentialsGroupedView/CredentialsGroupedView.tsx b/autogpt_platform/frontend/src/components/contextual/CredentialsInput/components/CredentialsGroupedView/CredentialsGroupedView.tsx
index 135a960431..22d0a318a9 100644
--- a/autogpt_platform/frontend/src/components/contextual/CredentialsInput/components/CredentialsGroupedView/CredentialsGroupedView.tsx
+++ b/autogpt_platform/frontend/src/components/contextual/CredentialsInput/components/CredentialsGroupedView/CredentialsGroupedView.tsx
@@ -38,13 +38,8 @@ export function CredentialsGroupedView({
const allProviders = useContext(CredentialsProvidersContext);
const { userCredentialFields, systemCredentialFields } = useMemo(
- () =>
- splitCredentialFieldsBySystem(
- credentialFields,
- allProviders,
- inputCredentials,
- ),
- [credentialFields, allProviders, inputCredentials],
+ () => splitCredentialFieldsBySystem(credentialFields, allProviders),
+ [credentialFields, allProviders],
);
const hasSystemCredentials = systemCredentialFields.length > 0;
@@ -86,11 +81,13 @@ export function CredentialsGroupedView({
const providerNames = schema.credentials_provider || [];
const credentialTypes = schema.credentials_types || [];
const requiredScopes = schema.credentials_scopes;
+ const discriminatorValues = schema.discriminator_values;
const savedCredential = findSavedCredentialByProviderAndType(
providerNames,
credentialTypes,
requiredScopes,
allProviders,
+ discriminatorValues,
);
if (savedCredential) {
diff --git a/autogpt_platform/frontend/src/components/contextual/CredentialsInput/components/CredentialsGroupedView/helpers.ts b/autogpt_platform/frontend/src/components/contextual/CredentialsInput/components/CredentialsGroupedView/helpers.ts
index 5f439d3a32..2d8d001a72 100644
--- a/autogpt_platform/frontend/src/components/contextual/CredentialsInput/components/CredentialsGroupedView/helpers.ts
+++ b/autogpt_platform/frontend/src/components/contextual/CredentialsInput/components/CredentialsGroupedView/helpers.ts
@@ -23,10 +23,35 @@ function hasRequiredScopes(
return true;
}
+/** Check if a credential matches the discriminator values (e.g. MCP server URL). */
+function matchesDiscriminatorValues(
+ credential: { host?: string | null; provider: string; type: string },
+ discriminatorValues?: string[],
+) {
+ // MCP OAuth2 credentials must match by server URL
+ if (credential.type === "oauth2" && credential.provider === "mcp") {
+ if (!discriminatorValues || discriminatorValues.length === 0) return false;
+ return (
+ credential.host != null && discriminatorValues.includes(credential.host)
+ );
+ }
+ // Host-scoped credentials match by host
+ if (credential.type === "host_scoped" && credential.host) {
+ if (!discriminatorValues || discriminatorValues.length === 0) return true;
+ return discriminatorValues.some((v) => {
+ try {
+ return new URL(v).hostname === credential.host;
+ } catch {
+ return false;
+ }
+ });
+ }
+ return true;
+}
+
export function splitCredentialFieldsBySystem(
credentialFields: CredentialField[],
allProviders: CredentialsProvidersContextType | null,
- inputCredentials?: Record,
) {
if (!allProviders || credentialFields.length === 0) {
return {
@@ -52,17 +77,9 @@ export function splitCredentialFieldsBySystem(
}
}
- const sortByUnsetFirst = (a: CredentialField, b: CredentialField) => {
- const aIsSet = Boolean(inputCredentials?.[a[0]]);
- const bIsSet = Boolean(inputCredentials?.[b[0]]);
-
- if (aIsSet === bIsSet) return 0;
- return aIsSet ? 1 : -1;
- };
-
return {
- userCredentialFields: userFields.sort(sortByUnsetFirst),
- systemCredentialFields: systemFields.sort(sortByUnsetFirst),
+ userCredentialFields: userFields,
+ systemCredentialFields: systemFields,
};
}
@@ -160,6 +177,7 @@ export function findSavedCredentialByProviderAndType(
credentialTypes: string[],
requiredScopes: string[] | undefined,
allProviders: CredentialsProvidersContextType | null,
+ discriminatorValues?: string[],
): SavedCredential | undefined {
for (const providerName of providerNames) {
const providerData = allProviders?.[providerName];
@@ -176,9 +194,14 @@ export function findSavedCredentialByProviderAndType(
credentialTypes.length === 0 ||
credentialTypes.includes(credential.type);
const scopesMatch = hasRequiredScopes(credential, requiredScopes);
+ const hostMatches = matchesDiscriminatorValues(
+ credential,
+ discriminatorValues,
+ );
if (!typeMatches) continue;
if (!scopesMatch) continue;
+ if (!hostMatches) continue;
matchingCredentials.push(credential as SavedCredential);
}
@@ -190,9 +213,14 @@ export function findSavedCredentialByProviderAndType(
credentialTypes.length === 0 ||
credentialTypes.includes(credential.type);
const scopesMatch = hasRequiredScopes(credential, requiredScopes);
+ const hostMatches = matchesDiscriminatorValues(
+ credential,
+ discriminatorValues,
+ );
if (!typeMatches) continue;
if (!scopesMatch) continue;
+ if (!hostMatches) continue;
matchingCredentials.push(credential as SavedCredential);
}
@@ -214,6 +242,7 @@ export function findSavedUserCredentialByProviderAndType(
credentialTypes: string[],
requiredScopes: string[] | undefined,
allProviders: CredentialsProvidersContextType | null,
+ discriminatorValues?: string[],
): SavedCredential | undefined {
for (const providerName of providerNames) {
const providerData = allProviders?.[providerName];
@@ -230,9 +259,14 @@ export function findSavedUserCredentialByProviderAndType(
credentialTypes.length === 0 ||
credentialTypes.includes(credential.type);
const scopesMatch = hasRequiredScopes(credential, requiredScopes);
+ const hostMatches = matchesDiscriminatorValues(
+ credential,
+ discriminatorValues,
+ );
if (!typeMatches) continue;
if (!scopesMatch) continue;
+ if (!hostMatches) continue;
matchingCredentials.push(credential as SavedCredential);
}
diff --git a/autogpt_platform/frontend/src/components/contextual/CredentialsInput/useCredentialsInput.ts b/autogpt_platform/frontend/src/components/contextual/CredentialsInput/useCredentialsInput.ts
index 509713ff1e..9ab2e08141 100644
--- a/autogpt_platform/frontend/src/components/contextual/CredentialsInput/useCredentialsInput.ts
+++ b/autogpt_platform/frontend/src/components/contextual/CredentialsInput/useCredentialsInput.ts
@@ -5,14 +5,14 @@ import {
BlockIOCredentialsSubSchema,
CredentialsMetaInput,
} from "@/lib/autogpt-server-api/types";
+import { postV2InitiateOauthLoginForAnMcpServer } from "@/app/api/__generated__/endpoints/mcp/mcp";
+import { openOAuthPopup } from "@/lib/oauth-popup";
import { useQueryClient } from "@tanstack/react-query";
import { useEffect, useRef, useState } from "react";
import {
filterSystemCredentials,
getActionButtonText,
getSystemCredentials,
- OAUTH_TIMEOUT_MS,
- OAuthPopupResultMessage,
} from "./helpers";
export type CredentialsInputState = ReturnType;
@@ -57,6 +57,14 @@ export function useCredentialsInput({
const queryClient = useQueryClient();
const credentials = useCredentials(schema, siblingInputs);
const hasAttemptedAutoSelect = useRef(false);
+ const oauthAbortRef = useRef<((reason?: string) => void) | null>(null);
+
+ // Clean up on unmount
+ useEffect(() => {
+ return () => {
+ oauthAbortRef.current?.();
+ };
+ }, []);
const deleteCredentialsMutation = useDeleteV1DeleteCredentials({
mutation: {
@@ -81,11 +89,14 @@ export function useCredentialsInput({
}
}, [credentials, onLoaded]);
- // Unselect credential if not available
+ // Unselect credential if not available in the loaded credential list.
+ // Skip when no credentials have been loaded yet (empty list could mean
+ // the provider data hasn't finished loading, not that the credential is invalid).
useEffect(() => {
if (readOnly) return;
if (!credentials || !("savedCredentials" in credentials)) return;
const availableCreds = credentials.savedCredentials;
+ if (availableCreds.length === 0) return;
if (
selectedCredential &&
!availableCreds.some((c) => c.id === selectedCredential.id)
@@ -110,7 +121,9 @@ export function useCredentialsInput({
if (hasAttemptedAutoSelect.current) return;
hasAttemptedAutoSelect.current = true;
- if (isOptional) return;
+ // Auto-select the first saved credential. For optional fields with
+ // multiple options, skip auto-select and let the user choose.
+ if (isOptional && savedCreds.length > 1) return;
const cred = savedCreds[0];
onSelectCredential({
@@ -148,7 +161,9 @@ export function useCredentialsInput({
supportsHostScoped,
savedCredentials,
oAuthCallback,
+ mcpOAuthCallback,
isSystemProvider,
+ discriminatorValue,
} = credentials;
// Split credentials into user and system
@@ -157,72 +172,66 @@ export function useCredentialsInput({
async function handleOAuthLogin() {
setOAuthError(null);
- const { login_url, state_token } = await api.oAuthLogin(
- provider,
- schema.credentials_scopes,
- );
- setOAuth2FlowInProgress(true);
- const popup = window.open(login_url, "_blank", "popup=true");
- if (!popup) {
- throw new Error(
- "Failed to open popup window. Please allow popups for this site.",
+ // Abort any previous OAuth flow
+ oauthAbortRef.current?.();
+
+ // MCP uses dynamic OAuth discovery per server URL
+ const isMCP = provider === "mcp" && !!discriminatorValue;
+
+ try {
+ let login_url: string;
+ let state_token: string;
+
+ if (isMCP) {
+ const mcpLoginResponse = await postV2InitiateOauthLoginForAnMcpServer({
+ server_url: discriminatorValue!,
+ });
+ if (mcpLoginResponse.status !== 200) throw mcpLoginResponse.data;
+ ({ login_url, state_token } = mcpLoginResponse.data);
+ } else {
+ ({ login_url, state_token } = await api.oAuthLogin(
+ provider,
+ schema.credentials_scopes,
+ ));
+ }
+
+ setOAuth2FlowInProgress(true);
+
+ const { promise, cleanup } = openOAuthPopup(login_url, {
+ stateToken: state_token,
+ useCrossOriginListeners: isMCP,
+ // Standard OAuth uses "oauth_popup_result", MCP uses "mcp_oauth_result"
+ acceptMessageTypes: isMCP
+ ? ["mcp_oauth_result"]
+ : ["oauth_popup_result"],
+ });
+
+ oauthAbortRef.current = cleanup.abort;
+ // Expose abort signal for the waiting modal's cancel button
+ const controller = new AbortController();
+ cleanup.signal.addEventListener("abort", () =>
+ controller.abort("completed"),
);
- }
+ setOAuthPopupController(controller);
- const controller = new AbortController();
- setOAuthPopupController(controller);
- controller.signal.onabort = () => {
- console.debug("OAuth flow aborted");
- setOAuth2FlowInProgress(false);
- popup.close();
- };
+ const result = await promise;
- const handleMessage = async (e: MessageEvent) => {
- console.debug("Message received:", e.data);
- if (
- typeof e.data != "object" ||
- !("message_type" in e.data) ||
- e.data.message_type !== "oauth_popup_result"
- ) {
- console.debug("Ignoring irrelevant message");
- return;
- }
+ // Exchange code for tokens via the provider (updates credential cache)
+ const credentialResult = isMCP
+ ? await mcpOAuthCallback(result.code, state_token)
+ : await oAuthCallback(result.code, result.state);
- if (!e.data.success) {
- console.error("OAuth flow failed:", e.data.message);
- setOAuthError(`OAuth flow failed: ${e.data.message}`);
- setOAuth2FlowInProgress(false);
- return;
- }
-
- if (e.data.state !== state_token) {
- console.error("Invalid state token received");
- setOAuthError("Invalid state token received");
- setOAuth2FlowInProgress(false);
- return;
- }
-
- try {
- console.debug("Processing OAuth callback");
- const credentials = await oAuthCallback(e.data.code, e.data.state);
- console.debug("OAuth callback processed successfully");
-
- // Check if the credential's scopes match the required scopes
+ // Check if the credential's scopes match the required scopes (skip for MCP)
+ if (!isMCP) {
const requiredScopes = schema.credentials_scopes;
if (requiredScopes && requiredScopes.length > 0) {
- const grantedScopes = new Set(credentials.scopes || []);
+ const grantedScopes = new Set(credentialResult.scopes || []);
const hasAllRequiredScopes = new Set(requiredScopes).isSubsetOf(
grantedScopes,
);
if (!hasAllRequiredScopes) {
- console.error(
- `Newly created OAuth credential for ${providerName} has insufficient scopes. Required:`,
- requiredScopes,
- "Granted:",
- credentials.scopes,
- );
setOAuthError(
"Connection failed: the granted permissions don't match what's required. " +
"Please contact the application administrator.",
@@ -230,38 +239,28 @@ export function useCredentialsInput({
return;
}
}
+ }
- onSelectCredential({
- id: credentials.id,
- type: "oauth2",
- title: credentials.title,
- provider,
- });
- } catch (error) {
- console.error("Error in OAuth callback:", error);
+ onSelectCredential({
+ id: credentialResult.id,
+ type: "oauth2",
+ title: credentialResult.title,
+ provider,
+ });
+ } catch (error) {
+ if (error instanceof Error && error.message === "OAuth flow timed out") {
+ setOAuthError("OAuth flow timed out");
+ } else {
setOAuthError(
- `Error in OAuth callback: ${
+ `OAuth error: ${
error instanceof Error ? error.message : String(error)
}`,
);
- } finally {
- console.debug("Finalizing OAuth flow");
- setOAuth2FlowInProgress(false);
- controller.abort("success");
}
- };
-
- console.debug("Adding message event listener");
- window.addEventListener("message", handleMessage, {
- signal: controller.signal,
- });
-
- setTimeout(() => {
- console.debug("OAuth flow timed out");
- controller.abort("timeout");
+ } finally {
setOAuth2FlowInProgress(false);
- setOAuthError("OAuth flow timed out");
- }, OAUTH_TIMEOUT_MS);
+ oauthAbortRef.current = null;
+ }
}
function handleActionButtonClick() {
diff --git a/autogpt_platform/frontend/src/components/renderers/InputRenderer/base/object/WrapIfAdditionalTemplate.tsx b/autogpt_platform/frontend/src/components/renderers/InputRenderer/base/object/WrapIfAdditionalTemplate.tsx
index 97478e9eaf..a8b3514d41 100644
--- a/autogpt_platform/frontend/src/components/renderers/InputRenderer/base/object/WrapIfAdditionalTemplate.tsx
+++ b/autogpt_platform/frontend/src/components/renderers/InputRenderer/base/object/WrapIfAdditionalTemplate.tsx
@@ -80,7 +80,7 @@ export default function WrapIfAdditionalTemplate(
uiSchema={uiSchema}
/>
{!isHandleConnected && (
-
+
void;
+ /** The AbortController signal */
+ signal: AbortSignal;
+};
+
+/**
+ * Opens an OAuth popup and sets up listeners for the callback result.
+ *
+ * Opens a blank popup synchronously (to avoid popup blockers), then navigates
+ * it to the login URL. Returns a promise that resolves with the OAuth code/state.
+ *
+ * @param loginUrl - The OAuth authorization URL to navigate to
+ * @param options - Configuration for message handling
+ * @returns Object with `promise` (resolves with OAuth result) and `cleanup` (abort function + abort signal for canceling the flow)
+ */
+export function openOAuthPopup(
+ loginUrl: string,
+ options: OAuthPopupOptions,
+): { promise: Promise; cleanup: Cleanup } {
+ const {
+ stateToken,
+ useCrossOriginListeners = false,
+ broadcastChannelName = "mcp_oauth",
+ localStorageKey = "mcp_oauth_result",
+ acceptMessageTypes = ["oauth_popup_result", "mcp_oauth_result"],
+ timeout = DEFAULT_TIMEOUT_MS,
+ } = options;
+
+ const controller = new AbortController();
+
+ // Open popup synchronously (before any async work) to avoid browser popup blockers
+ const width = 500;
+ const height = 700;
+ const left = window.screenX + (window.outerWidth - width) / 2;
+ const top = window.screenY + (window.outerHeight - height) / 2;
+ const popup = window.open(
+ "about:blank",
+ "_blank",
+ `width=${width},height=${height},left=${left},top=${top},popup=true,scrollbars=yes`,
+ );
+
+ if (popup && !popup.closed) {
+ popup.location.href = loginUrl;
+ } else {
+ // Popup was blocked — open in new tab as fallback
+ window.open(loginUrl, "_blank");
+ }
+
+ // Close popup on abort
+ controller.signal.addEventListener("abort", () => {
+ if (popup && !popup.closed) popup.close();
+ });
+
+ // Clear any stale localStorage entry
+ if (useCrossOriginListeners) {
+ try {
+ localStorage.removeItem(localStorageKey);
+ } catch {}
+ }
+
+ const promise = new Promise((resolve, reject) => {
+ let handled = false;
+
+ const handleResult = (data: any) => {
+ if (handled) return; // Prevent double-handling
+
+ // Validate message type
+ const messageType = data?.message_type ?? data?.type;
+ if (!messageType || !acceptMessageTypes.includes(messageType)) return;
+
+ // Validate state token
+ if (data.state !== stateToken) {
+ // State mismatch — this message is for a different listener. Ignore silently.
+ return;
+ }
+
+ handled = true;
+
+ if (!data.success) {
+ reject(new Error(data.message || "OAuth authentication failed"));
+ } else {
+ resolve({ code: data.code, state: data.state });
+ }
+
+ controller.abort("completed");
+ };
+
+ // Listener: postMessage (works for same-origin popups)
+ window.addEventListener(
+ "message",
+ (event: MessageEvent) => {
+ if (typeof event.data === "object") {
+ handleResult(event.data);
+ }
+ },
+ { signal: controller.signal },
+ );
+
+ // Cross-origin listeners for MCP OAuth
+ if (useCrossOriginListeners) {
+ // Listener: BroadcastChannel (works across tabs/popups without opener)
+ try {
+ const bc = new BroadcastChannel(broadcastChannelName);
+ bc.onmessage = (event) => handleResult(event.data);
+ controller.signal.addEventListener("abort", () => bc.close());
+ } catch {}
+
+ // Listener: localStorage polling (most reliable cross-tab fallback)
+ const pollInterval = setInterval(() => {
+ try {
+ const stored = localStorage.getItem(localStorageKey);
+ if (stored) {
+ const data = JSON.parse(stored);
+ localStorage.removeItem(localStorageKey);
+ handleResult(data);
+ }
+ } catch {}
+ }, 500);
+ controller.signal.addEventListener("abort", () =>
+ clearInterval(pollInterval),
+ );
+ }
+
+ // Timeout
+ const timeoutId = setTimeout(() => {
+ if (!handled) {
+ handled = true;
+ reject(new Error("OAuth flow timed out"));
+ controller.abort("timeout");
+ }
+ }, timeout);
+ controller.signal.addEventListener("abort", () => clearTimeout(timeoutId));
+ });
+
+ return {
+ promise,
+ cleanup: {
+ abort: (reason?: string) => controller.abort(reason || "canceled"),
+ signal: controller.signal,
+ },
+ };
+}
diff --git a/autogpt_platform/frontend/src/middleware.ts b/autogpt_platform/frontend/src/middleware.ts
index af1c823295..8cec8a2645 100644
--- a/autogpt_platform/frontend/src/middleware.ts
+++ b/autogpt_platform/frontend/src/middleware.ts
@@ -18,6 +18,6 @@ export const config = {
* Note: /auth/authorize and /auth/integrations/* ARE protected and need
* middleware to run for authentication checks.
*/
- "/((?!_next/static|_next/image|favicon.ico|auth/callback|.*\\.(?:svg|png|jpg|jpeg|gif|webp)$).*)",
+ "/((?!_next/static|_next/image|favicon.ico|auth/callback|auth/integrations/mcp_callback|.*\\.(?:svg|png|jpg|jpeg|gif|webp)$).*)",
],
};
diff --git a/autogpt_platform/frontend/src/providers/agent-credentials/credentials-provider.tsx b/autogpt_platform/frontend/src/providers/agent-credentials/credentials-provider.tsx
index e47cc65e13..a426d8f667 100644
--- a/autogpt_platform/frontend/src/providers/agent-credentials/credentials-provider.tsx
+++ b/autogpt_platform/frontend/src/providers/agent-credentials/credentials-provider.tsx
@@ -8,6 +8,7 @@ import {
HostScopedCredentials,
UserPasswordCredentials,
} from "@/lib/autogpt-server-api";
+import { postV2ExchangeOauthCodeForMcpTokens } from "@/app/api/__generated__/endpoints/mcp/mcp";
import { useBackendAPI } from "@/lib/autogpt-server-api/context";
import { useSupabase } from "@/lib/supabase/hooks/useSupabase";
import { toDisplayName } from "@/providers/agent-credentials/helper";
@@ -38,6 +39,11 @@ export type CredentialsProviderData = {
code: string,
state_token: string,
) => Promise;
+ /** MCP-specific OAuth callback that uses dynamic per-server OAuth discovery. */
+ mcpOAuthCallback: (
+ code: string,
+ state_token: string,
+ ) => Promise;
createAPIKeyCredentials: (
credentials: APIKeyCredentialsCreatable,
) => Promise;
@@ -120,6 +126,35 @@ export default function CredentialsProvider({
[api, addCredentials, onFailToast],
);
+ /** Exchanges an MCP OAuth code for tokens and adds the result to the internal credentials store. */
+ const mcpOAuthCallback = useCallback(
+ async (
+ code: string,
+ state_token: string,
+ ): Promise => {
+ try {
+ const response = await postV2ExchangeOauthCodeForMcpTokens({
+ code,
+ state_token,
+ });
+ if (response.status !== 200) throw response.data;
+ const credsMeta: CredentialsMetaResponse = {
+ ...response.data,
+ title: response.data.title ?? undefined,
+ scopes: response.data.scopes ?? undefined,
+ username: response.data.username ?? undefined,
+ host: response.data.host ?? undefined,
+ };
+ addCredentials("mcp", credsMeta);
+ return credsMeta;
+ } catch (error) {
+ onFailToast("complete MCP OAuth authentication")(error);
+ throw error;
+ }
+ },
+ [addCredentials, onFailToast],
+ );
+
/** Wraps `BackendAPI.createAPIKeyCredentials`, and adds the result to the internal credentials store. */
const createAPIKeyCredentials = useCallback(
async (
@@ -258,6 +293,7 @@ export default function CredentialsProvider({
isSystemProvider: systemProviders.has(provider),
oAuthCallback: (code: string, state_token: string) =>
oAuthCallback(provider, code, state_token),
+ mcpOAuthCallback,
createAPIKeyCredentials: (
credentials: APIKeyCredentialsCreatable,
) => createAPIKeyCredentials(provider, credentials),
@@ -286,6 +322,7 @@ export default function CredentialsProvider({
createHostScopedCredentials,
deleteCredentials,
oAuthCallback,
+ mcpOAuthCallback,
onFailToast,
]);
diff --git a/autogpt_platform/frontend/src/tests/pages/build.page.ts b/autogpt_platform/frontend/src/tests/pages/build.page.ts
index 9370288f8e..3bb9552b82 100644
--- a/autogpt_platform/frontend/src/tests/pages/build.page.ts
+++ b/autogpt_platform/frontend/src/tests/pages/build.page.ts
@@ -528,6 +528,9 @@ export class BuildPage extends BasePage {
async getBlocksToSkip(): Promise {
return [
(await this.getGithubTriggerBlockDetails()).map((b) => b.id),
+ // MCP Tool block requires an interactive dialog (server URL + OAuth) before
+ // it can be placed, so it can't be tested via the standard "add block" flow.
+ "a0a4b1c2-d3e4-4f56-a7b8-c9d0e1f2a3b4",
].flat();
}
diff --git a/docs/integrations/README.md b/docs/integrations/README.md
index a471ef3533..00d4b0c73a 100644
--- a/docs/integrations/README.md
+++ b/docs/integrations/README.md
@@ -56,12 +56,16 @@ Below is a comprehensive list of all available blocks, categorized by their prim
| [File Store](block-integrations/basic.md#file-store) | Downloads and stores a file from a URL, data URI, or local path |
| [Find In Dictionary](block-integrations/basic.md#find-in-dictionary) | A block that looks up a value in a dictionary, list, or object by key or index and returns the corresponding value |
| [Find In List](block-integrations/basic.md#find-in-list) | Finds the index of the value in the list |
+| [Flatten List](block-integrations/basic.md#flatten-list) | Flattens a nested list structure into a single flat list |
| [Get All Memories](block-integrations/basic.md#get-all-memories) | Retrieve all memories from Mem0 with optional conversation filtering |
| [Get Latest Memory](block-integrations/basic.md#get-latest-memory) | Retrieve the latest memory from Mem0 with optional key filtering |
| [Get List Item](block-integrations/basic.md#get-list-item) | Returns the element at the given index |
| [Get Store Agent Details](block-integrations/system/store_operations.md#get-store-agent-details) | Get detailed information about an agent from the store |
| [Get Weather Information](block-integrations/basic.md#get-weather-information) | Retrieves weather information for a specified location using OpenWeatherMap API |
| [Human In The Loop](block-integrations/basic.md#human-in-the-loop) | Pause execution for human review |
+| [Interleave Lists](block-integrations/basic.md#interleave-lists) | Interleaves elements from multiple lists in round-robin fashion, alternating between sources |
+| [List Difference](block-integrations/basic.md#list-difference) | Computes the difference between two lists |
+| [List Intersection](block-integrations/basic.md#list-intersection) | Computes the intersection of two lists, returning only elements present in both |
| [List Is Empty](block-integrations/basic.md#list-is-empty) | Checks if a list is empty |
| [List Library Agents](block-integrations/system/library_operations.md#list-library-agents) | List all agents in your personal library |
| [Note](block-integrations/basic.md#note) | A visual annotation block that displays a sticky note in the workflow editor for documentation and organization purposes |
@@ -84,6 +88,7 @@ Below is a comprehensive list of all available blocks, categorized by their prim
| [Store Value](block-integrations/basic.md#store-value) | A basic block that stores and forwards a value throughout workflows, allowing it to be reused without changes across multiple blocks |
| [Universal Type Converter](block-integrations/basic.md#universal-type-converter) | This block is used to convert a value to a universal type |
| [XML Parser](block-integrations/basic.md#xml-parser) | Parses XML using gravitasml to tokenize and coverts it to dict |
+| [Zip Lists](block-integrations/basic.md#zip-lists) | Zips multiple lists together into a list of grouped elements |
## Data Processing
@@ -467,6 +472,7 @@ Below is a comprehensive list of all available blocks, categorized by their prim
| [Github Update Comment](block-integrations/github/issues.md#github-update-comment) | A block that updates an existing comment on a GitHub issue or pull request |
| [Github Update File](block-integrations/github/repo.md#github-update-file) | This block updates an existing file in a GitHub repository |
| [Instantiate Code Sandbox](block-integrations/misc.md#instantiate-code-sandbox) | Instantiate a sandbox environment with internet access in which you can execute code with the Execute Code Step block |
+| [MCP Tool](block-integrations/mcp/block.md#mcp-tool) | Connect to any MCP server and execute its tools |
| [Slant3D Order Webhook](block-integrations/slant3d/webhook.md#slant3d-order-webhook) | This block triggers on Slant3D order status updates and outputs the event details, including tracking information when orders are shipped |
## Media Generation
diff --git a/docs/integrations/SUMMARY.md b/docs/integrations/SUMMARY.md
index f481ae2e0a..3ad4bf2c6d 100644
--- a/docs/integrations/SUMMARY.md
+++ b/docs/integrations/SUMMARY.md
@@ -84,6 +84,7 @@
* [Linear Projects](block-integrations/linear/projects.md)
* [LLM](block-integrations/llm.md)
* [Logic](block-integrations/logic.md)
+* [MCP Block](block-integrations/mcp/block.md)
* [Misc](block-integrations/misc.md)
* [Notion Create Page](block-integrations/notion/create_page.md)
* [Notion Read Database](block-integrations/notion/read_database.md)
diff --git a/docs/integrations/block-integrations/basic.md b/docs/integrations/block-integrations/basic.md
index 08def38ede..e032690edc 100644
--- a/docs/integrations/block-integrations/basic.md
+++ b/docs/integrations/block-integrations/basic.md
@@ -637,7 +637,7 @@ This enables extensibility by allowing custom blocks to be added without modifyi
## Concatenate Lists
### What it is
-Concatenates multiple lists into a single list. All elements from all input lists are combined in order.
+Concatenates multiple lists into a single list. All elements from all input lists are combined in order. Supports optional deduplication and None removal.
### How it works
@@ -651,6 +651,8 @@ The block includes validation to ensure each item is actually a list. If a non-l
| Input | Description | Type | Required |
|-------|-------------|------|----------|
| lists | A list of lists to concatenate together. All lists will be combined in order into a single list. | List[List[Any]] | Yes |
+| deduplicate | If True, remove duplicate elements from the concatenated result while preserving order. | bool | No |
+| remove_none | If True, remove None values from the concatenated result. | bool | No |
### Outputs
@@ -658,6 +660,7 @@ The block includes validation to ensure each item is actually a list. If a non-l
|--------|-------------|------|
| error | Error message if concatenation failed due to invalid input types. | str |
| concatenated_list | The concatenated list containing all elements from all input lists in order. | List[Any] |
+| length | The total number of elements in the concatenated list. | int |
### Possible use case
@@ -820,6 +823,45 @@ This enables conditional logic based on list membership and helps locate items f
---
+## Flatten List
+
+### What it is
+Flattens a nested list structure into a single flat list. Supports configurable maximum flattening depth.
+
+### How it works
+
+This block recursively traverses a nested list and extracts all leaf elements into a single flat list. You can control how deep the flattening goes with the max_depth parameter: set it to -1 to flatten completely, or to a positive integer to flatten only that many levels.
+
+The block also reports the original nesting depth of the input, which is useful for understanding the structure of data coming from sources with varying levels of nesting.
+
+
+### Inputs
+
+| Input | Description | Type | Required |
+|-------|-------------|------|----------|
+| nested_list | A potentially nested list to flatten into a single-level list. | List[Any] | Yes |
+| max_depth | Maximum depth to flatten. -1 means flatten completely. 1 means flatten only one level. | int | No |
+
+### Outputs
+
+| Output | Description | Type |
+|--------|-------------|------|
+| error | Error message if flattening failed. | str |
+| flattened_list | The flattened list with all nested elements extracted. | List[Any] |
+| length | The number of elements in the flattened list. | int |
+| original_depth | The maximum nesting depth of the original input list. | int |
+
+### Possible use case
+
+**Normalizing API Responses**: Flatten nested JSON arrays from different API endpoints into a uniform single-level list for consistent processing.
+
+**Aggregating Nested Results**: Combine results from recursive file searches or nested category trees into a flat list of items for display or export.
+
+**Data Pipeline Cleanup**: Simplify deeply nested data structures from multiple transformation steps into a clean flat list before final output.
+
+
+---
+
## Get All Memories
### What it is
@@ -1012,6 +1054,120 @@ This enables human oversight at critical points in automated workflows, ensuring
---
+## Interleave Lists
+
+### What it is
+Interleaves elements from multiple lists in round-robin fashion, alternating between sources.
+
+### How it works
+
+This block takes elements from each input list in round-robin order, picking one element from each list in turn. For example, given `[[1, 2, 3], ['a', 'b', 'c']]`, it produces `[1, 'a', 2, 'b', 3, 'c']`.
+
+When lists have different lengths, shorter lists stop contributing once exhausted, and remaining elements from longer lists continue to be added in order.
+
+
+### Inputs
+
+| Input | Description | Type | Required |
+|-------|-------------|------|----------|
+| lists | A list of lists to interleave. Elements will be taken in round-robin order. | List[List[Any]] | Yes |
+
+### Outputs
+
+| Output | Description | Type |
+|--------|-------------|------|
+| error | Error message if interleaving failed. | str |
+| interleaved_list | The interleaved list with elements alternating from each input list. | List[Any] |
+| length | The total number of elements in the interleaved list. | int |
+
+### Possible use case
+
+**Balanced Content Mixing**: Alternate between content from different sources (e.g., mixing promotional and organic posts) for a balanced feed.
+
+**Round-Robin Scheduling**: Distribute tasks evenly across workers or queues by interleaving items from separate task lists.
+
+**Multi-Language Output**: Weave together translated text segments with their original counterparts for side-by-side comparison.
+
+
+---
+
+## List Difference
+
+### What it is
+Computes the difference between two lists. Returns the elements of the first list that are not found in the second, or the symmetric difference of both lists when enabled.
+
+### How it works
+
+This block compares two lists and returns elements from list_a that do not appear in list_b. It uses hash-based lookup for efficient comparison. When symmetric mode is enabled, it returns elements that are in either list but not in both.
+
+The order of elements from list_a is preserved in the output, and elements from list_b are appended when using symmetric difference.
+
+
+### Inputs
+
+| Input | Description | Type | Required |
+|-------|-------------|------|----------|
+| list_a | The primary list to check elements from. | List[Any] | Yes |
+| list_b | The list to subtract. Elements found here will be removed from list_a. | List[Any] | Yes |
+| symmetric | If True, compute symmetric difference (elements in either list but not both). | bool | No |
+
+### Outputs
+
+| Output | Description | Type |
+|--------|-------------|------|
+| error | Error message if the operation failed. | str |
+| difference | Elements from list_a not found in list_b (or symmetric difference if enabled). | List[Any] |
+| length | The number of elements in the difference result. | int |
+
+### Possible use case
+
+**Change Detection**: Compare a current list of records against a previous snapshot to find newly added or removed items.
+
+**Exclusion Filtering**: Remove items from a list that appear in a blocklist or already-processed list to avoid duplicates.
+
+**Data Sync**: Identify which items exist in one system but not another to determine what needs to be synced.
+
+
+---
+
+## List Intersection
+
+### What it is
+Computes the intersection of two lists, returning only elements present in both.
+
+### How it works
+
+This block finds elements that appear in both input lists by hashing elements from list_b for efficient lookup, then checking each element of list_a against that set. The output preserves the order from list_a and removes duplicates.
+
+This is useful for finding common items between two datasets without needing to manually iterate or compare.
+
+
+### Inputs
+
+| Input | Description | Type | Required |
+|-------|-------------|------|----------|
+| list_a | The first list to intersect. | List[Any] | Yes |
+| list_b | The second list to intersect. | List[Any] | Yes |
+
+### Outputs
+
+| Output | Description | Type |
+|--------|-------------|------|
+| error | Error message if the operation failed. | str |
+| intersection | Elements present in both list_a and list_b. | List[Any] |
+| length | The number of elements in the intersection. | int |
+
+### Possible use case
+
+**Finding Common Tags**: Identify shared tags or categories between two items for recommendation or grouping purposes.
+
+**Mutual Connections**: Find users or contacts that appear in both of two different lists, such as shared friends or overlapping team members.
+
+**Feature Comparison**: Determine which features or capabilities are supported by both of two systems or products.
+
+
+---
+
## List Is Empty
### What it is
@@ -1452,3 +1608,42 @@ This makes XML data accessible using standard dictionary operations, allowing yo
---
+
+## Zip Lists
+
+### What it is
+Zips multiple lists together into a list of grouped elements. Supports padding to longest or truncating to shortest.
+
+### How it works
+
+This block pairs up corresponding elements from multiple input lists into sub-lists. For example, zipping `[[1, 2, 3], ['a', 'b', 'c']]` produces `[[1, 'a'], [2, 'b'], [3, 'c']]`.
+
+By default, the result is truncated to the length of the shortest input list. Enable pad_to_longest to instead pad shorter lists with a fill_value so no elements from longer lists are lost.
+
+
+### Inputs
+
+| Input | Description | Type | Required |
+|-------|-------------|------|----------|
+| lists | A list of lists to zip together. Corresponding elements will be grouped. | List[List[Any]] | Yes |
+| pad_to_longest | If True, pad shorter lists with fill_value to match the longest list. If False, truncate to shortest. | bool | No |
+| fill_value | Value to use for padding when pad_to_longest is True. | Fill Value | No |
+
+### Outputs
+
+| Output | Description | Type |
+|--------|-------------|------|
+| error | Error message if zipping failed. | str |
+| zipped_list | The zipped list of grouped elements. | List[List[Any]] |
+| length | The number of groups in the zipped result. | int |
+
+### Possible use case
+
+**Creating Key-Value Pairs**: Combine a list of field names with a list of values to build structured records or dictionaries.
+
+**Parallel Data Alignment**: Pair up corresponding items from separate data sources (e.g., names and email addresses) for processing together.
+
+**Table Row Construction**: Group column data into rows by zipping each column's values together for CSV export or display.
+
+
+---
diff --git a/docs/integrations/block-integrations/mcp/block.md b/docs/integrations/block-integrations/mcp/block.md
new file mode 100644
index 0000000000..6858e42e94
--- /dev/null
+++ b/docs/integrations/block-integrations/mcp/block.md
@@ -0,0 +1,40 @@
+# MCP Block
+
+Blocks for connecting to and executing tools on MCP (Model Context Protocol) servers.
+
+
+## MCP Tool
+
+### What it is
+Connect to any MCP server and execute its tools. Provide a server URL, select a tool, and pass arguments dynamically.
+
+### How it works
+
+The block uses JSON-RPC 2.0 over HTTP to communicate with MCP servers. When configuring, it sends an `initialize` request followed by `tools/list` to discover available tools and their input schemas. On execution, it calls `tools/call` with the selected tool name and arguments, then extracts text, image, or resource content from the response.
+
+Authentication is handled via OAuth 2.0 when the server requires it. The block supports optional credentials — public servers work without authentication, while protected servers trigger a standard OAuth flow with PKCE. Tokens are automatically refreshed when they expire.
+
+
+### Inputs
+
+| Input | Description | Type | Required |
+|-------|-------------|------|----------|
+| server_url | URL of the MCP server (Streamable HTTP endpoint) | str | Yes |
+| selected_tool | The MCP tool to execute | str | No |
+| tool_arguments | Arguments to pass to the selected MCP tool. The fields here are defined by the tool's input schema. | Dict[str, Any] | No |
+
+### Outputs
+
+| Output | Description | Type |
+|--------|-------------|------|
+| error | Error message if the tool call failed | str |
+| result | The result returned by the MCP tool | Result |
+
+### Possible use case
+
+- **Connecting to third-party APIs**: Use an MCP server like Sentry or Linear to query issues, create tickets, or manage projects without building custom integrations.
+- **AI-powered tool execution**: Chain MCP tool calls with AI blocks to let agents dynamically discover and use external tools based on task requirements.
+- **Data retrieval from knowledge bases**: Connect to MCP servers like DeepWiki to search documentation, retrieve code context, or query structured knowledge bases.
+
+
+---
diff --git a/plans/SECRT-1950-claude-ci-optimizations.md b/plans/SECRT-1950-claude-ci-optimizations.md
new file mode 100644
index 0000000000..15d1419b0e
--- /dev/null
+++ b/plans/SECRT-1950-claude-ci-optimizations.md
@@ -0,0 +1,165 @@
+# Implementation Plan: SECRT-1950 - Apply E2E CI Optimizations to Claude Code Workflows
+
+## Ticket
+[SECRT-1950](https://linear.app/autogpt/issue/SECRT-1950)
+
+## Summary
+Apply Pwuts's CI performance optimizations from PR #12090 to Claude Code workflows.
+
+## Reference PR
+https://github.com/Significant-Gravitas/AutoGPT/pull/12090
+
+---
+
+## Analysis
+
+### Current State (claude.yml)
+
+**pnpm caching (lines 104-118):**
+```yaml
+- name: Set up Node.js
+ uses: actions/setup-node@v6
+ with:
+ node-version: "22"
+
+- name: Enable corepack
+ run: corepack enable
+
+- name: Set pnpm store directory
+ run: |
+ pnpm config set store-dir ~/.pnpm-store
+ echo "PNPM_HOME=$HOME/.pnpm-store" >> $GITHUB_ENV
+
+- name: Cache frontend dependencies
+ uses: actions/cache@v5
+ with:
+ path: ~/.pnpm-store
+ key: ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}
+ restore-keys: |
+ ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml') }}
+ ${{ runner.os }}-pnpm-
+```
+
+**Docker setup (lines 134-165):**
+- Uses `docker-buildx-action@v3`
+- Has manual Docker image caching via `actions/cache`
+- Runs `docker compose up` without buildx bake optimization
+
+### Pwuts's Optimizations (PR #12090)
+
+1. **Simplified pnpm caching** - Use `setup-node` built-in cache:
+```yaml
+- name: Enable corepack
+ run: corepack enable
+
+- name: Set up Node
+ uses: actions/setup-node@v6
+ with:
+ node-version: "22.18.0"
+ cache: "pnpm"
+ cache-dependency-path: autogpt_platform/frontend/pnpm-lock.yaml
+```
+
+2. **Docker build caching via buildx bake**:
+```yaml
+- name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v3
+ with:
+ driver: docker-container
+ driver-opts: network=host
+
+- name: Expose GHA cache to docker buildx CLI
+ uses: crazy-max/ghaction-github-runtime@v3
+
+- name: Build Docker images (with cache)
+ run: |
+ pip install pyyaml
+ docker compose -f docker-compose.yml config > docker-compose.resolved.yml
+ python ../.github/workflows/scripts/docker-ci-fix-compose-build-cache.py \
+ --source docker-compose.resolved.yml \
+ --cache-from "type=gha" \
+ --cache-to "type=gha,mode=max" \
+ ...
+ docker buildx bake --allow=fs.read=.. -f docker-compose.resolved.yml --load
+```
+
+---
+
+## Proposed Changes
+
+### 1. Update pnpm caching in `claude.yml`
+
+**Before:**
+- Manual cache key generation
+- Separate `actions/cache` step
+- Manual pnpm store directory config
+
+**After:**
+- Use `setup-node` built-in `cache: "pnpm"` option
+- Remove manual cache step
+- Keep `corepack enable` before `setup-node`
+
+### 2. Update Docker build in `claude.yml`
+
+**Before:**
+- Manual Docker layer caching via `actions/cache` with `/tmp/.buildx-cache`
+- Simple `docker compose build`
+
+**After:**
+- Use `crazy-max/ghaction-github-runtime@v3` to expose GHA cache
+- Use `docker-ci-fix-compose-build-cache.py` script
+- Build with `docker buildx bake`
+
+### 3. Apply same changes to other Claude workflows
+
+- `claude-dependabot.yml` - Check if it has similar patterns
+- `claude-ci-failure-auto-fix.yml` - Check if it has similar patterns
+- `copilot-setup-steps.yml` - Reusable workflow, may be the source of truth
+
+---
+
+## Files to Modify
+
+1. `.github/workflows/claude.yml`
+2. `.github/workflows/claude-dependabot.yml` (if applicable)
+3. `.github/workflows/claude-ci-failure-auto-fix.yml` (if applicable)
+
+## Dependencies
+
+- PR #12090 must be merged first (provides the `docker-ci-fix-compose-build-cache.py` script)
+- Backend Dockerfile optimizations (already in PR #12090)
+
+---
+
+## Test Plan
+
+1. Create PR with changes
+2. Trigger Claude workflow manually or via `@claude` mention on a test issue
+3. Compare CI runtime before/after
+4. Verify Claude agent still works correctly (can checkout, build, run tests)
+
+---
+
+## Risk Assessment
+
+**Low risk:**
+- These are CI infrastructure changes, not code changes
+- If caching fails, builds fall back to uncached (slower but works)
+- Changes mirror proven patterns from PR #12090
+
+---
+
+## Questions for Reviewer
+
+1. Should we wait for PR #12090 to merge before creating this PR?
+2. Does `copilot-setup-steps.yml` need updating, or is it a separate concern?
+3. Any concerns about cache key collisions between frontend E2E and Claude workflows?
+
+---
+
+## Verified
+
+- ✅ **`claude-dependabot.yml`**: Has same pnpm caching pattern as `claude.yml` (manual `actions/cache`) — NEEDS UPDATE
+- ✅ **`claude-ci-failure-auto-fix.yml`**: Simple workflow with no pnpm or Docker caching — NO CHANGES NEEDED
+- ✅ **Script path**: `docker-ci-fix-compose-build-cache.py` will be at `.github/workflows/scripts/` after PR #12090 merges
+- ✅ **Test seed caching**: NOT APPLICABLE — Claude workflows spin up a dev environment but don't run E2E tests with pre-seeded data. The seed caching in PR #12090 is specific to the frontend E2E test suite which needs consistent test data. Claude just needs the services running.