refactor(platform): replace custom LLM converter with CoPilot agent-generator

Instead of a single-shot LLM call that reinvents agent generation,
the workflow import now:
1. Parses the external workflow (format detection + describers — unchanged)
2. Builds a structured CoPilot prompt from the WorkflowDescription
3. Returns the prompt to the frontend
4. Frontend redirects to /copilot with auto-submit

This reuses CoPilot's battle-tested agentic pipeline (multi-turn tool use,
block schema inspection, AgentFixer, AgentValidator) for reliable conversion.

Removed: custom LLM client, block catalog builder, retry logic, save logic.
converter.py is now ~50 lines (a prompt builder) instead of ~260.
This commit is contained in:
Zamil Majdy
2026-03-17 13:45:16 +07:00
parent 5c8d6271be
commit f101b11e25
7 changed files with 93 additions and 427 deletions

View File

@@ -1,13 +1,13 @@
"""API endpoint for importing external workflows."""
"""API endpoint for importing external workflows via CoPilot."""
import logging
from typing import Annotated, Any
from typing import Any
import pydantic
from autogpt_libs.auth import get_user_id, requires_user
from autogpt_libs.auth import requires_user
from fastapi import APIRouter, HTTPException, Security
from backend.copilot.workflow_import.converter import convert_workflow
from backend.copilot.workflow_import.converter import build_copilot_prompt
from backend.copilot.workflow_import.describers import describe_workflow
from backend.copilot.workflow_import.format_detector import (
SourcePlatform,
@@ -25,7 +25,6 @@ class ImportWorkflowRequest(pydantic.BaseModel):
workflow_json: dict[str, Any] | None = None
template_url: str | None = None
save: bool = True
@pydantic.model_validator(mode="after")
def check_exactly_one_source(self) -> "ImportWorkflowRequest":
@@ -41,14 +40,15 @@ class ImportWorkflowRequest(pydantic.BaseModel):
class ImportWorkflowResponse(pydantic.BaseModel):
"""Response from importing an external workflow."""
"""Response from parsing an external workflow.
graph: dict[str, Any]
graph_id: str | None = None
library_agent_id: str | None = None
Returns a CoPilot prompt that the frontend uses to redirect the user
to CoPilot, where the agentic agent-generator handles the conversion.
"""
copilot_prompt: str
source_format: str
source_name: str
conversion_notes: list[str] = []
@router.post(
@@ -58,14 +58,12 @@ class ImportWorkflowResponse(pydantic.BaseModel):
)
async def import_workflow(
request: ImportWorkflowRequest,
user_id: Annotated[str, Security(get_user_id)],
) -> ImportWorkflowResponse:
"""Import a workflow from another automation platform and convert it to an
AutoGPT agent.
"""Parse an external workflow and return a CoPilot prompt.
Accepts either raw workflow JSON or a template URL (n8n only for now).
The workflow is parsed, described, and then converted to an AutoGPT graph
using LLM-powered block mapping.
The workflow is parsed and described, then a structured prompt is returned
for CoPilot's agent-generator to handle the actual conversion.
"""
# Step 1: Get the raw workflow JSON
if request.template_url is not None:
@@ -92,42 +90,11 @@ async def import_workflow(
# Step 3: Describe the workflow
desc = describe_workflow(workflow_json, fmt)
# Step 4: Convert to AutoGPT agent
try:
agent_json, conversion_notes = await convert_workflow(desc)
except ValueError as e:
raise HTTPException(
status_code=502,
detail=f"Workflow conversion failed: {e}",
) from e
# Step 5: Optionally save
graph_id = None
library_agent_id = None
if request.save:
from backend.copilot.tools.agent_generator.core import save_agent_to_library
try:
created_graph, library_agent = await save_agent_to_library(
agent_json, user_id
)
graph_id = created_graph.id
library_agent_id = library_agent.id
conversion_notes.append(f"Agent saved as '{created_graph.name}'")
except Exception as e:
logger.error("Failed to save imported agent: %s", e, exc_info=True)
raise HTTPException(
status_code=500,
detail="Workflow was converted but could not be saved. "
"Please try again.",
) from e
# Step 4: Build CoPilot prompt
prompt = build_copilot_prompt(desc)
return ImportWorkflowResponse(
graph=agent_json,
graph_id=graph_id,
library_agent_id=library_agent_id,
copilot_prompt=prompt,
source_format=fmt.value,
source_name=desc.name,
conversion_notes=conversion_notes,
)

View File

@@ -1,6 +1,6 @@
"""Tests for workflow_import.py API endpoint."""
from unittest.mock import AsyncMock, MagicMock
from unittest.mock import AsyncMock
import fastapi
import pytest
@@ -63,87 +63,56 @@ def setup_app_auth(mock_jwt_user):
app.dependency_overrides.clear()
@pytest.fixture()
def mock_converter(mocker):
"""Mock the LLM converter to avoid actual LLM calls."""
agent_json = {
"name": "Converted Agent",
"description": "Test agent",
"version": 1,
"is_active": True,
"nodes": [],
"links": [],
}
return mocker.patch(
"backend.api.features.workflow_import.convert_workflow",
new_callable=AsyncMock,
return_value=(agent_json, ["Applied 2 auto-fixes"]),
)
@pytest.fixture()
def mock_save(mocker):
"""Mock save_agent_to_library."""
graph = MagicMock()
graph.id = "graph-123"
graph.name = "Converted Agent"
library_agent = MagicMock()
library_agent.id = "lib-456"
return mocker.patch(
"backend.copilot.tools.agent_generator.core.save_agent_to_library",
new_callable=AsyncMock,
return_value=(graph, library_agent),
)
class TestImportWorkflow:
def test_import_n8n_workflow(self, mock_converter, mock_save):
def test_import_n8n_workflow(self):
response = client.post(
"/workflow",
json={"workflow_json": N8N_WORKFLOW, "save": True},
json={"workflow_json": N8N_WORKFLOW},
)
assert response.status_code == 200
data = response.json()
assert data["source_format"] == "n8n"
assert data["source_name"] == "Email on Webhook"
assert data["graph_id"] == "graph-123"
mock_converter.assert_called_once()
assert "copilot_prompt" in data
assert "n8n" in data["copilot_prompt"]
assert "Email on Webhook" in data["copilot_prompt"]
def test_import_make_workflow(self, mock_converter, mock_save):
def test_import_make_workflow(self):
response = client.post(
"/workflow",
json={"workflow_json": MAKE_WORKFLOW, "save": True},
json={"workflow_json": MAKE_WORKFLOW},
)
assert response.status_code == 200
data = response.json()
assert data["source_format"] == "make"
assert data["source_name"] == "Sheets to Calendar"
assert "copilot_prompt" in data
def test_import_zapier_workflow(self, mock_converter, mock_save):
def test_import_zapier_workflow(self):
response = client.post(
"/workflow",
json={"workflow_json": ZAPIER_WORKFLOW, "save": True},
json={"workflow_json": ZAPIER_WORKFLOW},
)
assert response.status_code == 200
data = response.json()
assert data["source_format"] == "zapier"
assert data["source_name"] == "Gmail to Slack"
assert "copilot_prompt" in data
def test_import_without_save(self, mock_converter, mock_save):
def test_prompt_includes_steps(self):
response = client.post(
"/workflow",
json={"workflow_json": N8N_WORKFLOW, "save": False},
json={"workflow_json": N8N_WORKFLOW},
)
assert response.status_code == 200
data = response.json()
assert data["graph_id"] is None
assert data["library_agent_id"] is None
mock_save.assert_not_called()
prompt = response.json()["copilot_prompt"]
# Should include step details from the workflow
assert "Webhook" in prompt or "webhook" in prompt
assert "Gmail" in prompt or "gmail" in prompt
def test_no_source_provided(self):
response = client.post(
"/workflow",
json={"save": True},
json={},
)
assert response.status_code == 422 # Pydantic validation error
@@ -153,47 +122,18 @@ class TestImportWorkflow:
json={
"workflow_json": N8N_WORKFLOW,
"template_url": "https://n8n.io/workflows/123",
"save": True,
},
)
assert response.status_code == 422
def test_unknown_format_returns_400(self, mock_converter):
def test_unknown_format_returns_400(self):
response = client.post(
"/workflow",
json={"workflow_json": {"foo": "bar"}, "save": False},
json={"workflow_json": {"foo": "bar"}},
)
assert response.status_code == 400
assert "Could not detect workflow format" in response.json()["detail"]
def test_converter_failure_returns_502(self, mocker):
mocker.patch(
"backend.api.features.workflow_import.convert_workflow",
new_callable=AsyncMock,
side_effect=ValueError("LLM call failed"),
)
response = client.post(
"/workflow",
json={"workflow_json": N8N_WORKFLOW, "save": False},
)
assert response.status_code == 502
assert "LLM call failed" in response.json()["detail"]
def test_save_failure_returns_500(self, mock_converter, mocker):
mocker.patch(
"backend.copilot.tools.agent_generator.core.save_agent_to_library",
new_callable=AsyncMock,
side_effect=RuntimeError("DB connection failed"),
)
response = client.post(
"/workflow",
json={"workflow_json": N8N_WORKFLOW, "save": True},
)
assert response.status_code == 500
assert "could not be saved" in response.json()["detail"]
# Ensure internal error details are not leaked
assert "DB connection failed" not in response.json()["detail"]
def test_url_fetch_bad_url_returns_400(self, mocker):
mocker.patch(
"backend.api.features.workflow_import.fetch_n8n_template",
@@ -202,7 +142,7 @@ class TestImportWorkflow:
)
response = client.post(
"/workflow",
json={"template_url": "https://bad-url.com", "save": False},
json={"template_url": "https://bad-url.com"},
)
assert response.status_code == 400
assert "Invalid URL format" in response.json()["detail"]
@@ -215,22 +155,19 @@ class TestImportWorkflow:
)
response = client.post(
"/workflow",
json={"template_url": "https://n8n.io/workflows/123", "save": False},
json={"template_url": "https://n8n.io/workflows/123"},
)
assert response.status_code == 502
assert "n8n API returned 500" in response.json()["detail"]
def test_response_model_shape(self, mock_converter, mock_save):
def test_response_model_shape(self):
response = client.post(
"/workflow",
json={"workflow_json": N8N_WORKFLOW, "save": True},
json={"workflow_json": N8N_WORKFLOW},
)
data = response.json()
# Verify all expected fields are present
assert "graph" in data
assert "graph_id" in data
assert "library_agent_id" in data
assert "copilot_prompt" in data
assert "source_format" in data
assert "source_name" in data
assert "conversion_notes" in data
assert isinstance(data["conversion_notes"], list)
assert isinstance(data["copilot_prompt"], str)
assert len(data["copilot_prompt"]) > 0

View File

@@ -1,15 +1,16 @@
"""Workflow import module.
Converts workflows from n8n, Make.com, and Zapier into AutoGPT agent graphs.
Parses workflows from n8n, Make.com, and Zapier into structured descriptions,
then builds CoPilot prompts for the agentic agent-generator to handle conversion.
"""
from .converter import convert_workflow
from .converter import build_copilot_prompt
from .format_detector import SourcePlatform, detect_format
from .models import WorkflowDescription
__all__ = [
"SourcePlatform",
"WorkflowDescription",
"convert_workflow",
"build_copilot_prompt",
"detect_format",
]

View File

@@ -1,262 +1,49 @@
"""LLM-powered conversion of external workflows to AutoGPT agent graphs.
"""Build a CoPilot prompt from a WorkflowDescription.
Uses the CoPilot's LLM client to generate AutoGPT agent JSON from a structured
WorkflowDescription, then validates and fixes via the existing pipeline.
Instead of a custom single-shot LLM conversion, we generate a structured
prompt that CoPilot's existing agentic agent-generator handles. This reuses
the multi-turn tool-use pipeline (find_block, create_agent, fixer, validator)
for reliable workflow-to-agent conversion.
"""
import functools
import json
import logging
import pathlib
import threading
from typing import Any
from backend.copilot.config import ChatConfig
from backend.copilot.tools.agent_generator.blocks import get_blocks_as_dicts
from .models import WorkflowDescription
logger = logging.getLogger(__name__)
_AGENT_GUIDE_PATH = (
pathlib.Path(__file__).resolve().parents[1] / "sdk" / "agent_generation_guide.md"
)
def build_copilot_prompt(desc: WorkflowDescription) -> str:
"""Build a CoPilot prompt from a parsed WorkflowDescription.
_MAX_RETRIES = 1
# Cached LLM client — created once on first use, guarded by lock
_llm_client: Any = None
_llm_config: ChatConfig | None = None
_llm_lock = threading.Lock()
def _get_llm_client() -> tuple[Any, ChatConfig]:
"""Return a cached LangfuseAsyncOpenAI client (thread-safe)."""
global _llm_client, _llm_config
if _llm_client is not None:
assert _llm_config is not None
return _llm_client, _llm_config
with _llm_lock:
if _llm_client is None:
from langfuse.openai import (
AsyncOpenAI as LangfuseAsyncOpenAI, # pyright: ignore[reportPrivateImportUsage]
)
_llm_config = ChatConfig()
_llm_client = LangfuseAsyncOpenAI(
api_key=_llm_config.api_key, base_url=_llm_config.base_url
)
assert _llm_config is not None
return _llm_client, _llm_config
@functools.lru_cache(maxsize=1)
def _load_agent_guide() -> str:
"""Load the agent generation guide markdown (cached after first read)."""
return _AGENT_GUIDE_PATH.read_text()
_MAX_CATALOG_BLOCKS = 200
_MAX_CATALOG_CHARS = 50_000
def _build_block_catalog(blocks: list[dict[str, Any]]) -> str:
"""Build a compact block catalog string for the LLM prompt.
Caps at _MAX_CATALOG_BLOCKS entries / _MAX_CATALOG_CHARS to avoid
exceeding the LLM context window.
"""
lines: list[str] = []
total_len = 0
for b in blocks[:_MAX_CATALOG_BLOCKS]:
desc = (b.get("description") or "")[:200]
line = f"- **{b['name']}** (id: `{b['id']}`): {desc}"
total_len += len(line) + 1
if total_len > _MAX_CATALOG_CHARS:
lines.append(f"... and {len(blocks) - len(lines)} more blocks")
break
lines.append(line)
return "\n".join(lines)
def _build_conversion_prompt(
desc: WorkflowDescription,
block_catalog: str,
agent_guide: str,
error_feedback: str | None = None,
) -> list[dict[str, str]]:
"""Build the messages for the LLM conversion call."""
steps_text = ""
for step in desc.steps:
conns = (
f" -> connects to steps {step.connections_to}"
if step.connections_to
else ""
)
params_str = (
f" (params: {json.dumps(step.parameters, default=str)[:300]})"
if step.parameters
else ""
)
steps_text += (
f" {step.order}. [{step.service}] {step.action}{params_str}{conns}\n"
)
system_msg = f"""You are an expert at converting automation workflows into AutoGPT agent graphs.
Your task: Convert the workflow described below into a valid AutoGPT agent JSON.
## Agent Generation Guide
{agent_guide}
## Available AutoGPT Blocks
{block_catalog}
## Instructions
1. Map each workflow step to the most appropriate AutoGPT block(s)
2. If no exact block match exists, use the closest alternative (e.g., HttpRequestBlock for generic API calls)
3. Every agent MUST have at least one AgentInputBlock and one AgentOutputBlock
4. Wire blocks together with links matching the original workflow's data flow
5. Set meaningful input_default values based on the workflow's parameters
6. Position nodes with 800+ X-unit spacing
7. Return ONLY valid JSON — no markdown fences, no explanation"""
user_msg = f"""Convert this {desc.source_format.value} workflow to an AutoGPT agent:
**Name**: {desc.name}
**Description**: {desc.description}
**Trigger**: {desc.trigger_type or 'Manual'}
**Steps**:
{steps_text}
Generate the complete AutoGPT agent JSON with nodes and links."""
if error_feedback:
user_msg += f"""
IMPORTANT: Your previous attempt had validation errors. Fix them:
{error_feedback}"""
return [
{"role": "system", "content": system_msg},
{"role": "user", "content": user_msg},
]
async def convert_workflow(
desc: WorkflowDescription,
) -> tuple[dict[str, Any], list[str]]:
"""Convert a WorkflowDescription into an AutoGPT agent JSON.
The prompt describes the external workflow in enough detail for CoPilot's
agent-generator to recreate it as an AutoGPT agent graph.
Args:
desc: Structured description of the source workflow.
Returns:
Tuple of (agent_json dict, conversion_notes list).
Raises:
ValueError: If conversion fails after retries.
A user-facing prompt string for CoPilot.
"""
client, config = _get_llm_client()
blocks = get_blocks_as_dicts()
block_catalog = _build_block_catalog(blocks)
agent_guide = _load_agent_guide()
conversion_notes: list[str] = []
error_feedback: str | None = None
for attempt in range(_MAX_RETRIES + 1):
messages = _build_conversion_prompt(
desc, block_catalog, agent_guide, error_feedback
steps_text = ""
for step in desc.steps:
conns = (
f" → connects to steps {step.connections_to}" if step.connections_to else ""
)
params_str = ""
if step.parameters:
truncated = json.dumps(step.parameters, default=str)[:300]
params_str = f" (params: {truncated})"
steps_text += (
f" {step.order}. [{step.service}] {step.action}{params_str}{conns}\n"
)
try:
response = await client.chat.completions.create(
model=config.model,
messages=messages, # type: ignore[arg-type]
temperature=0.2,
max_tokens=8192,
)
except Exception as e:
raise ValueError(f"LLM call failed: {e}") from e
trigger_line = f"Trigger: {desc.trigger_type}" if desc.trigger_type else ""
if not response.choices:
raise ValueError("LLM returned no choices")
raw_content = response.choices[0].message.content or ""
return f"""I want to import a workflow from {desc.source_format.value} and recreate it as an AutoGPT agent.
# Strip markdown fences if present
content = raw_content.strip()
if content.startswith("```"):
lines = content.split("\n")
# Remove opening fence line (e.g. ```json)
lines = lines[1:]
# Find closing fence and truncate everything after it
for idx, line in enumerate(lines):
if line.strip() == "```":
lines = lines[:idx]
break
content = "\n".join(lines)
**Workflow name**: {desc.name}
**Description**: {desc.description}
{trigger_line}
try:
agent_json = json.loads(content)
except json.JSONDecodeError as e:
if attempt < _MAX_RETRIES:
error_feedback = f"Invalid JSON: {e}"
conversion_notes.append(
f"Retry {attempt + 1}: LLM output was not valid JSON"
)
continue
raise ValueError(
f"LLM produced invalid JSON after {_MAX_RETRIES + 1} attempts: {e}"
) from e
# Set metadata
agent_json.setdefault("name", desc.name)
agent_json.setdefault(
"description",
f"Imported from {desc.source_format.value}: {desc.description}",
)
agent_json.setdefault("version", 1)
agent_json.setdefault("is_active", True)
# Auto-fix (lazy import to avoid heavy module load at import time)
try:
from backend.copilot.tools.agent_generator.fixer import AgentFixer
fixer = AgentFixer()
agent_json = fixer.apply_all_fixes(agent_json, blocks)
fixes = fixer.get_fixes_applied()
if fixes:
conversion_notes.append(f"Applied {len(fixes)} auto-fixes")
logger.info("Applied %d auto-fixes to imported agent", len(fixes))
except Exception as e:
logger.warning("Auto-fix failed: %s", e)
conversion_notes.append(f"Auto-fix warning: {e}")
# Validate (lazy import to avoid heavy module load at import time)
try:
from backend.copilot.tools.agent_generator.validator import AgentValidator
validator = AgentValidator()
is_valid, _ = validator.validate(agent_json, blocks)
if not is_valid:
errors = validator.errors
if attempt < _MAX_RETRIES:
error_feedback = "\n".join(f"- {e}" for e in errors[:5])
conversion_notes.append(
f"Retry {attempt + 1}: validation errors found"
)
continue
# On final attempt, return with warnings rather than failing
conversion_notes.extend(f"Validation warning: {e}" for e in errors[:5])
conversion_notes.append("Agent may need manual fixes in the builder")
except Exception as e:
logger.warning("Validation exception: %s", e)
conversion_notes.append(f"Validation could not complete: {e}")
return agent_json, conversion_notes
raise ValueError("Conversion failed after all retries")
**Steps** (from the original {desc.source_format.value} workflow):
{steps_text}
Please build an AutoGPT agent that replicates this workflow. Map each step to the most appropriate AutoGPT block(s), wire them together, and save it.""".strip()

View File

@@ -143,10 +143,10 @@ export default function LibraryImportWorkflowDialog() {
{isConverting ? (
<div className="flex items-center gap-2">
<LoadingSpinner size="small" className="text-white" />
<span>Converting workflow...</span>
<span>Parsing workflow...</span>
</div>
) : (
"Import & Convert"
"Import to CoPilot"
)}
</Button>
</Form>

View File

@@ -31,7 +31,7 @@ export function useLibraryImportWorkflowDialog() {
let body: ImportWorkflowRequest;
if (importMode === "url" && values.templateUrl) {
body = { template_url: values.templateUrl, save: true };
body = { template_url: values.templateUrl };
} else if (importMode === "file" && values.workflowFile) {
// Decode base64 file to JSON
const base64Match = values.workflowFile.match(
@@ -42,7 +42,7 @@ export function useLibraryImportWorkflowDialog() {
}
const jsonString = atob(base64Match[1]);
const workflowJson = JSON.parse(jsonString);
body = { workflow_json: workflowJson, save: true };
body = { workflow_json: workflowJson };
} else {
throw new Error("Please provide a workflow file or template URL");
}
@@ -55,21 +55,14 @@ export function useLibraryImportWorkflowDialog() {
setIsOpen(false);
form.reset();
const notes = data.conversion_notes || [];
const hasWarnings = notes.some(
(n: string) => n.includes("warning") || n.includes("Warning"),
);
toast({
title: "Workflow Imported",
description: hasWarnings
? `Imported from ${data.source_format} with warnings. Check the builder for details.`
: `Successfully imported "${data.source_name}" from ${data.source_format}`,
title: "Workflow Parsed",
description: `Detected ${data.source_format} workflow "${data.source_name}". Redirecting to CoPilot...`,
});
if (data.graph_id) {
router.push(`/build?flowID=${data.graph_id}`);
}
// Redirect to CoPilot with the prompt pre-filled and auto-submitted
const encodedPrompt = encodeURIComponent(data.copilot_prompt);
router.push(`/copilot?autosubmit=true#prompt=${encodedPrompt}`);
} catch (error) {
console.error("Import failed:", error);
toast({
@@ -77,7 +70,7 @@ export function useLibraryImportWorkflowDialog() {
description:
error instanceof Error
? error.message
: "Failed to import workflow. Please check the file format.",
: "Failed to parse workflow. Please check the file format.",
variant: "destructive",
duration: 5000,
});

View File

@@ -2924,7 +2924,7 @@
"post": {
"tags": ["v2", "import"],
"summary": "Import a workflow from another tool (n8n, Make.com, Zapier)",
"description": "Import a workflow from another automation platform and convert it to an\nAutoGPT agent.\n\nAccepts either raw workflow JSON or a template URL (n8n only for now).\nThe workflow is parsed, described, and then converted to an AutoGPT graph\nusing LLM-powered block mapping.",
"description": "Parse an external workflow and return a CoPilot prompt.\n\nAccepts either raw workflow JSON or a template URL (n8n only for now).\nThe workflow is parsed and described, then a structured prompt is returned\nfor CoPilot's agent-generator to handle the actual conversion.",
"operationId": "postV2Import a workflow from another tool (n8n, make.com, zapier)",
"requestBody": {
"content": {
@@ -10023,8 +10023,7 @@
"template_url": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Template Url"
},
"save": { "type": "boolean", "title": "Save", "default": true }
}
},
"type": "object",
"title": "ImportWorkflowRequest",
@@ -10032,32 +10031,14 @@
},
"ImportWorkflowResponse": {
"properties": {
"graph": {
"additionalProperties": true,
"type": "object",
"title": "Graph"
},
"graph_id": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Graph Id"
},
"library_agent_id": {
"anyOf": [{ "type": "string" }, { "type": "null" }],
"title": "Library Agent Id"
},
"copilot_prompt": { "type": "string", "title": "Copilot Prompt" },
"source_format": { "type": "string", "title": "Source Format" },
"source_name": { "type": "string", "title": "Source Name" },
"conversion_notes": {
"items": { "type": "string" },
"type": "array",
"title": "Conversion Notes",
"default": []
}
"source_name": { "type": "string", "title": "Source Name" }
},
"type": "object",
"required": ["graph", "source_format", "source_name"],
"required": ["copilot_prompt", "source_format", "source_name"],
"title": "ImportWorkflowResponse",
"description": "Response from importing an external workflow."
"description": "Response from parsing an external workflow.\n\nReturns a CoPilot prompt that the frontend uses to redirect the user\nto CoPilot, where the agentic agent-generator handles the conversion."
},
"InputValidationErrorResponse": {
"properties": {