Mirror of https://github.com/Significant-Gravitas/AutoGPT.git (synced 2026-02-06 21:05:13 -05:00)

Compare commits: fix/code-r ... ntindle/go (26 commits)
Commits:

f4f81bc4fc
c5abc01f25
8b7053c1de
e00c1202ad
8fddc9d71f
3d1cd03fc8
e7ebe42306
e0fab7e34e
29ee85c86f
85b6520710
bfa942e032
11256076d8
3ca2387631
ed07f02738
b121030c94
c22c18374d
e40233a3ac
3ae5eabf9d
a077ba9f03
5401d54eaa
5ac89d7c0b
4f908d5cb3
c1aa684743
7e5b84cc5c
09cb313211
c026485023
.github/workflows/platform-frontend-ci.yml (vendored, 16 lines changed)
@@ -27,11 +27,20 @@ jobs:
     runs-on: ubuntu-latest
     outputs:
       cache-key: ${{ steps.cache-key.outputs.key }}
+      components-changed: ${{ steps.filter.outputs.components }}
 
     steps:
       - name: Checkout repository
         uses: actions/checkout@v4
 
+      - name: Check for component changes
+        uses: dorny/paths-filter@v3
+        id: filter
+        with:
+          filters: |
+            components:
+              - 'autogpt_platform/frontend/src/components/**'
+
       - name: Set up Node.js
         uses: actions/setup-node@v4
         with:

@@ -90,8 +99,11 @@ jobs:
   chromatic:
     runs-on: ubuntu-latest
     needs: setup
-    # Only run on dev branch pushes or PRs targeting dev
-    if: github.ref == 'refs/heads/dev' || github.base_ref == 'dev'
+    # Disabled: to re-enable, remove 'false &&' from the condition below
+    if: >-
+      false
+      && (github.ref == 'refs/heads/dev' || github.base_ref == 'dev')
+      && needs.setup.outputs.components-changed == 'true'
 
     steps:
       - name: Checkout repository
@@ -152,6 +152,7 @@ REPLICATE_API_KEY=
 REVID_API_KEY=
 SCREENSHOTONE_API_KEY=
 UNREAL_SPEECH_API_KEY=
+ELEVENLABS_API_KEY=
 
 # Data & Search Services
 E2B_API_KEY=
autogpt_platform/backend/.gitignore (vendored, 3 lines changed)
@@ -19,3 +19,6 @@ load-tests/*.json
 load-tests/*.log
 load-tests/node_modules/*
 migrations/*/rollback*.sql
+
+# Workspace files
+workspaces/
@@ -62,10 +62,12 @@ ENV POETRY_HOME=/opt/poetry \
     DEBIAN_FRONTEND=noninteractive
 ENV PATH=/opt/poetry/bin:$PATH
 
-# Install Python without upgrading system-managed packages
+# Install Python, FFmpeg, and ImageMagick (required for video processing blocks)
 RUN apt-get update && apt-get install -y \
     python3.13 \
     python3-pip \
+    ffmpeg \
+    imagemagick \
     && rm -rf /var/lib/apt/lists/*
 
 # Copy only necessary files from builder
@@ -11,7 +11,7 @@ class ChatConfig(BaseSettings):
 
     # OpenAI API Configuration
     model: str = Field(
-        default="anthropic/claude-opus-4.5", description="Default model to use"
+        default="anthropic/claude-opus-4.6", description="Default model to use"
    )
     title_model: str = Field(
         default="openai/gpt-4o-mini",
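
For reference, a minimal sketch of how a pydantic-settings model like this resolves a default such as the model name above; the env handling shown is the library default, and the field set is trimmed to the two fields visible in the hunk:

    # Minimal sketch, assuming pydantic-settings v2; the real ChatConfig has
    # more fields and possibly an env prefix not shown in this hunk.
    from pydantic import Field
    from pydantic_settings import BaseSettings

    class ChatConfig(BaseSettings):
        model: str = Field(
            default="anthropic/claude-opus-4.6", description="Default model to use"
        )
        title_model: str = Field(default="openai/gpt-4o-mini")

    config = ChatConfig()   # environment variables override the defaults
    print(config.model)     # "anthropic/claude-opus-4.6" unless overridden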
@@ -1,15 +1,12 @@
 import asyncio
 import logging
 import time
-import uuid as uuid_module
 from asyncio import CancelledError
 from collections.abc import AsyncGenerator
 from typing import TYPE_CHECKING, Any, cast
 
-import openai
 
-from backend.util.prompt import compress_context
 
 if TYPE_CHECKING:
     from backend.util.prompt import CompressResult
@@ -36,7 +33,7 @@ from backend.data.understanding import (
     get_business_understanding,
 )
 from backend.util.exceptions import NotFoundError
-from backend.util.settings import Settings
+from backend.util.settings import AppEnvironment, Settings
 
 from . import db as chat_db
 from . import stream_registry
@@ -225,8 +222,18 @@ async def _get_system_prompt_template(context: str) -> str:
     try:
         # cache_ttl_seconds=0 disables SDK caching to always get the latest prompt
         # Use asyncio.to_thread to avoid blocking the event loop
+        # In non-production environments, fetch the latest prompt version
+        # instead of the production-labeled version for easier testing
+        label = (
+            None
+            if settings.config.app_env == AppEnvironment.PRODUCTION
+            else "latest"
+        )
         prompt = await asyncio.to_thread(
-            langfuse.get_prompt, config.langfuse_prompt_name, cache_ttl_seconds=0
+            langfuse.get_prompt,
+            config.langfuse_prompt_name,
+            label=label,
+            cache_ttl_seconds=0,
         )
         return prompt.compile(users_information=context)
     except Exception as e:
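
In isolation, the label-selection pattern introduced above looks roughly like this; the Langfuse client call mirrors the hunk, while the is_production flag stands in for the settings.config.app_env check:

    # Minimal sketch of the hunk's prompt-fetch pattern.
    import asyncio

    async def fetch_system_prompt(
        langfuse, prompt_name: str, is_production: bool, context: str
    ) -> str:
        # None resolves to the production-labeled prompt version;
        # "latest" picks up unreleased edits in non-production environments.
        label = None if is_production else "latest"
        # get_prompt is synchronous, so run it off the event loop;
        # cache_ttl_seconds=0 disables SDK caching so edits appear immediately.
        prompt = await asyncio.to_thread(
            langfuse.get_prompt, prompt_name, label=label, cache_ttl_seconds=0
        )
        return prompt.compile(users_information=context)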
@@ -470,6 +477,8 @@ async def stream_chat_completion(
     should_retry = False
 
     # Generate unique IDs for AI SDK protocol
+    import uuid as uuid_module
+
     message_id = str(uuid_module.uuid4())
     text_block_id = str(uuid_module.uuid4())
 
@@ -619,6 +628,9 @@ async def stream_chat_completion(
                     total_tokens=chunk.totalTokens,
                 )
             )
+        elif isinstance(chunk, StreamHeartbeat):
+            # Pass through heartbeat to keep SSE connection alive
+            yield chunk
         else:
             logger.error(f"Unknown chunk type: {type(chunk)}", exc_info=True)
 
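
The heartbeat branch exists because idle SSE connections get closed by proxies and load balancers, so something upstream must emit a filler chunk when no real data flows. A self-contained sketch of that producer side (StreamHeartbeat here is a simplified stand-in for the project's type):

    # Minimal sketch of SSE keep-alive heartbeats; types are stand-ins.
    import asyncio
    from collections.abc import AsyncGenerator
    from dataclasses import dataclass

    @dataclass
    class StreamHeartbeat:
        data: str = ": keep-alive"  # SSE comment lines are ignored by clients

    async def with_heartbeat(source: AsyncGenerator, interval: float = 15.0):
        """Re-yield items from source, inserting a heartbeat when idle."""
        it = source.__aiter__()
        while True:
            try:
                item = await asyncio.wait_for(it.__anext__(), timeout=interval)
            except asyncio.TimeoutError:
                yield StreamHeartbeat()  # nothing arrived in time
                continue
            except StopAsyncIteration:
                return
            yield item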
@@ -827,6 +839,10 @@ async def _manage_context_window(
     Returns:
         CompressResult with compacted messages and metadata
     """
+    import openai
+
+    from backend.util.prompt import compress_context
+
     # Convert messages to dict format
     messages_dict = []
     for msg in messages:
@@ -1137,6 +1153,8 @@ async def _yield_tool_call(
         KeyError: If expected tool call fields are missing
         TypeError: If tool call structure is invalid
     """
+    import uuid as uuid_module
+
     tool_name = tool_calls[yield_idx]["function"]["name"]
     tool_call_id = tool_calls[yield_idx]["id"]
 
@@ -1757,6 +1775,8 @@ async def _generate_llm_continuation_with_streaming(
     after a tool result is saved. Chunks are published to the stream registry
     so reconnecting clients can receive them.
     """
+    import uuid as uuid_module
+
     try:
         # Load fresh session from DB (bypass cache to get the updated tool result)
         await invalidate_session_cache(session_id)
@@ -1792,6 +1812,10 @@ async def _generate_llm_continuation_with_streaming(
             extra_body["session_id"] = session_id[:128]
 
         # Make streaming LLM call (no tools - just text response)
         from typing import cast
 
         from openai.types.chat import ChatCompletionMessageParam
 
+        # Generate unique IDs for AI SDK protocol
+        message_id = str(uuid_module.uuid4())
+        text_block_id = str(uuid_module.uuid4())
+
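
Several hunks in this file move uuid (and in one place openai and compress_context) from module-level imports into the bodies of the functions that use them. Deferring an import to call time is the usual way to break an import cycle or keep module import cheap; the shape of the change, reduced to a sketch:

    # Sketch of the function-local import pattern used in the hunks above.
    def new_stream_ids() -> tuple[str, str]:
        # Imported at call time rather than module import time, so importing
        # this module does not pull in the dependency (or any import cycle).
        import uuid as uuid_module

        return str(uuid_module.uuid4()), str(uuid_module.uuid4())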
@@ -7,15 +7,7 @@ from typing import Any, NotRequired, TypedDict
 
 from backend.api.features.library import db as library_db
 from backend.api.features.store import db as store_db
-from backend.data.graph import (
-    Graph,
-    Link,
-    Node,
-    create_graph,
-    get_graph,
-    get_graph_all_versions,
-    get_store_listed_graphs,
-)
+from backend.data.graph import Graph, Link, Node, get_graph, get_store_listed_graphs
 from backend.util.exceptions import DatabaseError, NotFoundError
 
 from .service import (

@@ -28,8 +20,6 @@ from .service import (
 
 logger = logging.getLogger(__name__)
 
-AGENT_EXECUTOR_BLOCK_ID = "e189baac-8c20-45a1-94a7-55177ea42565"
-
 
 class ExecutionSummary(TypedDict):
     """Summary of a single execution for quality assessment."""
@@ -669,45 +659,6 @@ def json_to_graph(agent_json: dict[str, Any]) -> Graph:
     )
 
 
-def _reassign_node_ids(graph: Graph) -> None:
-    """Reassign all node and link IDs to new UUIDs.
-
-    This is needed when creating a new version to avoid unique constraint violations.
-    """
-    id_map = {node.id: str(uuid.uuid4()) for node in graph.nodes}
-
-    for node in graph.nodes:
-        node.id = id_map[node.id]
-
-    for link in graph.links:
-        link.id = str(uuid.uuid4())
-        if link.source_id in id_map:
-            link.source_id = id_map[link.source_id]
-        if link.sink_id in id_map:
-            link.sink_id = id_map[link.sink_id]
-
-
-def _populate_agent_executor_user_ids(agent_json: dict[str, Any], user_id: str) -> None:
-    """Populate user_id in AgentExecutorBlock nodes.
-
-    The external agent generator creates AgentExecutorBlock nodes with empty user_id.
-    This function fills in the actual user_id so sub-agents run with correct permissions.
-
-    Args:
-        agent_json: Agent JSON dict (modified in place)
-        user_id: User ID to set
-    """
-    for node in agent_json.get("nodes", []):
-        if node.get("block_id") == AGENT_EXECUTOR_BLOCK_ID:
-            input_default = node.get("input_default") or {}
-            if not input_default.get("user_id"):
-                input_default["user_id"] = user_id
-                node["input_default"] = input_default
-                logger.debug(
-                    f"Set user_id for AgentExecutorBlock node {node.get('id')}"
-                )
-
-
 async def save_agent_to_library(
     agent_json: dict[str, Any], user_id: str, is_update: bool = False
 ) -> tuple[Graph, Any]:

@@ -721,35 +672,10 @@ async def save_agent_to_library(
     Returns:
         Tuple of (created Graph, LibraryAgent)
     """
-    # Populate user_id in AgentExecutorBlock nodes before conversion
-    _populate_agent_executor_user_ids(agent_json, user_id)
-
     graph = json_to_graph(agent_json)
 
     if is_update:
-        if graph.id:
-            existing_versions = await get_graph_all_versions(graph.id, user_id)
-            if existing_versions:
-                latest_version = max(v.version for v in existing_versions)
-                graph.version = latest_version + 1
-        _reassign_node_ids(graph)
-        logger.info(f"Updating agent {graph.id} to version {graph.version}")
-    else:
-        graph.id = str(uuid.uuid4())
-        graph.version = 1
-        _reassign_node_ids(graph)
-        logger.info(f"Creating new agent with ID {graph.id}")
-
-    created_graph = await create_graph(graph, user_id)
-
-    library_agents = await library_db.create_library_agent(
-        graph=created_graph,
-        user_id=user_id,
-        sensitive_action_safe_mode=True,
-        create_library_agents_for_sub_graphs=False,
-    )
-
-    return created_graph, library_agents[0]
+        return await library_db.update_graph_in_library(graph, user_id)
+    return await library_db.create_graph_in_library(graph, user_id)
 
 
 def graph_to_json(graph: Graph) -> dict[str, Any]:
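
With the version bookkeeping, ID reassignment, and library-agent creation moved behind library_db, the caller's contract is just the is_update flag. A hedged usage sketch, assuming the library helpers return the same (Graph, LibraryAgent) tuple the signature promises:

    # Usage sketch; the agent_json shape is simplified.
    agent_json = {"name": "My Agent", "nodes": [], "links": []}

    async def demo(user_id: str) -> None:
        # is_update=False routes to library_db.create_graph_in_library(...)
        graph, library_agent = await save_agent_to_library(agent_json, user_id)
        # is_update=True routes to library_db.update_graph_in_library(...)
        graph, library_agent = await save_agent_to_library(
            agent_json, user_id, is_update=True
        )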
@@ -206,9 +206,9 @@ async def search_agents(
         ]
     )
     no_results_msg = (
-        f"No agents found matching '{query}'. Try different keywords or browse the marketplace."
+        f"No agents found matching '{query}'. Let the user know they can try different keywords or browse the marketplace. Also let them know you can create a custom agent for them based on their needs."
         if source == "marketplace"
-        else f"No agents matching '{query}' found in your library."
+        else f"No agents matching '{query}' found in your library. Let the user know you can create a custom agent for them based on their needs."
     )
     return NoResultsResponse(
         message=no_results_msg, session_id=session_id, suggestions=suggestions

@@ -224,10 +224,10 @@ async def search_agents(
     message = (
         "Now you have found some options for the user to choose from. "
         "You can add a link to a recommended agent at: /marketplace/agent/agent_id "
-        "Please ask the user if they would like to use any of these agents."
+        "Please ask the user if they would like to use any of these agents. Let the user know we can create a custom agent for them based on their needs."
         if source == "marketplace"
         else "Found agents in the user's library. You can provide a link to view an agent at: "
-        "/library/agents/{agent_id}. Use agent_output to get execution results, or run_agent to execute."
+        "/library/agents/{agent_id}. Use agent_output to get execution results, or run_agent to execute. Let the user know we can create a custom agent for them based on their needs."
     )
 
     return AgentsFoundResponse(
@@ -3,8 +3,6 @@
 import logging
 from typing import Any
 
-from pydantic import BaseModel, field_validator
-
 from backend.api.features.chat.model import ChatSession
 
 from .agent_generator import (

@@ -30,26 +28,6 @@ from .models import (
 logger = logging.getLogger(__name__)
 
 
-class CreateAgentInput(BaseModel):
-    """Input parameters for the create_agent tool."""
-
-    description: str = ""
-    context: str = ""
-    save: bool = True
-    # Internal async processing params (passed by long-running tool handler)
-    _operation_id: str | None = None
-    _task_id: str | None = None
-
-    @field_validator("description", "context", mode="before")
-    @classmethod
-    def strip_strings(cls, v: Any) -> str:
-        """Strip whitespace from string fields."""
-        return v.strip() if isinstance(v, str) else (v if v is not None else "")
-
-    class Config:
-        extra = "allow"  # Allow _operation_id, _task_id from kwargs
-
-
 class CreateAgentTool(BaseTool):
     """Tool for creating agents from natural language descriptions."""

@@ -107,7 +85,7 @@ class CreateAgentTool(BaseTool):
         self,
         user_id: str | None,
         session: ChatSession,
-        **kwargs: Any,
+        **kwargs,
     ) -> ToolResponseBase:
         """Execute the create_agent tool.

@@ -116,14 +94,16 @@ class CreateAgentTool(BaseTool):
         2. Generate agent JSON (external service handles fixing and validation)
         3. Preview or save based on the save parameter
         """
-        params = CreateAgentInput(**kwargs)
+        description = kwargs.get("description", "").strip()
+        context = kwargs.get("context", "")
+        save = kwargs.get("save", True)
         session_id = session.session_id if session else None
 
-        # Extract async processing params
+        # Extract async processing params (passed by long-running tool handler)
         operation_id = kwargs.get("_operation_id")
         task_id = kwargs.get("_task_id")
 
-        if not params.description:
+        if not description:
             return ErrorResponse(
                 message="Please provide a description of what the agent should do.",
                 error="Missing description parameter",

@@ -135,7 +115,7 @@ class CreateAgentTool(BaseTool):
         try:
             library_agents = await get_all_relevant_agents_for_generation(
                 user_id=user_id,
-                search_query=params.description,
+                search_query=description,
                 include_marketplace=True,
             )
             logger.debug(

@@ -146,7 +126,7 @@ class CreateAgentTool(BaseTool):
 
         try:
             decomposition_result = await decompose_goal(
-                params.description, params.context, library_agents
+                description, context, library_agents
             )
         except AgentGeneratorNotConfiguredError:
             return ErrorResponse(

@@ -162,7 +142,7 @@ class CreateAgentTool(BaseTool):
             return ErrorResponse(
                 message="Failed to analyze the goal. The agent generation service may be unavailable. Please try again.",
                 error="decomposition_failed",
-                details={"description": params.description[:100]},
+                details={"description": description[:100]},
                 session_id=session_id,
             )

@@ -178,7 +158,7 @@ class CreateAgentTool(BaseTool):
                 message=user_message,
                 error=f"decomposition_failed:{error_type}",
                 details={
-                    "description": params.description[:100],
+                    "description": description[:100],
                     "service_error": error_msg,
                     "error_type": error_type,
                 },

@@ -264,7 +244,7 @@ class CreateAgentTool(BaseTool):
             return ErrorResponse(
                 message="Failed to generate the agent. The agent generation service may be unavailable. Please try again.",
                 error="generation_failed",
-                details={"description": params.description[:100]},
+                details={"description": description[:100]},
                 session_id=session_id,
             )

@@ -286,7 +266,7 @@ class CreateAgentTool(BaseTool):
                 message=user_message,
                 error=f"generation_failed:{error_type}",
                 details={
-                    "description": params.description[:100],
+                    "description": description[:100],
                     "service_error": error_msg,
                     "error_type": error_type,
                 },

@@ -311,7 +291,7 @@ class CreateAgentTool(BaseTool):
         node_count = len(agent_json.get("nodes", []))
         link_count = len(agent_json.get("links", []))
 
-        if not params.save:
+        if not save:
             return AgentPreviewResponse(
                 message=(
                     f"I've generated an agent called '{agent_name}' with {node_count} blocks. "
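
This and the sibling tool diffs below all replace a per-tool Pydantic input model with direct kwargs parsing. The recurring shape, isolated:

    # Sketch of the kwargs convention the refactored tools share.
    from typing import Any

    def parse_create_agent_kwargs(**kwargs: Any) -> tuple[str, str, bool]:
        description = kwargs.get("description", "").strip()
        context = kwargs.get("context", "")
        save = kwargs.get("save", True)
        if not description:
            raise ValueError("Missing description parameter")
        return description, context, save

One behavioral difference worth noting: the removed field_validator coerced non-string values to strings, whereas .strip() on kwargs.get(...) assumes the caller always passes a string; a non-string description would now raise AttributeError instead of being normalized.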
@@ -3,8 +3,6 @@
 import logging
 from typing import Any
 
-from pydantic import BaseModel, field_validator
-
 from backend.api.features.chat.model import ChatSession
 from backend.api.features.store import db as store_db
 from backend.api.features.store.exceptions import AgentNotFoundError

@@ -29,23 +27,6 @@ from .models import (
 logger = logging.getLogger(__name__)
 
 
-class CustomizeAgentInput(BaseModel):
-    """Input parameters for the customize_agent tool."""
-
-    agent_id: str = ""
-    modifications: str = ""
-    context: str = ""
-    save: bool = True
-
-    @field_validator("agent_id", "modifications", "context", mode="before")
-    @classmethod
-    def strip_strings(cls, v: Any) -> str:
-        """Strip whitespace from string fields."""
-        if isinstance(v, str):
-            return v.strip()
-        return v if v is not None else ""
-
-
 class CustomizeAgentTool(BaseTool):
     """Tool for customizing marketplace/template agents using natural language."""

@@ -111,7 +92,7 @@ class CustomizeAgentTool(BaseTool):
         self,
         user_id: str | None,
         session: ChatSession,
-        **kwargs: Any,
+        **kwargs,
     ) -> ToolResponseBase:
         """Execute the customize_agent tool.

@@ -121,17 +102,20 @@ class CustomizeAgentTool(BaseTool):
         3. Call customize_template with the modification request
         4. Preview or save based on the save parameter
         """
-        params = CustomizeAgentInput(**kwargs)
+        agent_id = kwargs.get("agent_id", "").strip()
+        modifications = kwargs.get("modifications", "").strip()
+        context = kwargs.get("context", "")
+        save = kwargs.get("save", True)
         session_id = session.session_id if session else None
 
-        if not params.agent_id:
+        if not agent_id:
             return ErrorResponse(
                 message="Please provide the marketplace agent ID (e.g., 'creator/agent-name').",
                 error="missing_agent_id",
                 session_id=session_id,
             )
 
-        if not params.modifications:
+        if not modifications:
             return ErrorResponse(
                 message="Please describe how you want to customize this agent.",
                 error="missing_modifications",

@@ -139,11 +123,11 @@ class CustomizeAgentTool(BaseTool):
             )
 
         # Parse agent_id in format "creator/slug"
-        parts = [p.strip() for p in params.agent_id.split("/")]
+        parts = [p.strip() for p in agent_id.split("/")]
         if len(parts) != 2 or not parts[0] or not parts[1]:
             return ErrorResponse(
                 message=(
-                    f"Invalid agent ID format: '{params.agent_id}'. "
+                    f"Invalid agent ID format: '{agent_id}'. "
                     "Expected format is 'creator/agent-name' "
                     "(e.g., 'autogpt/newsletter-writer')."
                 ),

@@ -161,14 +145,14 @@ class CustomizeAgentTool(BaseTool):
         except AgentNotFoundError:
             return ErrorResponse(
                 message=(
-                    f"Could not find marketplace agent '{params.agent_id}'. "
+                    f"Could not find marketplace agent '{agent_id}'. "
                     "Please check the agent ID and try again."
                 ),
                 error="agent_not_found",
                 session_id=session_id,
             )
         except Exception as e:
-            logger.error(f"Error fetching marketplace agent {params.agent_id}: {e}")
+            logger.error(f"Error fetching marketplace agent {agent_id}: {e}")
             return ErrorResponse(
                 message="Failed to fetch the marketplace agent. Please try again.",
                 error="fetch_error",

@@ -178,7 +162,7 @@ class CustomizeAgentTool(BaseTool):
         if not agent_details.store_listing_version_id:
             return ErrorResponse(
                 message=(
-                    f"The agent '{params.agent_id}' does not have an available version. "
+                    f"The agent '{agent_id}' does not have an available version. "
                     "Please try a different agent."
                 ),
                 error="no_version_available",

@@ -190,7 +174,7 @@ class CustomizeAgentTool(BaseTool):
             graph = await store_db.get_agent(agent_details.store_listing_version_id)
             template_agent = graph_to_json(graph)
         except Exception as e:
-            logger.error(f"Error fetching agent graph for {params.agent_id}: {e}")
+            logger.error(f"Error fetching agent graph for {agent_id}: {e}")
             return ErrorResponse(
                 message="Failed to fetch the agent configuration. Please try again.",
                 error="graph_fetch_error",

@@ -201,8 +185,8 @@ class CustomizeAgentTool(BaseTool):
         try:
             result = await customize_template(
                 template_agent=template_agent,
-                modification_request=params.modifications,
-                context=params.context,
+                modification_request=modifications,
+                context=context,
             )
         except AgentGeneratorNotConfiguredError:
             return ErrorResponse(

@@ -214,7 +198,7 @@ class CustomizeAgentTool(BaseTool):
                 session_id=session_id,
             )
         except Exception as e:
-            logger.error(f"Error calling customize_template for {params.agent_id}: {e}")
+            logger.error(f"Error calling customize_template for {agent_id}: {e}")
             return ErrorResponse(
                 message=(
                     "Failed to customize the agent due to a service error. "

@@ -235,25 +219,55 @@ class CustomizeAgentTool(BaseTool):
                 session_id=session_id,
             )
 
-        # Handle response using match/case for cleaner pattern matching
-        return await self._handle_customization_result(
-            result=result,
-            params=params,
-            agent_details=agent_details,
-            user_id=user_id,
-            session_id=session_id,
-        )
-
-    async def _handle_customization_result(
-        self,
-        result: dict[str, Any],
-        params: CustomizeAgentInput,
-        agent_details: Any,
-        user_id: str | None,
-        session_id: str | None,
-    ) -> ToolResponseBase:
-        """Handle the result from customize_template using pattern matching."""
-        # Ensure result is a dict
+        # Handle error response
+        if isinstance(result, dict) and result.get("type") == "error":
+            error_msg = result.get("error", "Unknown error")
+            error_type = result.get("error_type", "unknown")
+            user_message = get_user_message_for_error(
+                error_type,
+                operation="customize the agent",
+                llm_parse_message=(
+                    "The AI had trouble customizing the agent. "
+                    "Please try again or simplify your request."
+                ),
+                validation_message=(
+                    "The customized agent failed validation. "
+                    "Please try rephrasing your request."
+                ),
+                error_details=error_msg,
+            )
+            return ErrorResponse(
+                message=user_message,
+                error=f"customization_failed:{error_type}",
+                session_id=session_id,
+            )
+
+        # Handle clarifying questions
+        if isinstance(result, dict) and result.get("type") == "clarifying_questions":
+            questions = result.get("questions") or []
+            if not isinstance(questions, list):
+                logger.error(
+                    f"Unexpected clarifying questions format: {type(questions)}"
+                )
+                questions = []
+            return ClarificationNeededResponse(
+                message=(
+                    "I need some more information to customize this agent. "
+                    "Please answer the following questions:"
+                ),
+                questions=[
+                    ClarifyingQuestion(
+                        question=q.get("question", ""),
+                        keyword=q.get("keyword", ""),
+                        example=q.get("example"),
+                    )
+                    for q in questions
+                    if isinstance(q, dict)
+                ],
+                session_id=session_id,
+            )
 
         # Result should be the customized agent JSON
         if not isinstance(result, dict):
             logger.error(f"Unexpected customize_template response type: {type(result)}")
             return ErrorResponse(

@@ -262,77 +276,8 @@ class CustomizeAgentTool(BaseTool):
                 session_id=session_id,
             )
 
-        result_type = result.get("type")
-
-        match result_type:
-            case "error":
-                error_msg = result.get("error", "Unknown error")
-                error_type = result.get("error_type", "unknown")
-                user_message = get_user_message_for_error(
-                    error_type,
-                    operation="customize the agent",
-                    llm_parse_message=(
-                        "The AI had trouble customizing the agent. "
-                        "Please try again or simplify your request."
-                    ),
-                    validation_message=(
-                        "The customized agent failed validation. "
-                        "Please try rephrasing your request."
-                    ),
-                    error_details=error_msg,
-                )
-                return ErrorResponse(
-                    message=user_message,
-                    error=f"customization_failed:{error_type}",
-                    session_id=session_id,
-                )
-
-            case "clarifying_questions":
-                questions_data = result.get("questions") or []
-                if not isinstance(questions_data, list):
-                    logger.error(
-                        f"Unexpected clarifying questions format: {type(questions_data)}"
-                    )
-                    questions_data = []
-
-                questions = [
-                    ClarifyingQuestion(
-                        question=q.get("question", "") if isinstance(q, dict) else "",
-                        keyword=q.get("keyword", "") if isinstance(q, dict) else "",
-                        example=q.get("example") if isinstance(q, dict) else None,
-                    )
-                    for q in questions_data
-                    if isinstance(q, dict)
-                ]
-
-                return ClarificationNeededResponse(
-                    message=(
-                        "I need some more information to customize this agent. "
-                        "Please answer the following questions:"
-                    ),
-                    questions=questions,
-                    session_id=session_id,
-                )
-
-            case _:
-                # Default case: result is the customized agent JSON
-                return await self._save_or_preview_agent(
-                    customized_agent=result,
-                    params=params,
-                    agent_details=agent_details,
-                    user_id=user_id,
-                    session_id=session_id,
-                )
-
-    async def _save_or_preview_agent(
-        self,
-        customized_agent: dict[str, Any],
-        params: CustomizeAgentInput,
-        agent_details: Any,
-        user_id: str | None,
-        session_id: str | None,
-    ) -> ToolResponseBase:
-        """Save or preview the customized agent based on params.save."""
+        customized_agent = result
         agent_name = customized_agent.get(
             "name", f"Customized {agent_details.agent_name}"
         )

@@ -342,7 +287,7 @@ class CustomizeAgentTool(BaseTool):
         node_count = len(nodes) if isinstance(nodes, list) else 0
         link_count = len(links) if isinstance(links, list) else 0
 
-        if not params.save:
+        if not save:
             return AgentPreviewResponse(
                 message=(
                     f"I've customized the agent '{agent_details.agent_name}'. "
@@ -3,8 +3,6 @@
 import logging
 from typing import Any
 
-from pydantic import BaseModel, ConfigDict, field_validator
-
 from backend.api.features.chat.model import ChatSession
 
 from .agent_generator import (

@@ -29,20 +27,6 @@ from .models import (
 logger = logging.getLogger(__name__)
 
 
-class EditAgentInput(BaseModel):
-    model_config = ConfigDict(extra="allow")
-
-    agent_id: str = ""
-    changes: str = ""
-    context: str = ""
-    save: bool = True
-
-    @field_validator("agent_id", "changes", "context", mode="before")
-    @classmethod
-    def strip_strings(cls, v: Any) -> str:
-        return v.strip() if isinstance(v, str) else (v if v is not None else "")
-
-
 class EditAgentTool(BaseTool):
     """Tool for editing existing agents using natural language."""

@@ -106,7 +90,7 @@ class EditAgentTool(BaseTool):
         self,
         user_id: str | None,
         session: ChatSession,
-        **kwargs: Any,
+        **kwargs,
     ) -> ToolResponseBase:
         """Execute the edit_agent tool.

@@ -115,32 +99,35 @@ class EditAgentTool(BaseTool):
         2. Generate updated agent (external service handles fixing and validation)
         3. Preview or save based on the save parameter
         """
-        params = EditAgentInput(**kwargs)
+        agent_id = kwargs.get("agent_id", "").strip()
+        changes = kwargs.get("changes", "").strip()
+        context = kwargs.get("context", "")
+        save = kwargs.get("save", True)
         session_id = session.session_id if session else None
 
         # Extract async processing params (passed by long-running tool handler)
         operation_id = kwargs.get("_operation_id")
         task_id = kwargs.get("_task_id")
 
-        if not params.agent_id:
+        if not agent_id:
             return ErrorResponse(
                 message="Please provide the agent ID to edit.",
                 error="Missing agent_id parameter",
                 session_id=session_id,
             )
 
-        if not params.changes:
+        if not changes:
             return ErrorResponse(
                 message="Please describe what changes you want to make.",
                 error="Missing changes parameter",
                 session_id=session_id,
             )
 
-        current_agent = await get_agent_as_json(params.agent_id, user_id)
+        current_agent = await get_agent_as_json(agent_id, user_id)
 
         if current_agent is None:
             return ErrorResponse(
-                message=f"Could not find agent '{params.agent_id}' in your library.",
+                message=f"Could not find agent with ID '{agent_id}' in your library.",
                 error="agent_not_found",
                 session_id=session_id,
             )

@@ -151,7 +138,7 @@ class EditAgentTool(BaseTool):
         graph_id = current_agent.get("id")
         library_agents = await get_all_relevant_agents_for_generation(
             user_id=user_id,
-            search_query=params.changes,
+            search_query=changes,
             exclude_graph_id=graph_id,
             include_marketplace=True,
         )

@@ -161,11 +148,9 @@ class EditAgentTool(BaseTool):
         except Exception as e:
             logger.warning(f"Failed to fetch library agents: {e}")
 
-        update_request = params.changes
-        if params.context:
-            update_request = (
-                f"{params.changes}\n\nAdditional context:\n{params.context}"
-            )
+        update_request = changes
+        if context:
+            update_request = f"{changes}\n\nAdditional context:\n{context}"
 
         try:
             result = await generate_agent_patch(

@@ -189,7 +174,7 @@ class EditAgentTool(BaseTool):
             return ErrorResponse(
                 message="Failed to generate changes. The agent generation service may be unavailable or timed out. Please try again.",
                 error="update_generation_failed",
-                details={"agent_id": params.agent_id, "changes": params.changes[:100]},
+                details={"agent_id": agent_id, "changes": changes[:100]},
                 session_id=session_id,
             )

@@ -221,8 +206,8 @@ class EditAgentTool(BaseTool):
                 message=user_message,
                 error=f"update_generation_failed:{error_type}",
                 details={
-                    "agent_id": params.agent_id,
-                    "changes": params.changes[:100],
+                    "agent_id": agent_id,
+                    "changes": changes[:100],
                     "service_error": error_msg,
                     "error_type": error_type,
                 },

@@ -254,7 +239,7 @@ class EditAgentTool(BaseTool):
         node_count = len(updated_agent.get("nodes", []))
         link_count = len(updated_agent.get("links", []))
 
-        if not params.save:
+        if not save:
             return AgentPreviewResponse(
                 message=(
                     f"I've updated the agent. "
@@ -2,8 +2,6 @@
 
 from typing import Any
 
-from pydantic import BaseModel, field_validator
-
 from backend.api.features.chat.model import ChatSession
 
 from .agent_search import search_agents

@@ -11,18 +9,6 @@ from .base import BaseTool
 from .models import ToolResponseBase
 
 
-class FindAgentInput(BaseModel):
-    """Input parameters for the find_agent tool."""
-
-    query: str = ""
-
-    @field_validator("query", mode="before")
-    @classmethod
-    def strip_string(cls, v: Any) -> str:
-        """Strip whitespace from query."""
-        return v.strip() if isinstance(v, str) else (v if v is not None else "")
-
-
 class FindAgentTool(BaseTool):
     """Tool for discovering agents from the marketplace."""

@@ -50,11 +36,10 @@ class FindAgentTool(BaseTool):
         }
 
     async def _execute(
-        self, user_id: str | None, session: ChatSession, **kwargs: Any
+        self, user_id: str | None, session: ChatSession, **kwargs
     ) -> ToolResponseBase:
-        params = FindAgentInput(**kwargs)
         return await search_agents(
-            query=params.query,
+            query=kwargs.get("query", "").strip(),
             source="marketplace",
             session_id=session.session_id,
             user_id=user_id,
@@ -2,7 +2,6 @@ import logging
 from typing import Any
 
 from prisma.enums import ContentType
-from pydantic import BaseModel, field_validator
 
 from backend.api.features.chat.model import ChatSession
 from backend.api.features.chat.tools.base import BaseTool, ToolResponseBase

@@ -19,18 +18,6 @@ from backend.data.block import get_block
 logger = logging.getLogger(__name__)
 
 
-class FindBlockInput(BaseModel):
-    """Input parameters for the find_block tool."""
-
-    query: str = ""
-
-    @field_validator("query", mode="before")
-    @classmethod
-    def strip_string(cls, v: Any) -> str:
-        """Strip whitespace from query."""
-        return v.strip() if isinstance(v, str) else (v if v is not None else "")
-
-
 class FindBlockTool(BaseTool):
     """Tool for searching available blocks."""

@@ -72,24 +59,24 @@ class FindBlockTool(BaseTool):
         self,
         user_id: str | None,
         session: ChatSession,
-        **kwargs: Any,
+        **kwargs,
     ) -> ToolResponseBase:
         """Search for blocks matching the query.
 
         Args:
             user_id: User ID (required)
             session: Chat session
             **kwargs: Tool parameters
                 query: Search query
 
         Returns:
             BlockListResponse: List of matching blocks
             NoResultsResponse: No blocks found
             ErrorResponse: Error message
         """
-        params = FindBlockInput(**kwargs)
+        query = kwargs.get("query", "").strip()
         session_id = session.session_id
 
-        if not params.query:
+        if not query:
             return ErrorResponse(
                 message="Please provide a search query",
                 session_id=session_id,

@@ -98,7 +85,7 @@ class FindBlockTool(BaseTool):
         try:
             # Search for blocks using hybrid search
             results, total = await unified_hybrid_search(
-                query=params.query,
+                query=query,
                 content_types=[ContentType.BLOCK],
                 page=1,
                 page_size=10,

@@ -106,7 +93,7 @@ class FindBlockTool(BaseTool):
 
         if not results:
             return NoResultsResponse(
-                message=f"No blocks found for '{params.query}'",
+                message=f"No blocks found for '{query}'",
                 suggestions=[
                     "Try broader keywords like 'email', 'http', 'text', 'ai'",
                     "Check spelling of technical terms",

@@ -178,7 +165,7 @@ class FindBlockTool(BaseTool):
 
         if not blocks:
             return NoResultsResponse(
-                message=f"No blocks found for '{params.query}'",
+                message=f"No blocks found for '{query}'",
                 suggestions=[
                     "Try broader keywords like 'email', 'http', 'text', 'ai'",
                 ],

@@ -187,13 +174,13 @@ class FindBlockTool(BaseTool):
 
         return BlockListResponse(
             message=(
-                f"Found {len(blocks)} block(s) matching '{params.query}'. "
+                f"Found {len(blocks)} block(s) matching '{query}'. "
                 "To execute a block, use run_block with the block's 'id' field "
                 "and provide 'input_data' matching the block's input_schema."
             ),
             blocks=blocks,
             count=len(blocks),
-            query=params.query,
+            query=query,
             session_id=session_id,
         )
@@ -2,8 +2,6 @@
 
 from typing import Any
 
-from pydantic import BaseModel, field_validator
-
 from backend.api.features.chat.model import ChatSession
 
 from .agent_search import search_agents

@@ -11,15 +9,6 @@ from .base import BaseTool
 from .models import ToolResponseBase
 
 
-class FindLibraryAgentInput(BaseModel):
-    query: str = ""
-
-    @field_validator("query", mode="before")
-    @classmethod
-    def strip_string(cls, v: Any) -> str:
-        return v.strip() if isinstance(v, str) else (v if v is not None else "")
-
-
 class FindLibraryAgentTool(BaseTool):
     """Tool for searching agents in the user's library."""

@@ -53,11 +42,10 @@ class FindLibraryAgentTool(BaseTool):
         return True
 
     async def _execute(
-        self, user_id: str | None, session: ChatSession, **kwargs: Any
+        self, user_id: str | None, session: ChatSession, **kwargs
    ) -> ToolResponseBase:
-        params = FindLibraryAgentInput(**kwargs)
         return await search_agents(
-            query=params.query,
+            query=kwargs.get("query", "").strip(),
             source="library",
             session_id=session.session_id,
             user_id=user_id,
@@ -4,8 +4,6 @@ import logging
 from pathlib import Path
 from typing import Any
 
-from pydantic import BaseModel, field_validator
-
 from backend.api.features.chat.model import ChatSession
 from backend.api.features.chat.tools.base import BaseTool
 from backend.api.features.chat.tools.models import (

@@ -20,18 +18,6 @@ logger = logging.getLogger(__name__)
 DOCS_BASE_URL = "https://docs.agpt.co"
 
 
-class GetDocPageInput(BaseModel):
-    """Input parameters for the get_doc_page tool."""
-
-    path: str = ""
-
-    @field_validator("path", mode="before")
-    @classmethod
-    def strip_string(cls, v: Any) -> str:
-        """Strip whitespace from path."""
-        return v.strip() if isinstance(v, str) else (v if v is not None else "")
-
-
 class GetDocPageTool(BaseTool):
     """Tool for fetching full content of a documentation page."""

@@ -89,23 +75,23 @@ class GetDocPageTool(BaseTool):
         self,
         user_id: str | None,
         session: ChatSession,
-        **kwargs: Any,
+        **kwargs,
     ) -> ToolResponseBase:
         """Fetch full content of a documentation page.
 
         Args:
             user_id: User ID (not required for docs)
             session: Chat session
             **kwargs: Tool parameters
                 path: Path to the documentation file
 
         Returns:
             DocPageResponse: Full document content
             ErrorResponse: Error message
         """
-        params = GetDocPageInput(**kwargs)
+        path = kwargs.get("path", "").strip()
         session_id = session.session_id if session else None
 
-        if not params.path:
+        if not path:
             return ErrorResponse(
                 message="Please provide a documentation path.",
                 error="Missing path parameter",

@@ -113,7 +99,7 @@ class GetDocPageTool(BaseTool):
             )
 
         # Sanitize path to prevent directory traversal
-        if ".." in params.path or params.path.startswith("/"):
+        if ".." in path or path.startswith("/"):
             return ErrorResponse(
                 message="Invalid documentation path.",
                 error="invalid_path",

@@ -121,11 +107,11 @@ class GetDocPageTool(BaseTool):
             )
 
         docs_root = self._get_docs_root()
-        full_path = docs_root / params.path
+        full_path = docs_root / path
 
         if not full_path.exists():
             return ErrorResponse(
-                message=f"Documentation page not found: {params.path}",
+                message=f"Documentation page not found: {path}",
                 error="not_found",
                 session_id=session_id,
             )

@@ -142,19 +128,19 @@ class GetDocPageTool(BaseTool):
 
         try:
             content = full_path.read_text(encoding="utf-8")
-            title = self._extract_title(content, params.path)
+            title = self._extract_title(content, path)
 
             return DocPageResponse(
                 message=f"Retrieved documentation page: {title}",
                 title=title,
-                path=params.path,
+                path=path,
                 content=content,
-                doc_url=self._make_doc_url(params.path),
+                doc_url=self._make_doc_url(path),
                 session_id=session_id,
             )
 
         except Exception as e:
-            logger.error(f"Failed to read documentation page {params.path}: {e}")
+            logger.error(f"Failed to read documentation page {path}: {e}")
             return ErrorResponse(
                 message=f"Failed to read documentation page: {str(e)}",
                 error="read_failed",
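
The substring check above (".." in path or path.startswith("/")) rejects the common traversal inputs. A stricter containment-based variant, offered as a sketch rather than the project's actual guard:

    # Sketch: resolve the candidate and confirm it stays inside docs_root.
    from pathlib import Path

    def is_within_docs(docs_root: Path, user_path: str) -> bool:
        candidate = (docs_root / user_path).resolve()
        try:
            candidate.relative_to(docs_root.resolve())
            return True
        except ValueError:
            # Resolved outside docs_root (e.g. via ".." or a symlink)
            return False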
@@ -5,7 +5,6 @@ import uuid
 from collections import defaultdict
 from typing import Any
 
-from pydantic import BaseModel, field_validator
 from pydantic_core import PydanticUndefined
 
 from backend.api.features.chat.model import ChatSession

@@ -30,25 +29,6 @@ from .utils import build_missing_credentials_from_field_info
 logger = logging.getLogger(__name__)
 
 
-class RunBlockInput(BaseModel):
-    """Input parameters for the run_block tool."""
-
-    block_id: str = ""
-    input_data: dict[str, Any] = {}
-
-    @field_validator("block_id", mode="before")
-    @classmethod
-    def strip_block_id(cls, v: Any) -> str:
-        """Strip whitespace from block_id."""
-        return v.strip() if isinstance(v, str) else (v if v is not None else "")
-
-    @field_validator("input_data", mode="before")
-    @classmethod
-    def ensure_dict(cls, v: Any) -> dict[str, Any]:
-        """Ensure input_data is a dict."""
-        return v if isinstance(v, dict) else {}
-
-
 class RunBlockTool(BaseTool):
     """Tool for executing a block and returning its outputs."""

@@ -182,29 +162,37 @@ class RunBlockTool(BaseTool):
         self,
         user_id: str | None,
         session: ChatSession,
-        **kwargs: Any,
+        **kwargs,
     ) -> ToolResponseBase:
         """Execute a block with the given input data.
 
         Args:
             user_id: User ID (required)
             session: Chat session
             **kwargs: Tool parameters
                 block_id: Block UUID to execute
                 input_data: Input values for the block
 
         Returns:
             BlockOutputResponse: Block execution outputs
             SetupRequirementsResponse: Missing credentials
             ErrorResponse: Error message
         """
-        params = RunBlockInput(**kwargs)
+        block_id = kwargs.get("block_id", "").strip()
+        input_data = kwargs.get("input_data", {})
         session_id = session.session_id
 
-        if not params.block_id:
+        if not block_id:
             return ErrorResponse(
                 message="Please provide a block_id",
                 session_id=session_id,
             )
 
+        if not isinstance(input_data, dict):
+            return ErrorResponse(
+                message="input_data must be an object",
+                session_id=session_id,
+            )
+
         if not user_id:
             return ErrorResponse(
                 message="Authentication required",

@@ -212,25 +200,23 @@ class RunBlockTool(BaseTool):
             )
 
         # Get the block
-        block = get_block(params.block_id)
+        block = get_block(block_id)
         if not block:
             return ErrorResponse(
-                message=f"Block '{params.block_id}' not found",
+                message=f"Block '{block_id}' not found",
                 session_id=session_id,
             )
         if block.disabled:
             return ErrorResponse(
-                message=f"Block '{params.block_id}' is disabled",
+                message=f"Block '{block_id}' is disabled",
                 session_id=session_id,
             )
 
-        logger.info(
-            f"Executing block {block.name} ({params.block_id}) for user {user_id}"
-        )
+        logger.info(f"Executing block {block.name} ({block_id}) for user {user_id}")
 
         creds_manager = IntegrationCredentialsManager()
         matched_credentials, missing_credentials = await self._check_block_credentials(
-            user_id, block, params.input_data
+            user_id, block, input_data
         )
 
         if missing_credentials:

@@ -248,7 +234,7 @@ class RunBlockTool(BaseTool):
                 ),
                 session_id=session_id,
                 setup_info=SetupInfo(
-                    agent_id=params.block_id,
+                    agent_id=block_id,
                     agent_name=block.name,
                     user_readiness=UserReadiness(
                         has_all_credentials=False,

@@ -277,7 +263,7 @@ class RunBlockTool(BaseTool):
         # - node_exec_id = unique per block execution
         synthetic_graph_id = f"copilot-session-{session.session_id}"
         synthetic_graph_exec_id = f"copilot-session-{session.session_id}"
-        synthetic_node_id = f"copilot-node-{params.block_id}"
+        synthetic_node_id = f"copilot-node-{block_id}"
         synthetic_node_exec_id = (
             f"copilot-{session.session_id}-{uuid.uuid4().hex[:8]}"
         )

@@ -312,8 +298,8 @@ class RunBlockTool(BaseTool):
 
         for field_name, cred_meta in matched_credentials.items():
             # Inject metadata into input_data (for validation)
-            if field_name not in params.input_data:
-                params.input_data[field_name] = cred_meta.model_dump()
+            if field_name not in input_data:
+                input_data[field_name] = cred_meta.model_dump()
 
             # Fetch actual credentials and pass as kwargs (for execution)
             actual_credentials = await creds_manager.get(

@@ -330,14 +316,14 @@ class RunBlockTool(BaseTool):
         # Execute the block and collect outputs
         outputs: dict[str, list[Any]] = defaultdict(list)
         async for output_name, output_data in block.execute(
-            params.input_data,
+            input_data,
             **exec_kwargs,
         ):
             outputs[output_name].append(output_data)
 
         return BlockOutputResponse(
             message=f"Block '{block.name}' executed successfully",
-            block_id=params.block_id,
+            block_id=block_id,
             block_name=block.name,
             outputs=dict(outputs),
             success=True,
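
The synthetic IDs above give every copilot block run a session-scoped graph identity and a unique per-execution node exec ID. The scheme, restated as a standalone sketch using the string shapes from the hunk:

    # Sketch of the synthetic execution-ID scheme.
    import uuid

    def synthetic_ids(session_id: str, block_id: str) -> dict[str, str]:
        return {
            # Shared by all block runs in a session
            "graph_id": f"copilot-session-{session_id}",
            "graph_exec_id": f"copilot-session-{session_id}",
            # Stable per block within the session
            "node_id": f"copilot-node-{block_id}",
            # Unique per execution
            "node_exec_id": f"copilot-{session_id}-{uuid.uuid4().hex[:8]}",
        }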
@@ -4,7 +4,6 @@ import logging
 from typing import Any
 
 from prisma.enums import ContentType
-from pydantic import BaseModel, field_validator
 
 from backend.api.features.chat.model import ChatSession
 from backend.api.features.chat.tools.base import BaseTool

@@ -29,18 +28,6 @@ MAX_RESULTS = 5
 SNIPPET_LENGTH = 200
 
 
-class SearchDocsInput(BaseModel):
-    """Input parameters for the search_docs tool."""
-
-    query: str = ""
-
-    @field_validator("query", mode="before")
-    @classmethod
-    def strip_string(cls, v: Any) -> str:
-        """Strip whitespace from query."""
-        return v.strip() if isinstance(v, str) else (v if v is not None else "")
-
-
 class SearchDocsTool(BaseTool):
     """Tool for searching AutoGPT platform documentation."""

@@ -104,24 +91,24 @@ class SearchDocsTool(BaseTool):
         self,
         user_id: str | None,
         session: ChatSession,
-        **kwargs: Any,
+        **kwargs,
     ) -> ToolResponseBase:
         """Search documentation and return relevant sections.
 
         Args:
             user_id: User ID (not required for docs)
             session: Chat session
             **kwargs: Tool parameters
                 query: Search query
 
         Returns:
             DocSearchResultsResponse: List of matching documentation sections
             NoResultsResponse: No results found
             ErrorResponse: Error message
         """
-        params = SearchDocsInput(**kwargs)
+        query = kwargs.get("query", "").strip()
         session_id = session.session_id if session else None
 
-        if not params.query:
+        if not query:
             return ErrorResponse(
                 message="Please provide a search query.",
                 error="Missing query parameter",

@@ -131,7 +118,7 @@ class SearchDocsTool(BaseTool):
         try:
             # Search using hybrid search for DOCUMENTATION content type only
             results, total = await unified_hybrid_search(
-                query=params.query,
+                query=query,
                 content_types=[ContentType.DOCUMENTATION],
                 page=1,
                 page_size=MAX_RESULTS * 2,  # Fetch extra for deduplication

@@ -140,7 +127,7 @@ class SearchDocsTool(BaseTool):
 
         if not results:
             return NoResultsResponse(
-                message=f"No documentation found for '{params.query}'.",
+                message=f"No documentation found for '{query}'.",
                 suggestions=[
                     "Try different keywords",
                     "Use more general terms",

@@ -175,7 +162,7 @@ class SearchDocsTool(BaseTool):
 
         if not deduplicated:
             return NoResultsResponse(
-                message=f"No documentation found for '{params.query}'.",
+                message=f"No documentation found for '{query}'.",
                 suggestions=[
                     "Try different keywords",
                     "Use more general terms",

@@ -208,7 +195,7 @@ class SearchDocsTool(BaseTool):
             message=f"Found {len(doc_results)} relevant documentation sections.",
             results=doc_results,
             count=len(doc_results),
-            query=params.query,
+            query=query,
             session_id=session_id,
         )
@@ -6,9 +6,13 @@ from typing import Any
 from backend.api.features.library import db as library_db
 from backend.api.features.library import model as library_model
 from backend.api.features.store import db as store_db
-from backend.data import graph as graph_db
 from backend.data.graph import GraphModel
-from backend.data.model import Credentials, CredentialsFieldInfo, CredentialsMetaInput
+from backend.data.model import (
+    CredentialsFieldInfo,
+    CredentialsMetaInput,
+    HostScopedCredentials,
+    OAuth2Credentials,
+)
 from backend.integrations.creds_manager import IntegrationCredentialsManager
 from backend.util.exceptions import NotFoundError

@@ -39,14 +43,8 @@ async def fetch_graph_from_store_slug(
         return None, None
 
     # Get the graph from store listing version
-    graph_meta = await store_db.get_available_graph(
-        store_agent.store_listing_version_id
-    )
-    graph = await graph_db.get_graph(
-        graph_id=graph_meta.id,
-        version=graph_meta.version,
-        user_id=None,  # Public access
-        include_subgraphs=True,
+    graph = await store_db.get_available_graph(
+        store_agent.store_listing_version_id, hide_nodes=False
     )
     return graph, store_agent

@@ -119,11 +117,11 @@ def build_missing_credentials_from_graph(
     preserving all supported credential types for each field.
     """
     matched_keys = set(matched_credentials.keys()) if matched_credentials else set()
-    aggregated_fields = graph.aggregate_credentials_inputs()
+    aggregated_fields = graph.regular_credentials_inputs
 
     return {
         field_key: _serialize_missing_credential(field_key, field_info)
-        for field_key, (field_info, _node_fields) in aggregated_fields.items()
+        for field_key, (field_info, _, _) in aggregated_fields.items()
         if field_key not in matched_keys
     }

@@ -246,7 +244,7 @@ async def match_user_credentials_to_graph(
     missing_creds: list[str] = []
 
     # Get aggregated credentials requirements from the graph
-    aggregated_creds = graph.aggregate_credentials_inputs()
+    aggregated_creds = graph.regular_credentials_inputs
     logger.debug(
         f"Matching credentials for graph {graph.id}: {len(aggregated_creds)} required"
     )

@@ -264,7 +262,8 @@ async def match_user_credentials_to_graph(
     # provider is in the set of acceptable providers.
     for credential_field_name, (
         credential_requirements,
-        _node_fields,
+        _,
+        _,
     ) in aggregated_creds.items():
         # Find first matching credential by provider, type, and scopes
         matching_cred = next(

@@ -273,7 +272,14 @@ async def match_user_credentials_to_graph(
             (
                 cred
                 for cred in available_creds
                 if cred.provider in credential_requirements.provider
                 and cred.type in credential_requirements.supported_types
-                and _credential_has_required_scopes(cred, credential_requirements)
+                and (
+                    cred.type != "oauth2"
+                    or _credential_has_required_scopes(cred, credential_requirements)
+                )
+                and (
+                    cred.type != "host_scoped"
+                    or _credential_is_for_host(cred, credential_requirements)
+                )
             ),
             None,
         )

@@ -318,19 +324,10 @@ async def match_user_credentials_to_graph(
 
 
 def _credential_has_required_scopes(
-    credential: Credentials,
+    credential: OAuth2Credentials,
     requirements: CredentialsFieldInfo,
 ) -> bool:
-    """
-    Check if a credential has all the scopes required by the block.
-
-    For OAuth2 credentials, verifies that the credential's scopes are a superset
-    of the required scopes. For other credential types, returns True (no scope check).
-    """
-    # Only OAuth2 credentials have scopes to check
-    if credential.type != "oauth2":
-        return True
-
+    """Check if an OAuth2 credential has all the scopes required by the input."""
     # If no scopes are required, any credential matches
     if not requirements.required_scopes:
         return True

@@ -339,6 +336,22 @@ def _credential_has_required_scopes(
     return set(credential.scopes).issuperset(requirements.required_scopes)
 
 
+def _credential_is_for_host(
+    credential: HostScopedCredentials,
+    requirements: CredentialsFieldInfo,
+) -> bool:
+    """Check if a host-scoped credential matches the host required by the input."""
+    # We need to know the host to match host-scoped credentials to.
+    # Graph.aggregate_credentials_inputs() adds the node's set URL value (if any)
+    # to discriminator_values. No discriminator_values -> no host to match against.
+    if not requirements.discriminator_values:
+        return True
+
+    # Check that credential host matches required host.
+    # Host-scoped credential inputs are grouped by host, so any item from the set works.
+    return credential.matches_url(list(requirements.discriminator_values)[0])
+
+
 async def check_user_has_required_credentials(
     user_id: str,
     required_credentials: list[CredentialsMetaInput],
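
The generator expression above now applies the scope check only to oauth2 credentials and the host check only to host_scoped ones. The same predicate, pulled out as a standalone sketch (cred and requirements are simplified stand-ins for the real models):

    # Sketch of the per-credential matching predicate from the hunk above.
    def credential_matches(cred, requirements) -> bool:
        if cred.provider not in requirements.provider:
            return False
        if cred.type not in requirements.supported_types:
            return False
        # Scope check only applies to OAuth2 credentials
        if cred.type == "oauth2" and requirements.required_scopes:
            if not set(cred.scopes).issuperset(requirements.required_scopes):
                return False
        # Host check only applies to host-scoped credentials
        if cred.type == "host_scoped" and requirements.discriminator_values:
            if not cred.matches_url(list(requirements.discriminator_values)[0]):
                return False
        return True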
@@ -0,0 +1,78 @@
+"""Tests for chat tools utility functions."""
+
+from unittest.mock import AsyncMock, MagicMock, patch
+
+import pytest
+
+from backend.data.model import CredentialsFieldInfo
+
+
+def _make_regular_field() -> CredentialsFieldInfo:
+    return CredentialsFieldInfo.model_validate(
+        {
+            "credentials_provider": ["github"],
+            "credentials_types": ["api_key"],
+            "is_auto_credential": False,
+        },
+        by_alias=True,
+    )
+
+
+def test_build_missing_credentials_excludes_auto_creds():
+    """
+    build_missing_credentials_from_graph() should use regular_credentials_inputs
+    and thus exclude auto_credentials from the "missing" set.
+    """
+    from backend.api.features.chat.tools.utils import (
+        build_missing_credentials_from_graph,
+    )
+
+    regular_field = _make_regular_field()
+
+    mock_graph = MagicMock()
+    # regular_credentials_inputs should only return the non-auto field
+    mock_graph.regular_credentials_inputs = {
+        "github_api_key": (regular_field, {("node-1", "credentials")}, True),
+    }
+
+    result = build_missing_credentials_from_graph(mock_graph, matched_credentials=None)
+
+    # Should include the regular credential
+    assert "github_api_key" in result
+    # Should NOT include the auto_credential (not in regular_credentials_inputs)
+    assert "google_oauth2" not in result
+
+
+@pytest.mark.asyncio
+async def test_match_user_credentials_excludes_auto_creds():
+    """
+    match_user_credentials_to_graph() should use regular_credentials_inputs
+    and thus exclude auto_credentials from matching.
+    """
+    from backend.api.features.chat.tools.utils import match_user_credentials_to_graph
+
+    regular_field = _make_regular_field()
+
+    mock_graph = MagicMock()
+    mock_graph.id = "test-graph"
+    # regular_credentials_inputs returns only non-auto fields
+    mock_graph.regular_credentials_inputs = {
+        "github_api_key": (regular_field, {("node-1", "credentials")}, True),
+    }
+
+    # Mock the credentials manager to return no credentials
+    with patch(
+        "backend.api.features.chat.tools.utils.IntegrationCredentialsManager"
+    ) as MockCredsMgr:
+        mock_store = AsyncMock()
+        mock_store.get_all_creds.return_value = []
+        MockCredsMgr.return_value.store = mock_store
+
+        matched, missing = await match_user_credentials_to_graph(
+            user_id="test-user", graph=mock_graph
+        )
+
+        # No credentials available, so github should be missing
+        assert len(matched) == 0
+        assert len(missing) == 1
+        assert "github_api_key" in missing[0]
@@ -2,9 +2,9 @@

 import base64
 import logging
-from typing import Any
+from typing import Any, Optional

-from pydantic import BaseModel, field_validator
+from pydantic import BaseModel

 from backend.api.features.chat.model import ChatSession
 from backend.data.workspace import get_or_create_workspace

@@ -78,65 +78,6 @@ class WorkspaceDeleteResponse(ToolResponseBase):
    success: bool


-# Input models for workspace tools
-class ListWorkspaceFilesInput(BaseModel):
-   """Input parameters for list_workspace_files tool."""
-
-   path_prefix: str | None = None
-   limit: int = 50
-   include_all_sessions: bool = False
-
-   @field_validator("path_prefix", mode="before")
-   @classmethod
-   def strip_path(cls, v: Any) -> str | None:
-       return v.strip() if isinstance(v, str) else None
-
-
-class ReadWorkspaceFileInput(BaseModel):
-   """Input parameters for read_workspace_file tool."""
-
-   file_id: str | None = None
-   path: str | None = None
-   force_download_url: bool = False
-
-   @field_validator("file_id", "path", mode="before")
-   @classmethod
-   def strip_strings(cls, v: Any) -> str | None:
-       return v.strip() if isinstance(v, str) else None
-
-
-class WriteWorkspaceFileInput(BaseModel):
-   """Input parameters for write_workspace_file tool."""
-
-   filename: str = ""
-   content_base64: str = ""
-   path: str | None = None
-   mime_type: str | None = None
-   overwrite: bool = False
-
-   @field_validator("filename", "content_base64", mode="before")
-   @classmethod
-   def strip_required(cls, v: Any) -> str:
-       return v.strip() if isinstance(v, str) else (v if v is not None else "")
-
-   @field_validator("path", "mime_type", mode="before")
-   @classmethod
-   def strip_optional(cls, v: Any) -> str | None:
-       return v.strip() if isinstance(v, str) else None
-
-
-class DeleteWorkspaceFileInput(BaseModel):
-   """Input parameters for delete_workspace_file tool."""
-
-   file_id: str | None = None
-   path: str | None = None
-
-   @field_validator("file_id", "path", mode="before")
-   @classmethod
-   def strip_strings(cls, v: Any) -> str | None:
-       return v.strip() if isinstance(v, str) else None
-
-
class ListWorkspaceFilesTool(BaseTool):
    """Tool for listing files in user's workspace."""

@@ -190,9 +131,8 @@ class ListWorkspaceFilesTool(BaseTool):
        self,
        user_id: str | None,
        session: ChatSession,
-       **kwargs: Any,
+       **kwargs,
    ) -> ToolResponseBase:
-       params = ListWorkspaceFilesInput(**kwargs)
        session_id = session.session_id

        if not user_id:

@@ -201,7 +141,9 @@ class ListWorkspaceFilesTool(BaseTool):
                session_id=session_id,
            )

-       limit = min(params.limit, 100)
+       path_prefix: Optional[str] = kwargs.get("path_prefix")
+       limit = min(kwargs.get("limit", 50), 100)
+       include_all_sessions: bool = kwargs.get("include_all_sessions", False)

        try:
            workspace = await get_or_create_workspace(user_id)

@@ -209,13 +151,13 @@ class ListWorkspaceFilesTool(BaseTool):
            manager = WorkspaceManager(user_id, workspace.id, session_id)

            files = await manager.list_files(
-               path=params.path_prefix,
+               path=path_prefix,
                limit=limit,
-               include_all_sessions=params.include_all_sessions,
+               include_all_sessions=include_all_sessions,
            )
            total = await manager.get_file_count(
-               path=params.path_prefix,
-               include_all_sessions=params.include_all_sessions,
+               path=path_prefix,
+               include_all_sessions=include_all_sessions,
            )

            file_infos = [

@@ -229,9 +171,7 @@ class ListWorkspaceFilesTool(BaseTool):
                for f in files
            ]

-           scope_msg = (
-               "all sessions" if params.include_all_sessions else "current session"
-           )
+           scope_msg = "all sessions" if include_all_sessions else "current session"
            return WorkspaceFileListResponse(
                files=file_infos,
                total_count=total,

@@ -319,9 +259,8 @@ class ReadWorkspaceFileTool(BaseTool):
        self,
        user_id: str | None,
        session: ChatSession,
-       **kwargs: Any,
+       **kwargs,
    ) -> ToolResponseBase:
-       params = ReadWorkspaceFileInput(**kwargs)
        session_id = session.session_id

        if not user_id:

@@ -330,7 +269,11 @@ class ReadWorkspaceFileTool(BaseTool):
                session_id=session_id,
            )

-       if not params.file_id and not params.path:
+       file_id: Optional[str] = kwargs.get("file_id")
+       path: Optional[str] = kwargs.get("path")
+       force_download_url: bool = kwargs.get("force_download_url", False)
+
+       if not file_id and not path:
            return ErrorResponse(
                message="Please provide either file_id or path",
                session_id=session_id,

@@ -342,21 +285,21 @@ class ReadWorkspaceFileTool(BaseTool):
            manager = WorkspaceManager(user_id, workspace.id, session_id)

            # Get file info
-           if params.file_id:
-               file_info = await manager.get_file_info(params.file_id)
+           if file_id:
+               file_info = await manager.get_file_info(file_id)
                if file_info is None:
                    return ErrorResponse(
-                       message=f"File not found: {params.file_id}",
+                       message=f"File not found: {file_id}",
                        session_id=session_id,
                    )
-               target_file_id = params.file_id
+               target_file_id = file_id
            else:
                # path is guaranteed to be non-None here due to the check above
-               assert params.path is not None
-               file_info = await manager.get_file_info_by_path(params.path)
+               assert path is not None
+               file_info = await manager.get_file_info_by_path(path)
                if file_info is None:
                    return ErrorResponse(
-                       message=f"File not found at path: {params.path}",
+                       message=f"File not found at path: {path}",
                        session_id=session_id,
                    )
                target_file_id = file_info.id

@@ -366,7 +309,7 @@ class ReadWorkspaceFileTool(BaseTool):
            is_text_file = self._is_text_mime_type(file_info.mimeType)

            # Return inline content for small text files (unless force_download_url)
-           if is_small_file and is_text_file and not params.force_download_url:
+           if is_small_file and is_text_file and not force_download_url:
                content = await manager.read_file_by_id(target_file_id)
                content_b64 = base64.b64encode(content).decode("utf-8")

@@ -486,9 +429,8 @@ class WriteWorkspaceFileTool(BaseTool):
        self,
        user_id: str | None,
        session: ChatSession,
-       **kwargs: Any,
+       **kwargs,
    ) -> ToolResponseBase:
-       params = WriteWorkspaceFileInput(**kwargs)
        session_id = session.session_id

        if not user_id:

@@ -497,13 +439,19 @@ class WriteWorkspaceFileTool(BaseTool):
                session_id=session_id,
            )

-       if not params.filename:
+       filename: str = kwargs.get("filename", "")
+       content_b64: str = kwargs.get("content_base64", "")
+       path: Optional[str] = kwargs.get("path")
+       mime_type: Optional[str] = kwargs.get("mime_type")
+       overwrite: bool = kwargs.get("overwrite", False)
+
+       if not filename:
            return ErrorResponse(
                message="Please provide a filename",
                session_id=session_id,
            )

-       if not params.content_base64:
+       if not content_b64:
            return ErrorResponse(
                message="Please provide content_base64",
                session_id=session_id,

@@ -511,7 +459,7 @@ class WriteWorkspaceFileTool(BaseTool):

        # Decode content
        try:
-           content = base64.b64decode(params.content_base64)
+           content = base64.b64decode(content_b64)
        except Exception:
            return ErrorResponse(
                message="Invalid base64-encoded content",

@@ -528,7 +476,7 @@ class WriteWorkspaceFileTool(BaseTool):

        try:
            # Virus scan
-           await scan_content_safe(content, filename=params.filename)
+           await scan_content_safe(content, filename=filename)

            workspace = await get_or_create_workspace(user_id)
            # Pass session_id for session-scoped file access

@@ -536,10 +484,10 @@ class WriteWorkspaceFileTool(BaseTool):

            file_record = await manager.write_file(
                content=content,
-               filename=params.filename,
-               path=params.path,
-               mime_type=params.mime_type,
-               overwrite=params.overwrite,
+               filename=filename,
+               path=path,
+               mime_type=mime_type,
+               overwrite=overwrite,
            )

            return WorkspaceWriteResponse(

@@ -609,9 +557,8 @@ class DeleteWorkspaceFileTool(BaseTool):
        self,
        user_id: str | None,
        session: ChatSession,
-       **kwargs: Any,
+       **kwargs,
    ) -> ToolResponseBase:
-       params = DeleteWorkspaceFileInput(**kwargs)
        session_id = session.session_id

        if not user_id:

@@ -620,7 +567,10 @@ class DeleteWorkspaceFileTool(BaseTool):
                session_id=session_id,
            )

-       if not params.file_id and not params.path:
+       file_id: Optional[str] = kwargs.get("file_id")
+       path: Optional[str] = kwargs.get("path")
+
+       if not file_id and not path:
            return ErrorResponse(
                message="Please provide either file_id or path",
                session_id=session_id,

@@ -633,15 +583,15 @@ class DeleteWorkspaceFileTool(BaseTool):

            # Determine the file_id to delete
            target_file_id: str
-           if params.file_id:
-               target_file_id = params.file_id
+           if file_id:
+               target_file_id = file_id
            else:
                # path is guaranteed to be non-None here due to the check above
-               assert params.path is not None
-               file_info = await manager.get_file_info_by_path(params.path)
+               assert path is not None
+               file_info = await manager.get_file_info_by_path(path)
                if file_info is None:
                    return ErrorResponse(
-                       message=f"File not found at path: {params.path}",
+                       message=f"File not found at path: {path}",
                        session_id=session_id,
                    )
                target_file_id = file_info.id
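The net effect of this diff: tool arguments are read straight from kwargs with explicit defaults instead of being funneled through per-tool Pydantic input models. A minimal sketch of the new pattern (function name and error handling are illustrative, not the tool code itself):

from typing import Optional

def execute(**kwargs) -> None:
    # Read optional arguments directly, supplying defaults inline
    file_id: Optional[str] = kwargs.get("file_id")
    path: Optional[str] = kwargs.get("path")
    limit = min(kwargs.get("limit", 50), 100)  # clamp to a hard ceiling

    if not file_id and not path:
        raise ValueError("Please provide either file_id or path")

One trade-off worth noting: the removed field_validator hooks also stripped whitespace from string inputs, which the plain kwargs.get path no longer does.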
@@ -19,7 +19,10 @@ from backend.data.graph import GraphSettings
 from backend.data.includes import AGENT_PRESET_INCLUDE, library_agent_include
 from backend.data.model import CredentialsMetaInput
 from backend.integrations.creds_manager import IntegrationCredentialsManager
-from backend.integrations.webhooks.graph_lifecycle_hooks import on_graph_activate
+from backend.integrations.webhooks.graph_lifecycle_hooks import (
+    on_graph_activate,
+    on_graph_deactivate,
+)
 from backend.util.clients import get_scheduler_client
 from backend.util.exceptions import DatabaseError, InvalidInputError, NotFoundError
 from backend.util.json import SafeJson

@@ -371,7 +374,7 @@ async def get_library_agent_by_graph_id(


async def add_generated_agent_image(
-   graph: graph_db.BaseGraph,
+   graph: graph_db.GraphBaseMeta,
    user_id: str,
    library_agent_id: str,
) -> Optional[prisma.models.LibraryAgent]:

@@ -537,6 +540,92 @@ async def update_agent_version_in_library(
    return library_model.LibraryAgent.from_db(lib)


+async def create_graph_in_library(
+   graph: graph_db.Graph,
+   user_id: str,
+) -> tuple[graph_db.GraphModel, library_model.LibraryAgent]:
+   """Create a new graph and add it to the user's library."""
+   graph.version = 1
+   graph_model = graph_db.make_graph_model(graph, user_id)
+   graph_model.reassign_ids(user_id=user_id, reassign_graph_id=True)
+
+   created_graph = await graph_db.create_graph(graph_model, user_id)
+
+   library_agents = await create_library_agent(
+       graph=created_graph,
+       user_id=user_id,
+       sensitive_action_safe_mode=True,
+       create_library_agents_for_sub_graphs=False,
+   )
+
+   if created_graph.is_active:
+       created_graph = await on_graph_activate(created_graph, user_id=user_id)
+
+   return created_graph, library_agents[0]
+
+
+async def update_graph_in_library(
+   graph: graph_db.Graph,
+   user_id: str,
+) -> tuple[graph_db.GraphModel, library_model.LibraryAgent]:
+   """Create a new version of an existing graph and update the library entry."""
+   existing_versions = await graph_db.get_graph_all_versions(graph.id, user_id)
+   current_active_version = (
+       next((v for v in existing_versions if v.is_active), None)
+       if existing_versions
+       else None
+   )
+   graph.version = (
+       max(v.version for v in existing_versions) + 1 if existing_versions else 1
+   )
+
+   graph_model = graph_db.make_graph_model(graph, user_id)
+   graph_model.reassign_ids(user_id=user_id, reassign_graph_id=False)
+
+   created_graph = await graph_db.create_graph(graph_model, user_id)
+
+   library_agent = await get_library_agent_by_graph_id(user_id, created_graph.id)
+   if not library_agent:
+       raise NotFoundError(f"Library agent not found for graph {created_graph.id}")
+
+   library_agent = await update_library_agent_version_and_settings(
+       user_id, created_graph
+   )
+
+   if created_graph.is_active:
+       created_graph = await on_graph_activate(created_graph, user_id=user_id)
+       await graph_db.set_graph_active_version(
+           graph_id=created_graph.id,
+           version=created_graph.version,
+           user_id=user_id,
+       )
+       if current_active_version:
+           await on_graph_deactivate(current_active_version, user_id=user_id)
+
+   return created_graph, library_agent
+
+
+async def update_library_agent_version_and_settings(
+   user_id: str, agent_graph: graph_db.GraphModel
+) -> library_model.LibraryAgent:
+   """Update library agent to point to new graph version and sync settings."""
+   library = await update_agent_version_in_library(
+       user_id, agent_graph.id, agent_graph.version
+   )
+   updated_settings = GraphSettings.from_graph(
+       graph=agent_graph,
+       hitl_safe_mode=library.settings.human_in_the_loop_safe_mode,
+       sensitive_action_safe_mode=library.settings.sensitive_action_safe_mode,
+   )
+   if updated_settings != library.settings:
+       library = await update_library_agent(
+           library_agent_id=library.id,
+           user_id=user_id,
+           settings=updated_settings,
+       )
+   return library
+
+
async def update_library_agent(
    library_agent_id: str,
    user_id: str,

@@ -1014,7 +1103,7 @@ async def create_preset_from_graph_execution(
        raise NotFoundError(
            f"Graph #{graph_execution.graph_id} not found or accessible"
        )
-   elif len(graph.aggregate_credentials_inputs()) > 0:
+   elif len(graph.regular_credentials_inputs) > 0:
        raise ValueError(
            f"Graph execution #{graph_exec_id} can't be turned into a preset "
            "because it was run before this feature existed "
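A rough sketch of how a caller might use the two new helpers (argument values are made up for illustration):

# Create version 1 of a brand-new graph and get its library entry
created, library_entry = await create_graph_in_library(graph=new_graph, user_id=user_id)

# Later edits always append a new version; the activate/deactivate
# lifecycle hooks run only when the new version is marked active
updated, library_entry = await update_graph_in_library(graph=edited_graph, user_id=user_id)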
@@ -1,7 +1,7 @@
 import asyncio
 import logging
 from datetime import datetime, timezone
-from typing import Any, Literal
+from typing import Any, Literal, overload

 import fastapi
 import prisma.enums

@@ -11,8 +11,8 @@ import prisma.types

 from backend.data.db import transaction
 from backend.data.graph import (
-    GraphMeta,
     GraphModel,
+    GraphModelWithoutNodes,
     get_graph,
     get_graph_as_admin,
     get_sub_graphs,

@@ -334,7 +334,22 @@ async def get_store_agent_details(
        raise DatabaseError("Failed to fetch agent details") from e


-async def get_available_graph(store_listing_version_id: str) -> GraphMeta:
+@overload
+async def get_available_graph(
+   store_listing_version_id: str, hide_nodes: Literal[False]
+) -> GraphModel: ...
+
+
+@overload
+async def get_available_graph(
+   store_listing_version_id: str, hide_nodes: Literal[True] = True
+) -> GraphModelWithoutNodes: ...
+
+
+async def get_available_graph(
+   store_listing_version_id: str,
+   hide_nodes: bool = True,
+) -> GraphModelWithoutNodes | GraphModel:
    try:
        # Get avaialble, non-deleted store listing version
        store_listing_version = (

@@ -344,7 +359,7 @@ async def get_available_graph(store_listing_version_id: str) -> GraphMeta:
                "isAvailable": True,
                "isDeleted": False,
            },
-           include={"AgentGraph": {"include": {"Nodes": True}}},
+           include={"AgentGraph": {"include": AGENT_GRAPH_INCLUDE}},
        )
    )

@@ -354,7 +369,9 @@ async def get_available_graph(store_listing_version_id: str) -> GraphMeta:
            detail=f"Store listing version {store_listing_version_id} not found",
        )

-       return GraphModel.from_db(store_listing_version.AgentGraph).meta()
+       return (GraphModelWithoutNodes if hide_nodes else GraphModel).from_db(
+           store_listing_version.AgentGraph
+       )

    except Exception as e:
        logger.error(f"Error getting agent: {e}")
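The @overload pair lets static type checkers narrow the return type from the literal value of hide_nodes, so callers that pass hide_nodes=False get the full model without casts. A self-contained sketch of the same technique (stand-in classes, not the store models):

from typing import Literal, overload

class Full: ...   # stand-in for GraphModel
class Slim: ...   # stand-in for GraphModelWithoutNodes

@overload
def load(hide_nodes: Literal[False]) -> Full: ...
@overload
def load(hide_nodes: Literal[True] = True) -> Slim: ...

def load(hide_nodes: bool = True) -> Full | Slim:
    # Single runtime implementation; the overloads exist only for the checker
    return Slim() if hide_nodes else Full()

slim = load()                   # type checker infers Slim
full = load(hide_nodes=False)   # type checker infers Full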
@@ -16,7 +16,7 @@ from backend.blocks.ideogram import (
     StyleType,
     UpscaleOption,
 )
-from backend.data.graph import BaseGraph
+from backend.data.graph import GraphBaseMeta
 from backend.data.model import CredentialsMetaInput, ProviderName
 from backend.integrations.credentials_store import ideogram_credentials
 from backend.util.request import Requests

@@ -34,14 +34,14 @@ class ImageStyle(str, Enum):
    DIGITAL_ART = "digital art"


-async def generate_agent_image(agent: BaseGraph | AgentGraph) -> io.BytesIO:
+async def generate_agent_image(agent: GraphBaseMeta | AgentGraph) -> io.BytesIO:
    if settings.config.use_agent_image_generation_v2:
        return await generate_agent_image_v2(graph=agent)
    else:
        return await generate_agent_image_v1(agent=agent)


-async def generate_agent_image_v2(graph: BaseGraph | AgentGraph) -> io.BytesIO:
+async def generate_agent_image_v2(graph: GraphBaseMeta | AgentGraph) -> io.BytesIO:
    """
    Generate an image for an agent using Ideogram model.
    Returns:

@@ -54,14 +54,17 @@ async def generate_agent_image_v2(graph: BaseGraph | AgentGraph) -> io.BytesIO:
    description = f"{name} ({graph.description})" if graph.description else name

    prompt = (
-       f"Create a visually striking retro-futuristic vector pop art illustration prominently featuring "
-       f'"{name}" in bold typography. The image clearly and literally depicts a {description}, '
-       f"along with recognizable objects directly associated with the primary function of a {name}. "
-       f"Ensure the imagery is concrete, intuitive, and immediately understandable, clearly conveying the "
-       f"purpose of a {name}. Maintain vibrant, limited-palette colors, sharp vector lines, geometric "
-       f"shapes, flat illustration techniques, and solid colors without gradients or shading. Preserve a "
-       f"retro-futuristic aesthetic influenced by mid-century futurism and 1960s psychedelia, "
-       f"prioritizing clear visual storytelling and thematic clarity above all else."
+       "Create a visually striking retro-futuristic vector pop art illustration "
+       f'prominently featuring "{name}" in bold typography. The image clearly and '
+       f"literally depicts a {description}, along with recognizable objects directly "
+       f"associated with the primary function of a {name}. "
+       f"Ensure the imagery is concrete, intuitive, and immediately understandable, "
+       f"clearly conveying the purpose of a {name}. "
+       "Maintain vibrant, limited-palette colors, sharp vector lines, "
+       "geometric shapes, flat illustration techniques, and solid colors "
+       "without gradients or shading. Preserve a retro-futuristic aesthetic "
+       "influenced by mid-century futurism and 1960s psychedelia, "
+       "prioritizing clear visual storytelling and thematic clarity above all else."
    )

    custom_colors = [

@@ -99,12 +102,12 @@ async def generate_agent_image_v2(graph: BaseGraph | AgentGraph) -> io.BytesIO:
    return io.BytesIO(response.content)


-async def generate_agent_image_v1(agent: BaseGraph | AgentGraph) -> io.BytesIO:
+async def generate_agent_image_v1(agent: GraphBaseMeta | AgentGraph) -> io.BytesIO:
    """
    Generate an image for an agent using Flux model via Replicate API.

    Args:
-       agent (Graph): The agent to generate an image for
+       agent (GraphBaseMeta | AgentGraph): The agent to generate an image for

    Returns:
        io.BytesIO: The generated image as bytes

@@ -114,7 +117,13 @@ async def generate_agent_image_v1(agent: BaseGraph | AgentGraph) -> io.BytesIO:
        raise ValueError("Missing Replicate API key in settings")

    # Construct prompt from agent details
-   prompt = f"Create a visually engaging app store thumbnail for the AI agent that highlights what it does in a clear and captivating way:\n- **Name**: {agent.name}\n- **Description**: {agent.description}\nFocus on showcasing its core functionality with an appealing design."
+   prompt = (
+       "Create a visually engaging app store thumbnail for the AI agent "
+       "that highlights what it does in a clear and captivating way:\n"
+       f"- **Name**: {agent.name}\n"
+       f"- **Description**: {agent.description}\n"
+       f"Focus on showcasing its core functionality with an appealing design."
+   )

    # Set up Replicate client
    client = ReplicateClient(api_token=settings.secrets.replicate_api_key)
@@ -278,7 +278,7 @@ async def get_agent(
 )
 async def get_graph_meta_by_store_listing_version_id(
     store_listing_version_id: str,
-) -> backend.data.graph.GraphMeta:
+) -> backend.data.graph.GraphModelWithoutNodes:
     """
     Get Agent Graph from Store Listing Version ID.
     """
@@ -101,7 +101,6 @@ from backend.util.timezone_utils import (
 from backend.util.virus_scanner import scan_content_safe

 from .library import db as library_db
-from .library import model as library_model
 from .store.model import StoreAgentDetails


@@ -823,18 +822,16 @@ async def update_graph(
    graph: graph_db.Graph,
    user_id: Annotated[str, Security(get_user_id)],
) -> graph_db.GraphModel:
    # Sanity check
    if graph.id and graph.id != graph_id:
        raise HTTPException(400, detail="Graph ID does not match ID in URI")

    # Determine new version
    existing_versions = await graph_db.get_graph_all_versions(graph_id, user_id=user_id)
    if not existing_versions:
        raise HTTPException(404, detail=f"Graph #{graph_id} not found")
-   latest_version_number = max(g.version for g in existing_versions)
-   graph.version = latest_version_number + 1
-
+   graph.version = max(g.version for g in existing_versions) + 1
    current_active_version = next((v for v in existing_versions if v.is_active), None)

    graph = graph_db.make_graph_model(graph, user_id)
    graph.reassign_ids(user_id=user_id, reassign_graph_id=False)
    graph.validate_graph(for_run=False)

@@ -842,27 +839,23 @@ async def update_graph(
    new_graph_version = await graph_db.create_graph(graph, user_id=user_id)

    if new_graph_version.is_active:
        # Keep the library agent up to date with the new active version
-       await _update_library_agent_version_and_settings(user_id, new_graph_version)
-
-       # Handle activation of the new graph first to ensure continuity
+       await library_db.update_library_agent_version_and_settings(
+           user_id, new_graph_version
+       )
        new_graph_version = await on_graph_activate(new_graph_version, user_id=user_id)
        # Ensure new version is the only active version
        await graph_db.set_graph_active_version(
            graph_id=graph_id, version=new_graph_version.version, user_id=user_id
        )
        if current_active_version:
            # Handle deactivation of the previously active version
            await on_graph_deactivate(current_active_version, user_id=user_id)

    # Fetch new graph version *with sub-graphs* (needed for credentials input schema)
    new_graph_version_with_subgraphs = await graph_db.get_graph(
        graph_id,
        new_graph_version.version,
        user_id=user_id,
        include_subgraphs=True,
    )
-   assert new_graph_version_with_subgraphs  # make type checker happy
+   assert new_graph_version_with_subgraphs
    return new_graph_version_with_subgraphs


@@ -900,33 +893,15 @@ async def set_graph_active_version(
    )

    # Keep the library agent up to date with the new active version
-   await _update_library_agent_version_and_settings(user_id, new_active_graph)
+   await library_db.update_library_agent_version_and_settings(
+       user_id, new_active_graph
+   )

    if current_active_graph and current_active_graph.version != new_active_version:
        # Handle deactivation of the previously active version
        await on_graph_deactivate(current_active_graph, user_id=user_id)


-async def _update_library_agent_version_and_settings(
-   user_id: str, agent_graph: graph_db.GraphModel
-) -> library_model.LibraryAgent:
-   library = await library_db.update_agent_version_in_library(
-       user_id, agent_graph.id, agent_graph.version
-   )
-   updated_settings = GraphSettings.from_graph(
-       graph=agent_graph,
-       hitl_safe_mode=library.settings.human_in_the_loop_safe_mode,
-       sensitive_action_safe_mode=library.settings.sensitive_action_safe_mode,
-   )
-   if updated_settings != library.settings:
-       library = await library_db.update_library_agent(
-           library_agent_id=library.id,
-           user_id=user_id,
-           settings=updated_settings,
-       )
-   return library
-
-
@v1_router.patch(
    path="/graphs/{graph_id}/settings",
    summary="Update graph settings",
autogpt_platform/backend/backend/blocks/elevenlabs/_auth.py (new file, 28 lines)
@@ -0,0 +1,28 @@
"""ElevenLabs integration blocks - test credentials and shared utilities."""

from typing import Literal

from pydantic import SecretStr

from backend.data.model import APIKeyCredentials, CredentialsMetaInput
from backend.integrations.providers import ProviderName

TEST_CREDENTIALS = APIKeyCredentials(
    id="01234567-89ab-cdef-0123-456789abcdef",
    provider="elevenlabs",
    api_key=SecretStr("mock-elevenlabs-api-key"),
    title="Mock ElevenLabs API key",
    expires_at=None,
)

TEST_CREDENTIALS_INPUT = {
    "provider": TEST_CREDENTIALS.provider,
    "id": TEST_CREDENTIALS.id,
    "type": TEST_CREDENTIALS.type,
    "title": TEST_CREDENTIALS.title,
}

ElevenLabsCredentials = APIKeyCredentials
ElevenLabsCredentialsInput = CredentialsMetaInput[
    Literal[ProviderName.ELEVENLABS], Literal["api_key"]
]
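Following the pattern other provider _auth.py modules feed into, a block would declare its credentials input roughly like this (a sketch only; the CredentialsField options shown are illustrative, not taken from this diff):

from backend.blocks.elevenlabs._auth import ElevenLabsCredentialsInput
from backend.data.block import BlockSchemaInput
from backend.data.model import CredentialsField

class Input(BlockSchemaInput):  # nested inside a Block subclass in practice
    credentials: ElevenLabsCredentialsInput = CredentialsField(
        description="ElevenLabs API key with text-to-speech access",
    )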
autogpt_platform/backend/backend/blocks/encoder_block.py (new file, 77 lines)
@@ -0,0 +1,77 @@
"""Text encoding block for converting special characters to escape sequences."""

import codecs

from backend.data.block import (
    Block,
    BlockCategory,
    BlockOutput,
    BlockSchemaInput,
    BlockSchemaOutput,
)
from backend.data.model import SchemaField


class TextEncoderBlock(Block):
    """
    Encodes a string by converting special characters into escape sequences.

    This block is the inverse of TextDecoderBlock. It takes text containing
    special characters (like newlines, tabs, etc.) and converts them into
    their escape sequence representations (e.g., newline becomes \\n).
    """

    class Input(BlockSchemaInput):
        """Input schema for TextEncoderBlock."""

        text: str = SchemaField(
            description="A string containing special characters to be encoded",
            placeholder="Your text with newlines and quotes to encode",
        )

    class Output(BlockSchemaOutput):
        """Output schema for TextEncoderBlock."""

        encoded_text: str = SchemaField(
            description="The encoded text with special characters converted to escape sequences"
        )
        error: str = SchemaField(description="Error message if encoding fails")

    def __init__(self):
        super().__init__(
            id="5185f32e-4b65-4ecf-8fbb-873f003f09d6",
            description="Encodes a string by converting special characters into escape sequences",
            categories={BlockCategory.TEXT},
            input_schema=TextEncoderBlock.Input,
            output_schema=TextEncoderBlock.Output,
            test_input={
                "text": """Hello
World!
This is a "quoted" string."""
            },
            test_output=[
                (
                    "encoded_text",
                    """Hello\\nWorld!\\nThis is a "quoted" string.""",
                )
            ],
        )

    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
        """
        Encode the input text by converting special characters to escape sequences.

        Args:
            input_data: The input containing the text to encode.
            **kwargs: Additional keyword arguments (unused).

        Yields:
            The encoded text with escape sequences, or an error message if encoding fails.
        """
        try:
            encoded_text = codecs.encode(input_data.text, "unicode_escape").decode(
                "utf-8"
            )
            yield "encoded_text", encoded_text
        except Exception as e:
            yield "error", f"Encoding error: {str(e)}"
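The block is a thin wrapper around Python's unicode_escape codec; the round trip with the decoding direction behaves like this (a quick standalone check, not part of the diff):

import codecs

encoded = codecs.encode("Hello\nWorld", "unicode_escape").decode("utf-8")
assert encoded == "Hello\\nWorld"   # the newline became a literal backslash-n

decoded = codecs.decode(encoded, "unicode_escape")
assert decoded == "Hello\nWorld"    # the inverse operation restores the original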
@@ -162,8 +162,16 @@ class LinearClient:
                "searchTerm": team_name,
            }

-           team_id = await self.query(query, variables)
-           return team_id["teams"]["nodes"][0]["id"]
+           result = await self.query(query, variables)
+           nodes = result["teams"]["nodes"]
+
+           if not nodes:
+               raise LinearAPIException(
+                   f"Team '{team_name}' not found. Check the team name or key and try again.",
+                   status_code=404,
+               )
+
+           return nodes[0]["id"]
        except LinearAPIException as e:
            raise e

@@ -240,17 +248,44 @@ class LinearClient:
        except LinearAPIException as e:
            raise e

-   async def try_search_issues(self, term: str) -> list[Issue]:
+   async def try_search_issues(
+       self,
+       term: str,
+       max_results: int = 10,
+       team_id: str | None = None,
+   ) -> list[Issue]:
        try:
            query = """
-               query SearchIssues($term: String!, $includeComments: Boolean!) {
-                   searchIssues(term: $term, includeComments: $includeComments) {
+               query SearchIssues(
+                   $term: String!,
+                   $first: Int,
+                   $teamId: String
+               ) {
+                   searchIssues(
+                       term: $term,
+                       first: $first,
+                       teamId: $teamId
+                   ) {
                        nodes {
                            id
                            identifier
                            title
                            description
                            priority
                            createdAt
                            state {
                                id
                                name
                                type
                            }
                            project {
                                id
                                name
                            }
                            assignee {
                                id
                                name
                            }
                        }
                    }
                }

@@ -258,7 +293,8 @@ class LinearClient:

            variables: dict[str, Any] = {
                "term": term,
-               "includeComments": True,
+               "first": max_results,
+               "teamId": team_id,
            }

            issues = await self.query(query, variables)
@@ -17,7 +17,7 @@ from ._config import (
     LinearScope,
     linear,
 )
-from .models import CreateIssueResponse, Issue
+from .models import CreateIssueResponse, Issue, State


 class LinearCreateIssueBlock(Block):

@@ -135,9 +135,20 @@ class LinearSearchIssuesBlock(Block):
            description="Linear credentials with read permissions",
            required_scopes={LinearScope.READ},
        )
+       max_results: int = SchemaField(
+           description="Maximum number of results to return",
+           default=10,
+           ge=1,
+           le=100,
+       )
+       team_name: str | None = SchemaField(
+           description="Optional team name to filter results (e.g., 'Internal', 'Open Source')",
+           default=None,
+       )

    class Output(BlockSchemaOutput):
        issues: list[Issue] = SchemaField(description="List of issues")
        error: str = SchemaField(description="Error message if the search failed")

    def __init__(self):
        super().__init__(

@@ -145,8 +156,11 @@ class LinearSearchIssuesBlock(Block):
            description="Searches for issues on Linear",
            input_schema=self.Input,
            output_schema=self.Output,
+           categories={BlockCategory.PRODUCTIVITY, BlockCategory.ISSUE_TRACKING},
            test_input={
                "term": "Test issue",
+               "max_results": 10,
+               "team_name": None,
                "credentials": TEST_CREDENTIALS_INPUT_OAUTH,
            },
            test_credentials=TEST_CREDENTIALS_OAUTH,

@@ -156,10 +170,14 @@ class LinearSearchIssuesBlock(Block):
                [
                    Issue(
                        id="abc123",
-                       identifier="abc123",
+                       identifier="TST-123",
                        title="Test issue",
                        description="Test description",
                        priority=1,
+                       state=State(
+                           id="state1", name="In Progress", type="started"
+                       ),
+                       createdAt="2026-01-15T10:00:00.000Z",
                    )
                ],
            )

@@ -168,10 +186,12 @@ class LinearSearchIssuesBlock(Block):
            "search_issues": lambda *args, **kwargs: [
                Issue(
                    id="abc123",
-                   identifier="abc123",
+                   identifier="TST-123",
                    title="Test issue",
                    description="Test description",
                    priority=1,
+                   state=State(id="state1", name="In Progress", type="started"),
+                   createdAt="2026-01-15T10:00:00.000Z",
                )
            ]
        },

@@ -181,10 +201,22 @@ class LinearSearchIssuesBlock(Block):
    async def search_issues(
        credentials: OAuth2Credentials | APIKeyCredentials,
        term: str,
+       max_results: int = 10,
+       team_name: str | None = None,
    ) -> list[Issue]:
        client = LinearClient(credentials=credentials)
-       response: list[Issue] = await client.try_search_issues(term=term)
-       return response
+
+       # Resolve team name to ID if provided
+       # Raises LinearAPIException with descriptive message if team not found
+       team_id: str | None = None
+       if team_name:
+           team_id = await client.try_get_team_by_name(team_name=team_name)
+
+       return await client.try_search_issues(
+           term=term,
+           max_results=max_results,
+           team_id=team_id,
+       )

    async def run(
        self,

@@ -196,7 +228,10 @@ class LinearSearchIssuesBlock(Block):
        """Execute the issue search"""
        try:
            issues = await self.search_issues(
-               credentials=credentials, term=input_data.term
+               credentials=credentials,
+               term=input_data.term,
+               max_results=input_data.max_results,
+               team_name=input_data.team_name,
            )
            yield "issues", issues
        except LinearAPIException as e:
@@ -36,12 +36,21 @@ class Project(BaseModel):
    content: str | None = None


+class State(BaseModel):
+   id: str
+   name: str
+   type: str | None = (
+       None  # Workflow state type (e.g., "triage", "backlog", "started", "completed", "canceled")
+   )
+
+
class Issue(BaseModel):
    id: str
    identifier: str
    title: str
    description: str | None
    priority: int
+   state: State | None = None
    project: Project | None = None
+   createdAt: str | None = None
    comments: list[Comment] | None = None
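Taken together, a caller could now scope a Linear search roughly like this (illustrative values, inside an async context):

issues = await LinearSearchIssuesBlock.search_issues(
    credentials=credentials,   # OAuth2 or API-key credentials with READ scope
    term="payment webhook bug",
    max_results=25,            # clamped to 1..100 by the input schema
    team_name="Internal",      # resolved to a team ID; raises a clear 404 if unknown
)
for issue in issues:
    print(issue.identifier, issue.state.name if issue.state else "?", issue.createdAt)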
@@ -115,6 +115,7 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta):
     CLAUDE_4_5_OPUS = "claude-opus-4-5-20251101"
     CLAUDE_4_5_SONNET = "claude-sonnet-4-5-20250929"
     CLAUDE_4_5_HAIKU = "claude-haiku-4-5-20251001"
+    CLAUDE_4_6_OPUS = "claude-opus-4-6"
     CLAUDE_3_HAIKU = "claude-3-haiku-20240307"
     # AI/ML API models
     AIML_API_QWEN2_5_72B = "Qwen/Qwen2.5-72B-Instruct-Turbo"

@@ -270,6 +271,9 @@ MODEL_METADATA = {
     LlmModel.CLAUDE_4_SONNET: ModelMetadata(
         "anthropic", 200000, 64000, "Claude Sonnet 4", "Anthropic", "Anthropic", 2
     ),  # claude-4-sonnet-20250514
+    LlmModel.CLAUDE_4_6_OPUS: ModelMetadata(
+        "anthropic", 200000, 128000, "Claude Opus 4.6", "Anthropic", "Anthropic", 3
+    ),  # claude-opus-4-6
     LlmModel.CLAUDE_4_5_OPUS: ModelMetadata(
         "anthropic", 200000, 64000, "Claude Opus 4.5", "Anthropic", "Anthropic", 3
     ),  # claude-opus-4-5-20251101
@@ -1,246 +0,0 @@
import os
import tempfile
from typing import Optional

from moviepy.audio.io.AudioFileClip import AudioFileClip
from moviepy.video.fx.Loop import Loop
from moviepy.video.io.VideoFileClip import VideoFileClip

from backend.data.block import (
    Block,
    BlockCategory,
    BlockOutput,
    BlockSchemaInput,
    BlockSchemaOutput,
)
from backend.data.execution import ExecutionContext
from backend.data.model import SchemaField
from backend.util.file import MediaFileType, get_exec_file_path, store_media_file


class MediaDurationBlock(Block):

    class Input(BlockSchemaInput):
        media_in: MediaFileType = SchemaField(
            description="Media input (URL, data URI, or local path)."
        )
        is_video: bool = SchemaField(
            description="Whether the media is a video (True) or audio (False).",
            default=True,
        )

    class Output(BlockSchemaOutput):
        duration: float = SchemaField(
            description="Duration of the media file (in seconds)."
        )

    def __init__(self):
        super().__init__(
            id="d8b91fd4-da26-42d4-8ecb-8b196c6d84b6",
            description="Block to get the duration of a media file.",
            categories={BlockCategory.MULTIMEDIA},
            input_schema=MediaDurationBlock.Input,
            output_schema=MediaDurationBlock.Output,
        )

    async def run(
        self,
        input_data: Input,
        *,
        execution_context: ExecutionContext,
        **kwargs,
    ) -> BlockOutput:
        # 1) Store the input media locally
        local_media_path = await store_media_file(
            file=input_data.media_in,
            execution_context=execution_context,
            return_format="for_local_processing",
        )
        assert execution_context.graph_exec_id is not None
        media_abspath = get_exec_file_path(
            execution_context.graph_exec_id, local_media_path
        )

        # 2) Load the clip
        if input_data.is_video:
            clip = VideoFileClip(media_abspath)
        else:
            clip = AudioFileClip(media_abspath)

        yield "duration", clip.duration


class LoopVideoBlock(Block):
    """
    Block for looping (repeating) a video clip until a given duration or number of loops.
    """

    class Input(BlockSchemaInput):
        video_in: MediaFileType = SchemaField(
            description="The input video (can be a URL, data URI, or local path)."
        )
        # Provide EITHER a `duration` or `n_loops` or both. We'll demonstrate `duration`.
        duration: Optional[float] = SchemaField(
            description="Target duration (in seconds) to loop the video to. If omitted, defaults to no looping.",
            default=None,
            ge=0.0,
        )
        n_loops: Optional[int] = SchemaField(
            description="Number of times to repeat the video. If omitted, defaults to 1 (no repeat).",
            default=None,
            ge=1,
        )

    class Output(BlockSchemaOutput):
        video_out: str = SchemaField(
            description="Looped video returned either as a relative path or a data URI."
        )

    def __init__(self):
        super().__init__(
            id="8bf9eef6-5451-4213-b265-25306446e94b",
            description="Block to loop a video to a given duration or number of repeats.",
            categories={BlockCategory.MULTIMEDIA},
            input_schema=LoopVideoBlock.Input,
            output_schema=LoopVideoBlock.Output,
        )

    async def run(
        self,
        input_data: Input,
        *,
        execution_context: ExecutionContext,
        **kwargs,
    ) -> BlockOutput:
        assert execution_context.graph_exec_id is not None
        assert execution_context.node_exec_id is not None
        graph_exec_id = execution_context.graph_exec_id
        node_exec_id = execution_context.node_exec_id

        # 1) Store the input video locally
        local_video_path = await store_media_file(
            file=input_data.video_in,
            execution_context=execution_context,
            return_format="for_local_processing",
        )
        input_abspath = get_exec_file_path(graph_exec_id, local_video_path)

        # 2) Load the clip
        clip = VideoFileClip(input_abspath)

        # 3) Apply the loop effect
        looped_clip = clip
        if input_data.duration:
            # Loop until we reach the specified duration
            looped_clip = looped_clip.with_effects([Loop(duration=input_data.duration)])
        elif input_data.n_loops:
            looped_clip = looped_clip.with_effects([Loop(n=input_data.n_loops)])
        else:
            raise ValueError("Either 'duration' or 'n_loops' must be provided.")

        assert isinstance(looped_clip, VideoFileClip)

        # 4) Save the looped output
        output_filename = MediaFileType(
            f"{node_exec_id}_looped_{os.path.basename(local_video_path)}"
        )
        output_abspath = get_exec_file_path(graph_exec_id, output_filename)

        looped_clip = looped_clip.with_audio(clip.audio)
        looped_clip.write_videofile(output_abspath, codec="libx264", audio_codec="aac")

        # Return output - for_block_output returns workspace:// if available, else data URI
        video_out = await store_media_file(
            file=output_filename,
            execution_context=execution_context,
            return_format="for_block_output",
        )

        yield "video_out", video_out


class AddAudioToVideoBlock(Block):
    """
    Block that adds (attaches) an audio track to an existing video.
    Optionally scale the volume of the new track.
    """

    class Input(BlockSchemaInput):
        video_in: MediaFileType = SchemaField(
            description="Video input (URL, data URI, or local path)."
        )
        audio_in: MediaFileType = SchemaField(
            description="Audio input (URL, data URI, or local path)."
        )
        volume: float = SchemaField(
            description="Volume scale for the newly attached audio track (1.0 = original).",
            default=1.0,
        )

    class Output(BlockSchemaOutput):
        video_out: MediaFileType = SchemaField(
            description="Final video (with attached audio), as a path or data URI."
        )

    def __init__(self):
        super().__init__(
            id="3503748d-62b6-4425-91d6-725b064af509",
            description="Block to attach an audio file to a video file using moviepy.",
            categories={BlockCategory.MULTIMEDIA},
            input_schema=AddAudioToVideoBlock.Input,
            output_schema=AddAudioToVideoBlock.Output,
        )

    async def run(
        self,
        input_data: Input,
        *,
        execution_context: ExecutionContext,
        **kwargs,
    ) -> BlockOutput:
        assert execution_context.graph_exec_id is not None
        assert execution_context.node_exec_id is not None
        graph_exec_id = execution_context.graph_exec_id
        node_exec_id = execution_context.node_exec_id

        # 1) Store the inputs locally
        local_video_path = await store_media_file(
            file=input_data.video_in,
            execution_context=execution_context,
            return_format="for_local_processing",
        )
        local_audio_path = await store_media_file(
            file=input_data.audio_in,
            execution_context=execution_context,
            return_format="for_local_processing",
        )

        abs_temp_dir = os.path.join(tempfile.gettempdir(), "exec_file", graph_exec_id)
        video_abspath = os.path.join(abs_temp_dir, local_video_path)
        audio_abspath = os.path.join(abs_temp_dir, local_audio_path)

        # 2) Load video + audio with moviepy
        video_clip = VideoFileClip(video_abspath)
        audio_clip = AudioFileClip(audio_abspath)
        # Optionally scale volume
        if input_data.volume != 1.0:
            audio_clip = audio_clip.with_volume_scaled(input_data.volume)

        # 3) Attach the new audio track
        final_clip = video_clip.with_audio(audio_clip)

        # 4) Write to output file
        output_filename = MediaFileType(
            f"{node_exec_id}_audio_attached_{os.path.basename(local_video_path)}"
        )
        output_abspath = os.path.join(abs_temp_dir, output_filename)
        final_clip.write_videofile(output_abspath, codec="libx264", audio_codec="aac")

        # 5) Return output - for_block_output returns workspace:// if available, else data URI
        video_out = await store_media_file(
            file=output_filename,
            execution_context=execution_context,
            return_format="for_block_output",
        )

        yield "video_out", video_out
@@ -0,0 +1,77 @@
import pytest

from backend.blocks.encoder_block import TextEncoderBlock


@pytest.mark.asyncio
async def test_text_encoder_basic():
    """Test basic encoding of newlines and special characters."""
    block = TextEncoderBlock()
    result = []
    async for output in block.run(TextEncoderBlock.Input(text="Hello\nWorld")):
        result.append(output)

    assert len(result) == 1
    assert result[0][0] == "encoded_text"
    assert result[0][1] == "Hello\\nWorld"


@pytest.mark.asyncio
async def test_text_encoder_multiple_escapes():
    """Test encoding of multiple escape sequences."""
    block = TextEncoderBlock()
    result = []
    async for output in block.run(
        TextEncoderBlock.Input(text="Line1\nLine2\tTabbed\rCarriage")
    ):
        result.append(output)

    assert len(result) == 1
    assert result[0][0] == "encoded_text"
    assert "\\n" in result[0][1]
    assert "\\t" in result[0][1]
    assert "\\r" in result[0][1]


@pytest.mark.asyncio
async def test_text_encoder_unicode():
    """Test that unicode characters are handled correctly."""
    block = TextEncoderBlock()
    result = []
    async for output in block.run(TextEncoderBlock.Input(text="Hello 世界\n")):
        result.append(output)

    assert len(result) == 1
    assert result[0][0] == "encoded_text"
    # Unicode characters should be escaped as \uXXXX sequences
    assert "\\n" in result[0][1]


@pytest.mark.asyncio
async def test_text_encoder_empty_string():
    """Test encoding of an empty string."""
    block = TextEncoderBlock()
    result = []
    async for output in block.run(TextEncoderBlock.Input(text="")):
        result.append(output)

    assert len(result) == 1
    assert result[0][0] == "encoded_text"
    assert result[0][1] == ""


@pytest.mark.asyncio
async def test_text_encoder_error_handling():
    """Test that encoding errors are handled gracefully."""
    from unittest.mock import patch

    block = TextEncoderBlock()
    result = []

    with patch("codecs.encode", side_effect=Exception("Mocked encoding error")):
        async for output in block.run(TextEncoderBlock.Input(text="test")):
            result.append(output)

    assert len(result) == 1
    assert result[0][0] == "error"
    assert "Mocked encoding error" in result[0][1]
autogpt_platform/backend/backend/blocks/video/__init__.py (new file, 37 lines)
@@ -0,0 +1,37 @@
"""Video editing blocks for AutoGPT Platform.

This module provides blocks for:
- Downloading videos from URLs (YouTube, Vimeo, news sites, direct links)
- Clipping/trimming video segments
- Concatenating multiple videos
- Adding text overlays
- Adding AI-generated narration
- Getting media duration
- Looping videos
- Adding audio to videos

Dependencies:
- yt-dlp: For video downloading
- moviepy: For video editing operations
- elevenlabs: For AI narration (optional)
"""

from backend.blocks.video.add_audio import AddAudioToVideoBlock
from backend.blocks.video.clip import VideoClipBlock
from backend.blocks.video.concat import VideoConcatBlock
from backend.blocks.video.download import VideoDownloadBlock
from backend.blocks.video.duration import MediaDurationBlock
from backend.blocks.video.loop import LoopVideoBlock
from backend.blocks.video.narration import VideoNarrationBlock
from backend.blocks.video.text_overlay import VideoTextOverlayBlock

__all__ = [
    "AddAudioToVideoBlock",
    "LoopVideoBlock",
    "MediaDurationBlock",
    "VideoClipBlock",
    "VideoConcatBlock",
    "VideoDownloadBlock",
    "VideoNarrationBlock",
    "VideoTextOverlayBlock",
]
131
autogpt_platform/backend/backend/blocks/video/_utils.py
Normal file
131
autogpt_platform/backend/backend/blocks/video/_utils.py
Normal file
@@ -0,0 +1,131 @@
|
||||
"""Shared utilities for video blocks."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import subprocess
|
||||
from pathlib import Path
|
||||
|
||||
logger = logging.getLogger(__name__)
# Known operation tags added by video blocks
_VIDEO_OPS = (
    r"(?:clip|overlay|narrated|looped|concat|audio_attached|with_audio|narration)"
)

# Matches: {node_exec_id}_{operation}_ where node_exec_id contains a UUID
_BLOCK_PREFIX_RE = re.compile(
    r"^[a-zA-Z0-9_-]*"
    r"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
    r"[a-zA-Z0-9_-]*"
    r"_" + _VIDEO_OPS + r"_"
)

# Matches: a lone {node_exec_id}_ prefix (no operation keyword, e.g. download output)
_UUID_PREFIX_RE = re.compile(
    r"^[a-zA-Z0-9_-]*"
    r"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
    r"[a-zA-Z0-9_-]*_"
)


def extract_source_name(input_path: str, max_length: int = 50) -> str:
    """Extract the original source filename by stripping block-generated prefixes.

    Iteratively removes {node_exec_id}_{operation}_ prefixes that accumulate
    when chaining video blocks, recovering the original human-readable name.

    Safe for plain filenames (no UUID -> no stripping).
    Falls back to "video" if everything is stripped.
    """
    stem = Path(input_path).stem

    # Pass 1: strip {node_exec_id}_{operation}_ prefixes iteratively
    while _BLOCK_PREFIX_RE.match(stem):
        stem = _BLOCK_PREFIX_RE.sub("", stem, count=1)

    # Pass 2: strip a lone {node_exec_id}_ prefix (e.g. from download block)
    if _UUID_PREFIX_RE.match(stem):
        stem = _UUID_PREFIX_RE.sub("", stem, count=1)

    if not stem:
        return "video"

    return stem[:max_length]


def get_video_codecs(output_path: str) -> tuple[str, str]:
    """Get appropriate video and audio codecs based on output file extension.

    Args:
        output_path: Path to the output file (used to determine extension)

    Returns:
        Tuple of (video_codec, audio_codec)

    Codec mappings:
    - .mp4: H.264 + AAC (universal compatibility)
    - .webm: VP8 + Vorbis (web streaming)
    - .mkv: H.264 + AAC (container supports many codecs)
    - .mov: H.264 + AAC (Apple QuickTime, widely compatible)
    - .m4v: H.264 + AAC (Apple iTunes/devices)
    - .avi: MPEG-4 + MP3 (legacy Windows)
    """
    ext = os.path.splitext(output_path)[1].lower()

    codec_map: dict[str, tuple[str, str]] = {
        ".mp4": ("libx264", "aac"),
        ".webm": ("libvpx", "libvorbis"),
        ".mkv": ("libx264", "aac"),
        ".mov": ("libx264", "aac"),
        ".m4v": ("libx264", "aac"),
        ".avi": ("mpeg4", "libmp3lame"),
    }

    return codec_map.get(ext, ("libx264", "aac"))


def strip_chapters_inplace(video_path: str) -> None:
    """Strip chapter metadata from a media file in-place using ffmpeg.

    MoviePy 2.x crashes with IndexError when parsing files with embedded
    chapter metadata (https://github.com/Zulko/moviepy/issues/2419).
    This strips chapters without re-encoding.

    Args:
        video_path: Absolute path to the media file to strip chapters from.
    """
    base, ext = os.path.splitext(video_path)
    tmp_path = base + ".tmp" + ext
    try:
        result = subprocess.run(
            [
                "ffmpeg",
                "-y",
                "-i",
                video_path,
                "-map_chapters",
                "-1",
                "-codec",
                "copy",
                tmp_path,
            ],
            capture_output=True,
            text=True,
            timeout=300,
        )
        if result.returncode != 0:
            logger.warning(
                "ffmpeg chapter strip failed (rc=%d): %s",
                result.returncode,
                result.stderr,
            )
            return
        os.replace(tmp_path, video_path)
    except FileNotFoundError:
        logger.warning("ffmpeg not found; skipping chapter strip")
    finally:
        if os.path.exists(tmp_path):
            os.unlink(tmp_path)
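For reference, a minimal sketch of how the prefix stripping behaves, assuming the helper above is importable; the UUIDs and the "holiday_trip" base name are made up for illustration:

from backend.blocks.video._utils import extract_source_name

# A filename as it might look after chaining two video blocks
# (both UUIDs and the base name below are hypothetical):
chained = (
    "9f1c2d3e-4a5b-6c7d-8e9f-0a1b2c3d4e5f_clip_"
    "1a2b3c4d-5e6f-7a8b-9c0d-1e2f3a4b5c6d_looped_holiday_trip.mp4"
)
assert extract_source_name(chained) == "holiday_trip"
# A plain filename with no UUID prefix passes through untouched:
assert extract_source_name("holiday_trip.mp4") == "holiday_trip"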
autogpt_platform/backend/backend/blocks/video/add_audio.py (new file, 113 lines)
@@ -0,0 +1,113 @@
"""AddAudioToVideoBlock - Attach an audio track to a video file."""

from moviepy.audio.io.AudioFileClip import AudioFileClip
from moviepy.video.io.VideoFileClip import VideoFileClip

from backend.blocks.video._utils import extract_source_name, strip_chapters_inplace
from backend.data.block import (
    Block,
    BlockCategory,
    BlockOutput,
    BlockSchemaInput,
    BlockSchemaOutput,
)
from backend.data.execution import ExecutionContext
from backend.data.model import SchemaField
from backend.util.file import MediaFileType, get_exec_file_path, store_media_file


class AddAudioToVideoBlock(Block):
    """Add (attach) an audio track to an existing video."""

    class Input(BlockSchemaInput):
        video_in: MediaFileType = SchemaField(
            description="Video input (URL, data URI, or local path)."
        )
        audio_in: MediaFileType = SchemaField(
            description="Audio input (URL, data URI, or local path)."
        )
        volume: float = SchemaField(
            description="Volume scale for the newly attached audio track (1.0 = original).",
            default=1.0,
        )

    class Output(BlockSchemaOutput):
        video_out: MediaFileType = SchemaField(
            description="Final video (with attached audio), as a path or data URI."
        )

    def __init__(self):
        super().__init__(
            id="3503748d-62b6-4425-91d6-725b064af509",
            description="Block to attach an audio file to a video file using moviepy.",
            categories={BlockCategory.MULTIMEDIA},
            input_schema=AddAudioToVideoBlock.Input,
            output_schema=AddAudioToVideoBlock.Output,
        )

    async def run(
        self,
        input_data: Input,
        *,
        execution_context: ExecutionContext,
        **kwargs,
    ) -> BlockOutput:
        assert execution_context.graph_exec_id is not None
        assert execution_context.node_exec_id is not None
        graph_exec_id = execution_context.graph_exec_id
        node_exec_id = execution_context.node_exec_id

        # 1) Store the inputs locally
        local_video_path = await store_media_file(
            file=input_data.video_in,
            execution_context=execution_context,
            return_format="for_local_processing",
        )
        local_audio_path = await store_media_file(
            file=input_data.audio_in,
            execution_context=execution_context,
            return_format="for_local_processing",
        )

        video_abspath = get_exec_file_path(graph_exec_id, local_video_path)
        audio_abspath = get_exec_file_path(graph_exec_id, local_audio_path)

        # 2) Load video + audio with moviepy
        strip_chapters_inplace(video_abspath)
        strip_chapters_inplace(audio_abspath)
        video_clip = None
        audio_clip = None
        final_clip = None
        try:
            video_clip = VideoFileClip(video_abspath)
            audio_clip = AudioFileClip(audio_abspath)
            # Optionally scale volume
            if input_data.volume != 1.0:
                audio_clip = audio_clip.with_volume_scaled(input_data.volume)

            # 3) Attach the new audio track
            final_clip = video_clip.with_audio(audio_clip)

            # 4) Write to output file
            source = extract_source_name(local_video_path)
            output_filename = MediaFileType(f"{node_exec_id}_with_audio_{source}.mp4")
            output_abspath = get_exec_file_path(graph_exec_id, output_filename)
            final_clip.write_videofile(
                output_abspath, codec="libx264", audio_codec="aac"
            )
        finally:
            if final_clip:
                final_clip.close()
            if audio_clip:
                audio_clip.close()
            if video_clip:
                video_clip.close()

        # 5) Return output - for_block_output returns workspace:// if available, else data URI
        video_out = await store_media_file(
            file=output_filename,
            execution_context=execution_context,
            return_format="for_block_output",
        )

        yield "video_out", video_out
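Stripped of the block plumbing, the MoviePy 2.x calls this block wraps look roughly like the following sketch (paths are placeholders):

from moviepy.audio.io.AudioFileClip import AudioFileClip
from moviepy.video.io.VideoFileClip import VideoFileClip

video = VideoFileClip("input.mp4")
audio = AudioFileClip("voiceover.mp3").with_volume_scaled(0.8)  # when volume != 1.0
final = video.with_audio(audio)  # replaces any existing audio track
final.write_videofile("output.mp4", codec="libx264", audio_codec="aac")
for clip in (final, audio, video):
    clip.close()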
autogpt_platform/backend/backend/blocks/video/clip.py (new file, 167 lines)
@@ -0,0 +1,167 @@
"""VideoClipBlock - Extract a segment from a video file."""

from typing import Literal

from moviepy.video.io.VideoFileClip import VideoFileClip

from backend.blocks.video._utils import (
    extract_source_name,
    get_video_codecs,
    strip_chapters_inplace,
)
from backend.data.block import (
    Block,
    BlockCategory,
    BlockOutput,
    BlockSchemaInput,
    BlockSchemaOutput,
)
from backend.data.execution import ExecutionContext
from backend.data.model import SchemaField
from backend.util.exceptions import BlockExecutionError
from backend.util.file import MediaFileType, get_exec_file_path, store_media_file


class VideoClipBlock(Block):
    """Extract a time segment from a video."""

    class Input(BlockSchemaInput):
        video_in: MediaFileType = SchemaField(
            description="Input video (URL, data URI, or local path)"
        )
        start_time: float = SchemaField(description="Start time in seconds", ge=0.0)
        end_time: float = SchemaField(description="End time in seconds", ge=0.0)
        output_format: Literal["mp4", "webm", "mkv", "mov"] = SchemaField(
            description="Output format", default="mp4", advanced=True
        )

    class Output(BlockSchemaOutput):
        video_out: MediaFileType = SchemaField(
            description="Clipped video file (path or data URI)"
        )
        duration: float = SchemaField(description="Clip duration in seconds")

    def __init__(self):
        super().__init__(
            id="8f539119-e580-4d86-ad41-86fbcb22abb1",
            description="Extract a time segment from a video",
            categories={BlockCategory.MULTIMEDIA},
            input_schema=self.Input,
            output_schema=self.Output,
            test_input={
                "video_in": "/tmp/test.mp4",
                "start_time": 0.0,
                "end_time": 10.0,
            },
            test_output=[("video_out", str), ("duration", float)],
            test_mock={
                "_clip_video": lambda *args: 10.0,
                "_store_input_video": lambda *args, **kwargs: "test.mp4",
                "_store_output_video": lambda *args, **kwargs: "clip_test.mp4",
            },
        )

    async def _store_input_video(
        self, execution_context: ExecutionContext, file: MediaFileType
    ) -> MediaFileType:
        """Store input video. Extracted for testability."""
        return await store_media_file(
            file=file,
            execution_context=execution_context,
            return_format="for_local_processing",
        )

    async def _store_output_video(
        self, execution_context: ExecutionContext, file: MediaFileType
    ) -> MediaFileType:
        """Store output video. Extracted for testability."""
        return await store_media_file(
            file=file,
            execution_context=execution_context,
            return_format="for_block_output",
        )

    def _clip_video(
        self,
        video_abspath: str,
        output_abspath: str,
        start_time: float,
        end_time: float,
    ) -> float:
        """Extract a clip from a video. Extracted for testability."""
        clip = None
        subclip = None
        try:
            strip_chapters_inplace(video_abspath)
            clip = VideoFileClip(video_abspath)
            subclip = clip.subclipped(start_time, end_time)
            video_codec, audio_codec = get_video_codecs(output_abspath)
            subclip.write_videofile(
                output_abspath, codec=video_codec, audio_codec=audio_codec
            )
            return subclip.duration
        finally:
            if subclip:
                subclip.close()
            if clip:
                clip.close()

    async def run(
        self,
        input_data: Input,
        *,
        execution_context: ExecutionContext,
        node_exec_id: str,
        **kwargs,
    ) -> BlockOutput:
        # Validate time range
        if input_data.end_time <= input_data.start_time:
            raise BlockExecutionError(
                message=f"end_time ({input_data.end_time}) must be greater than start_time ({input_data.start_time})",
                block_name=self.name,
                block_id=str(self.id),
            )

        try:
            assert execution_context.graph_exec_id is not None

            # Store the input video locally
            local_video_path = await self._store_input_video(
                execution_context, input_data.video_in
            )
            video_abspath = get_exec_file_path(
                execution_context.graph_exec_id, local_video_path
            )

            # Build output path
            source = extract_source_name(local_video_path)
            output_filename = MediaFileType(
                f"{node_exec_id}_clip_{source}.{input_data.output_format}"
            )
            output_abspath = get_exec_file_path(
                execution_context.graph_exec_id, output_filename
            )

            duration = self._clip_video(
                video_abspath,
                output_abspath,
                input_data.start_time,
                input_data.end_time,
            )

            # Return as workspace path or data URI based on context
            video_out = await self._store_output_video(
                execution_context, output_filename
            )

            yield "video_out", video_out
            yield "duration", duration

        except BlockExecutionError:
            raise
        except Exception as e:
            raise BlockExecutionError(
                message=f"Failed to clip video: {e}",
                block_name=self.name,
                block_id=str(self.id),
            ) from e
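The underlying MoviePy 2.x call is subclipped (renamed from 1.x's subclip); a standalone sketch with placeholder paths and times:

from moviepy.video.io.VideoFileClip import VideoFileClip

clip = VideoFileClip("input.mp4")
sub = clip.subclipped(5.0, 15.0)  # extract seconds 5-15
sub.write_videofile("clip.mp4", codec="libx264", audio_codec="aac")
print(sub.duration)  # 10.0
sub.close()
clip.close()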
autogpt_platform/backend/backend/blocks/video/concat.py (new file, 227 lines)
@@ -0,0 +1,227 @@
"""VideoConcatBlock - Concatenate multiple video clips into one."""

from typing import Literal

from moviepy import concatenate_videoclips
from moviepy.video.fx import CrossFadeIn, CrossFadeOut, FadeIn, FadeOut
from moviepy.video.io.VideoFileClip import VideoFileClip

from backend.blocks.video._utils import (
    extract_source_name,
    get_video_codecs,
    strip_chapters_inplace,
)
from backend.data.block import (
    Block,
    BlockCategory,
    BlockOutput,
    BlockSchemaInput,
    BlockSchemaOutput,
)
from backend.data.execution import ExecutionContext
from backend.data.model import SchemaField
from backend.util.exceptions import BlockExecutionError
from backend.util.file import MediaFileType, get_exec_file_path, store_media_file


class VideoConcatBlock(Block):
    """Merge multiple video clips into one continuous video."""

    class Input(BlockSchemaInput):
        videos: list[MediaFileType] = SchemaField(
            description="List of video files to concatenate (in order)"
        )
        transition: Literal["none", "crossfade", "fade_black"] = SchemaField(
            description="Transition between clips", default="none"
        )
        transition_duration: int = SchemaField(
            description="Transition duration in seconds",
            default=1,
            ge=0,
            advanced=True,
        )
        output_format: Literal["mp4", "webm", "mkv", "mov"] = SchemaField(
            description="Output format", default="mp4", advanced=True
        )

    class Output(BlockSchemaOutput):
        video_out: MediaFileType = SchemaField(
            description="Concatenated video file (path or data URI)"
        )
        total_duration: float = SchemaField(description="Total duration in seconds")

    def __init__(self):
        super().__init__(
            id="9b0f531a-1118-487f-aeec-3fa63ea8900a",
            description="Merge multiple video clips into one continuous video",
            categories={BlockCategory.MULTIMEDIA},
            input_schema=self.Input,
            output_schema=self.Output,
            test_input={
                "videos": ["/tmp/a.mp4", "/tmp/b.mp4"],
            },
            test_output=[
                ("video_out", str),
                ("total_duration", float),
            ],
            test_mock={
                "_concat_videos": lambda *args: 20.0,
                "_store_input_video": lambda *args, **kwargs: "test.mp4",
                "_store_output_video": lambda *args, **kwargs: "concat_test.mp4",
            },
        )

    async def _store_input_video(
        self, execution_context: ExecutionContext, file: MediaFileType
    ) -> MediaFileType:
        """Store input video. Extracted for testability."""
        return await store_media_file(
            file=file,
            execution_context=execution_context,
            return_format="for_local_processing",
        )

    async def _store_output_video(
        self, execution_context: ExecutionContext, file: MediaFileType
    ) -> MediaFileType:
        """Store output video. Extracted for testability."""
        return await store_media_file(
            file=file,
            execution_context=execution_context,
            return_format="for_block_output",
        )

    def _concat_videos(
        self,
        video_abspaths: list[str],
        output_abspath: str,
        transition: str,
        transition_duration: int,
    ) -> float:
        """Concatenate videos. Extracted for testability.

        Returns:
            Total duration of the concatenated video.
        """
        clips = []
        faded_clips = []
        final = None
        try:
            # Load clips
            for v in video_abspaths:
                strip_chapters_inplace(v)
                clips.append(VideoFileClip(v))

            # Validate transition_duration against shortest clip
            if transition in {"crossfade", "fade_black"} and transition_duration > 0:
                min_duration = min(c.duration for c in clips)
                if transition_duration >= min_duration:
                    raise BlockExecutionError(
                        message=(
                            f"transition_duration ({transition_duration}s) must be "
                            f"shorter than the shortest clip ({min_duration:.2f}s)"
                        ),
                        block_name=self.name,
                        block_id=str(self.id),
                    )

            if transition == "crossfade":
                for i, clip in enumerate(clips):
                    effects = []
                    if i > 0:
                        effects.append(CrossFadeIn(transition_duration))
                    if i < len(clips) - 1:
                        effects.append(CrossFadeOut(transition_duration))
                    if effects:
                        clip = clip.with_effects(effects)
                    faded_clips.append(clip)
                final = concatenate_videoclips(
                    faded_clips,
                    method="compose",
                    padding=-transition_duration,
                )
            elif transition == "fade_black":
                for clip in clips:
                    faded = clip.with_effects(
                        [FadeIn(transition_duration), FadeOut(transition_duration)]
                    )
                    faded_clips.append(faded)
                final = concatenate_videoclips(faded_clips)
            else:
                final = concatenate_videoclips(clips)

            video_codec, audio_codec = get_video_codecs(output_abspath)
            final.write_videofile(
                output_abspath, codec=video_codec, audio_codec=audio_codec
            )

            return final.duration
        finally:
            if final:
                final.close()
            for clip in faded_clips:
                clip.close()
            for clip in clips:
                clip.close()

    async def run(
        self,
        input_data: Input,
        *,
        execution_context: ExecutionContext,
        node_exec_id: str,
        **kwargs,
    ) -> BlockOutput:
        # Validate minimum clips
        if len(input_data.videos) < 2:
            raise BlockExecutionError(
                message="At least 2 videos are required for concatenation",
                block_name=self.name,
                block_id=str(self.id),
            )

        try:
            assert execution_context.graph_exec_id is not None

            # Store all input videos locally
            video_abspaths = []
            for video in input_data.videos:
                local_path = await self._store_input_video(execution_context, video)
                video_abspaths.append(
                    get_exec_file_path(execution_context.graph_exec_id, local_path)
                )

            # Build output path
            source = (
                extract_source_name(video_abspaths[0]) if video_abspaths else "video"
            )
            output_filename = MediaFileType(
                f"{node_exec_id}_concat_{source}.{input_data.output_format}"
            )
            output_abspath = get_exec_file_path(
                execution_context.graph_exec_id, output_filename
            )

            total_duration = self._concat_videos(
                video_abspaths,
                output_abspath,
                input_data.transition,
                input_data.transition_duration,
            )

            # Return as workspace path or data URI based on context
            video_out = await self._store_output_video(
                execution_context, output_filename
            )

            yield "video_out", video_out
            yield "total_duration", total_duration

        except BlockExecutionError:
            raise
        except Exception as e:
            raise BlockExecutionError(
                message=f"Failed to concatenate videos: {e}",
                block_name=self.name,
                block_id=str(self.id),
            ) from e
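With method="compose" and a negative padding, crossfaded clips overlap at each joint, so the expected total duration is the sum of the clip durations minus one transition per joint. A quick sanity check with hypothetical clip lengths:

durations = [10.0, 10.0, 10.0]  # three hypothetical clips
transition_duration = 1.0
total = sum(durations) - (len(durations) - 1) * transition_duration
print(total)  # 28.0 - each of the two joints overlaps by 1 s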
autogpt_platform/backend/backend/blocks/video/download.py (new file, 172 lines)
@@ -0,0 +1,172 @@
"""VideoDownloadBlock - Download video from URL (YouTube, Vimeo, news sites, direct links)."""

import os
import typing
from typing import Literal

import yt_dlp

if typing.TYPE_CHECKING:
    from yt_dlp import _Params

from backend.data.block import (
    Block,
    BlockCategory,
    BlockOutput,
    BlockSchemaInput,
    BlockSchemaOutput,
)
from backend.data.execution import ExecutionContext
from backend.data.model import SchemaField
from backend.util.exceptions import BlockExecutionError
from backend.util.file import MediaFileType, get_exec_file_path, store_media_file


class VideoDownloadBlock(Block):
    """Download video from URL using yt-dlp."""

    class Input(BlockSchemaInput):
        url: str = SchemaField(
            description="URL of the video to download (YouTube, Vimeo, direct link, etc.)",
            placeholder="https://www.youtube.com/watch?v=...",
        )
        quality: Literal["best", "1080p", "720p", "480p", "audio_only"] = SchemaField(
            description="Video quality preference", default="720p"
        )
        output_format: Literal["mp4", "webm", "mkv"] = SchemaField(
            description="Output video format", default="mp4", advanced=True
        )

    class Output(BlockSchemaOutput):
        video_file: MediaFileType = SchemaField(
            description="Downloaded video (path or data URI)"
        )
        duration: float = SchemaField(description="Video duration in seconds")
        title: str = SchemaField(description="Video title from source")
        source_url: str = SchemaField(description="Original source URL")

    def __init__(self):
        super().__init__(
            id="c35daabb-cd60-493b-b9ad-51f1fe4b50c4",
            description="Download video from URL (YouTube, Vimeo, news sites, direct links)",
            categories={BlockCategory.MULTIMEDIA},
            input_schema=self.Input,
            output_schema=self.Output,
            disabled=True,  # Disable until we can sandbox yt-dlp and handle security implications
            test_input={
                "url": "https://www.youtube.com/watch?v=dQw4w9WgXcQ",
                "quality": "480p",
            },
            test_output=[
                ("video_file", str),
                ("duration", float),
                ("title", str),
                ("source_url", str),
            ],
            test_mock={
                "_download_video": lambda *args: (
                    "video.mp4",
                    212.0,
                    "Test Video",
                ),
                "_store_output_video": lambda *args, **kwargs: "video.mp4",
            },
        )

    async def _store_output_video(
        self, execution_context: ExecutionContext, file: MediaFileType
    ) -> MediaFileType:
        """Store output video. Extracted for testability."""
        return await store_media_file(
            file=file,
            execution_context=execution_context,
            return_format="for_block_output",
        )

    def _get_format_string(self, quality: str) -> str:
        formats = {
            "best": "bestvideo+bestaudio/best",
            "1080p": "bestvideo[height<=1080]+bestaudio/best[height<=1080]",
            "720p": "bestvideo[height<=720]+bestaudio/best[height<=720]",
            "480p": "bestvideo[height<=480]+bestaudio/best[height<=480]",
            "audio_only": "bestaudio/best",
        }
        return formats.get(quality, formats["720p"])

    def _download_video(
        self,
        url: str,
        quality: str,
        output_format: str,
        output_dir: str,
        node_exec_id: str,
    ) -> tuple[str, float, str]:
        """Download video. Extracted for testability."""
        output_template = os.path.join(
            output_dir, f"{node_exec_id}_%(title).50s.%(ext)s"
        )

        ydl_opts: "_Params" = {
            "format": f"{self._get_format_string(quality)}/best",
            "outtmpl": output_template,
            "merge_output_format": output_format,
            "quiet": True,
            "no_warnings": True,
        }

        with yt_dlp.YoutubeDL(ydl_opts) as ydl:
            info = ydl.extract_info(url, download=True)
            video_path = ydl.prepare_filename(info)

            # Handle format conversion in filename
            if not video_path.endswith(f".{output_format}"):
                video_path = video_path.rsplit(".", 1)[0] + f".{output_format}"

            # Return just the filename, not the full path
            filename = os.path.basename(video_path)

            return (
                filename,
                info.get("duration") or 0.0,
                info.get("title") or "Unknown",
            )

    async def run(
        self,
        input_data: Input,
        *,
        execution_context: ExecutionContext,
        node_exec_id: str,
        **kwargs,
    ) -> BlockOutput:
        try:
            assert execution_context.graph_exec_id is not None

            # Get the exec file directory
            output_dir = get_exec_file_path(execution_context.graph_exec_id, "")
            os.makedirs(output_dir, exist_ok=True)

            filename, duration, title = self._download_video(
                input_data.url,
                input_data.quality,
                input_data.output_format,
                output_dir,
                node_exec_id,
            )

            # Return as workspace path or data URI based on context
            video_out = await self._store_output_video(
                execution_context, MediaFileType(filename)
            )

            yield "video_file", video_out
            yield "duration", duration
            yield "title", title
            yield "source_url", input_data.url

        except Exception as e:
            raise BlockExecutionError(
                message=f"Failed to download video: {e}",
                block_name=self.name,
                block_id=str(self.id),
            ) from e
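The yt-dlp usage above reduces to the following standalone sketch (the URL and output template are placeholders; the format string is the block's 720p preference):

import yt_dlp

opts = {
    "format": "bestvideo[height<=720]+bestaudio/best[height<=720]/best",
    "outtmpl": "%(title).50s.%(ext)s",
    "merge_output_format": "mp4",
    "quiet": True,
}
with yt_dlp.YoutubeDL(opts) as ydl:
    info = ydl.extract_info("https://example.com/video", download=True)
    print(ydl.prepare_filename(info), info.get("duration"))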
autogpt_platform/backend/backend/blocks/video/duration.py (new file, 77 lines)
@@ -0,0 +1,77 @@
"""MediaDurationBlock - Get the duration of a media file."""

from moviepy.audio.io.AudioFileClip import AudioFileClip
from moviepy.video.io.VideoFileClip import VideoFileClip

from backend.blocks.video._utils import strip_chapters_inplace
from backend.data.block import (
    Block,
    BlockCategory,
    BlockOutput,
    BlockSchemaInput,
    BlockSchemaOutput,
)
from backend.data.execution import ExecutionContext
from backend.data.model import SchemaField
from backend.util.file import MediaFileType, get_exec_file_path, store_media_file


class MediaDurationBlock(Block):
    """Get the duration of a media file (video or audio)."""

    class Input(BlockSchemaInput):
        media_in: MediaFileType = SchemaField(
            description="Media input (URL, data URI, or local path)."
        )
        is_video: bool = SchemaField(
            description="Whether the media is a video (True) or audio (False).",
            default=True,
        )

    class Output(BlockSchemaOutput):
        duration: float = SchemaField(
            description="Duration of the media file (in seconds)."
        )

    def __init__(self):
        super().__init__(
            id="d8b91fd4-da26-42d4-8ecb-8b196c6d84b6",
            description="Block to get the duration of a media file.",
            categories={BlockCategory.MULTIMEDIA},
            input_schema=MediaDurationBlock.Input,
            output_schema=MediaDurationBlock.Output,
        )

    async def run(
        self,
        input_data: Input,
        *,
        execution_context: ExecutionContext,
        **kwargs,
    ) -> BlockOutput:
        # 1) Store the input media locally
        local_media_path = await store_media_file(
            file=input_data.media_in,
            execution_context=execution_context,
            return_format="for_local_processing",
        )
        assert execution_context.graph_exec_id is not None
        media_abspath = get_exec_file_path(
            execution_context.graph_exec_id, local_media_path
        )

        # 2) Strip chapters to avoid MoviePy crash, then load the clip
        strip_chapters_inplace(media_abspath)
        clip = None
        try:
            if input_data.is_video:
                clip = VideoFileClip(media_abspath)
            else:
                clip = AudioFileClip(media_abspath)

            duration = clip.duration
        finally:
            if clip:
                clip.close()

        yield "duration", duration
autogpt_platform/backend/backend/blocks/video/loop.py (new file, 115 lines)
@@ -0,0 +1,115 @@
"""LoopVideoBlock - Loop a video to a given duration or number of repeats."""

from typing import Optional

from moviepy.video.fx.Loop import Loop
from moviepy.video.io.VideoFileClip import VideoFileClip

from backend.blocks.video._utils import extract_source_name, strip_chapters_inplace
from backend.data.block import (
    Block,
    BlockCategory,
    BlockOutput,
    BlockSchemaInput,
    BlockSchemaOutput,
)
from backend.data.execution import ExecutionContext
from backend.data.model import SchemaField
from backend.util.file import MediaFileType, get_exec_file_path, store_media_file


class LoopVideoBlock(Block):
    """Loop (repeat) a video clip until a given duration or number of loops."""

    class Input(BlockSchemaInput):
        video_in: MediaFileType = SchemaField(
            description="The input video (can be a URL, data URI, or local path)."
        )
        duration: Optional[float] = SchemaField(
            description="Target duration (in seconds) to loop the video to. Either duration or n_loops must be provided.",
            default=None,
            ge=0.0,
            le=3600.0,  # Max 1 hour to prevent disk exhaustion
        )
        n_loops: Optional[int] = SchemaField(
            description="Number of times to repeat the video. Either n_loops or duration must be provided.",
            default=None,
            ge=1,
            le=10,  # Max 10 loops to prevent disk exhaustion
        )

    class Output(BlockSchemaOutput):
        video_out: MediaFileType = SchemaField(
            description="Looped video returned either as a relative path or a data URI."
        )

    def __init__(self):
        super().__init__(
            id="8bf9eef6-5451-4213-b265-25306446e94b",
            description="Block to loop a video to a given duration or number of repeats.",
            categories={BlockCategory.MULTIMEDIA},
            input_schema=LoopVideoBlock.Input,
            output_schema=LoopVideoBlock.Output,
        )

    async def run(
        self,
        input_data: Input,
        *,
        execution_context: ExecutionContext,
        **kwargs,
    ) -> BlockOutput:
        assert execution_context.graph_exec_id is not None
        assert execution_context.node_exec_id is not None
        graph_exec_id = execution_context.graph_exec_id
        node_exec_id = execution_context.node_exec_id

        # 1) Store the input video locally
        local_video_path = await store_media_file(
            file=input_data.video_in,
            execution_context=execution_context,
            return_format="for_local_processing",
        )
        input_abspath = get_exec_file_path(graph_exec_id, local_video_path)

        # 2) Load the clip
        strip_chapters_inplace(input_abspath)
        clip = None
        looped_clip = None
        try:
            clip = VideoFileClip(input_abspath)

            # 3) Apply the loop effect
            if input_data.duration:
                # Loop until we reach the specified duration
                looped_clip = clip.with_effects([Loop(duration=input_data.duration)])
            elif input_data.n_loops:
                looped_clip = clip.with_effects([Loop(n=input_data.n_loops)])
            else:
                raise ValueError("Either 'duration' or 'n_loops' must be provided.")

            assert isinstance(looped_clip, VideoFileClip)

            # 4) Save the looped output
            source = extract_source_name(local_video_path)
            output_filename = MediaFileType(f"{node_exec_id}_looped_{source}.mp4")
            output_abspath = get_exec_file_path(graph_exec_id, output_filename)

            looped_clip = looped_clip.with_audio(clip.audio)
            looped_clip.write_videofile(
                output_abspath, codec="libx264", audio_codec="aac"
            )
        finally:
            if looped_clip:
                looped_clip.close()
            if clip:
                clip.close()

        # Return output - for_block_output returns workspace:// if available, else data URI
        video_out = await store_media_file(
            file=output_filename,
            execution_context=execution_context,
            return_format="for_block_output",
        )

        yield "video_out", video_out
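A standalone sketch of the loop effect used above, mirroring the block's calls (the path and timings are placeholders):

from moviepy.video.fx.Loop import Loop
from moviepy.video.io.VideoFileClip import VideoFileClip

clip = VideoFileClip("input.mp4")  # e.g. a short clip
looped = clip.with_effects([Loop(duration=10.0)])  # or Loop(n=3) for 3 repeats
looped = looped.with_audio(clip.audio)  # re-attach the source audio, as the block does
looped.write_videofile("looped.mp4", codec="libx264", audio_codec="aac")
looped.close()
clip.close()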
autogpt_platform/backend/backend/blocks/video/narration.py (new file, 267 lines)
@@ -0,0 +1,267 @@
"""VideoNarrationBlock - Generate AI voice narration and add to video."""

import os
from typing import Literal

from elevenlabs import ElevenLabs
from moviepy import CompositeAudioClip
from moviepy.audio.io.AudioFileClip import AudioFileClip
from moviepy.video.io.VideoFileClip import VideoFileClip

from backend.blocks.elevenlabs._auth import (
    TEST_CREDENTIALS,
    TEST_CREDENTIALS_INPUT,
    ElevenLabsCredentials,
    ElevenLabsCredentialsInput,
)
from backend.blocks.video._utils import (
    extract_source_name,
    get_video_codecs,
    strip_chapters_inplace,
)
from backend.data.block import (
    Block,
    BlockCategory,
    BlockOutput,
    BlockSchemaInput,
    BlockSchemaOutput,
)
from backend.data.execution import ExecutionContext
from backend.data.model import CredentialsField, SchemaField
from backend.util.exceptions import BlockExecutionError
from backend.util.file import MediaFileType, get_exec_file_path, store_media_file


class VideoNarrationBlock(Block):
    """Generate AI narration and add to video."""

    class Input(BlockSchemaInput):
        credentials: ElevenLabsCredentialsInput = CredentialsField(
            description="ElevenLabs API key for voice synthesis"
        )
        video_in: MediaFileType = SchemaField(
            description="Input video (URL, data URI, or local path)"
        )
        script: str = SchemaField(description="Narration script text")
        voice_id: str = SchemaField(
            description="ElevenLabs voice ID", default="21m00Tcm4TlvDq8ikWAM"  # Rachel
        )
        model_id: Literal[
            "eleven_multilingual_v2",
            "eleven_flash_v2_5",
            "eleven_turbo_v2_5",
            "eleven_turbo_v2",
        ] = SchemaField(
            description="ElevenLabs TTS model",
            default="eleven_multilingual_v2",
        )
        mix_mode: Literal["replace", "mix", "ducking"] = SchemaField(
            description="How to combine with original audio. 'ducking' applies stronger attenuation than 'mix'.",
            default="ducking",
        )
        narration_volume: float = SchemaField(
            description="Narration volume (0.0 to 2.0)",
            default=1.0,
            ge=0.0,
            le=2.0,
            advanced=True,
        )
        original_volume: float = SchemaField(
            description="Original audio volume when mixing (0.0 to 1.0)",
            default=0.3,
            ge=0.0,
            le=1.0,
            advanced=True,
        )

    class Output(BlockSchemaOutput):
        video_out: MediaFileType = SchemaField(
            description="Video with narration (path or data URI)"
        )
        audio_file: MediaFileType = SchemaField(
            description="Generated audio file (path or data URI)"
        )

    def __init__(self):
        super().__init__(
            id="3d036b53-859c-4b17-9826-ca340f736e0e",
            description="Generate AI narration and add to video",
            categories={BlockCategory.MULTIMEDIA, BlockCategory.AI},
            input_schema=self.Input,
            output_schema=self.Output,
            test_input={
                "video_in": "/tmp/test.mp4",
                "script": "Hello world",
                "credentials": TEST_CREDENTIALS_INPUT,
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=[("video_out", str), ("audio_file", str)],
            test_mock={
                "_generate_narration_audio": lambda *args: b"mock audio content",
                "_add_narration_to_video": lambda *args: None,
                "_store_input_video": lambda *args, **kwargs: "test.mp4",
                "_store_output_video": lambda *args, **kwargs: "narrated_test.mp4",
            },
        )

    async def _store_input_video(
        self, execution_context: ExecutionContext, file: MediaFileType
    ) -> MediaFileType:
        """Store input video. Extracted for testability."""
        return await store_media_file(
            file=file,
            execution_context=execution_context,
            return_format="for_local_processing",
        )

    async def _store_output_video(
        self, execution_context: ExecutionContext, file: MediaFileType
    ) -> MediaFileType:
        """Store output video. Extracted for testability."""
        return await store_media_file(
            file=file,
            execution_context=execution_context,
            return_format="for_block_output",
        )

    def _generate_narration_audio(
        self, api_key: str, script: str, voice_id: str, model_id: str
    ) -> bytes:
        """Generate narration audio via ElevenLabs API."""
        client = ElevenLabs(api_key=api_key)
        audio_generator = client.text_to_speech.convert(
            voice_id=voice_id,
            text=script,
            model_id=model_id,
        )
        # The SDK returns a generator, collect all chunks
        return b"".join(audio_generator)

    def _add_narration_to_video(
        self,
        video_abspath: str,
        audio_abspath: str,
        output_abspath: str,
        mix_mode: str,
        narration_volume: float,
        original_volume: float,
    ) -> None:
        """Add narration audio to video. Extracted for testability."""
        video = None
        final = None
        narration_original = None
        narration_scaled = None
        original = None

        try:
            strip_chapters_inplace(video_abspath)
            video = VideoFileClip(video_abspath)
            narration_original = AudioFileClip(audio_abspath)
            narration_scaled = narration_original.with_volume_scaled(narration_volume)
            narration = narration_scaled

            if mix_mode == "replace":
                final_audio = narration
            elif mix_mode == "mix":
                if video.audio:
                    original = video.audio.with_volume_scaled(original_volume)
                    final_audio = CompositeAudioClip([original, narration])
                else:
                    final_audio = narration
            else:  # ducking - apply stronger attenuation
                if video.audio:
                    # Ducking uses a much lower volume for original audio
                    ducking_volume = original_volume * 0.3
                    original = video.audio.with_volume_scaled(ducking_volume)
                    final_audio = CompositeAudioClip([original, narration])
                else:
                    final_audio = narration

            final = video.with_audio(final_audio)
            video_codec, audio_codec = get_video_codecs(output_abspath)
            final.write_videofile(
                output_abspath, codec=video_codec, audio_codec=audio_codec
            )

        finally:
            if original:
                original.close()
            if narration_scaled:
                narration_scaled.close()
            if narration_original:
                narration_original.close()
            if final:
                final.close()
            if video:
                video.close()

    async def run(
        self,
        input_data: Input,
        *,
        credentials: ElevenLabsCredentials,
        execution_context: ExecutionContext,
        node_exec_id: str,
        **kwargs,
    ) -> BlockOutput:
        try:
            assert execution_context.graph_exec_id is not None

            # Store the input video locally
            local_video_path = await self._store_input_video(
                execution_context, input_data.video_in
            )
            video_abspath = get_exec_file_path(
                execution_context.graph_exec_id, local_video_path
            )

            # Generate narration audio via ElevenLabs
            audio_content = self._generate_narration_audio(
                credentials.api_key.get_secret_value(),
                input_data.script,
                input_data.voice_id,
                input_data.model_id,
            )

            # Save audio to exec file path
            audio_filename = MediaFileType(f"{node_exec_id}_narration.mp3")
            audio_abspath = get_exec_file_path(
                execution_context.graph_exec_id, audio_filename
            )
            os.makedirs(os.path.dirname(audio_abspath), exist_ok=True)
            with open(audio_abspath, "wb") as f:
                f.write(audio_content)

            # Add narration to video
            source = extract_source_name(local_video_path)
            output_filename = MediaFileType(f"{node_exec_id}_narrated_{source}.mp4")
            output_abspath = get_exec_file_path(
                execution_context.graph_exec_id, output_filename
            )

            self._add_narration_to_video(
                video_abspath,
                audio_abspath,
                output_abspath,
                input_data.mix_mode,
                input_data.narration_volume,
                input_data.original_volume,
            )

            # Return as workspace path or data URI based on context
            video_out = await self._store_output_video(
                execution_context, output_filename
            )
            audio_out = await self._store_output_video(
                execution_context, audio_filename
            )

            yield "video_out", video_out
            yield "audio_file", audio_out

        except Exception as e:
            raise BlockExecutionError(
                message=f"Failed to add narration: {e}",
                block_name=self.name,
                block_id=str(self.id),
            ) from e
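The TTS call the block makes reduces to the following sketch (the API key is a placeholder; the voice and model IDs are the block's defaults):

from elevenlabs import ElevenLabs

client = ElevenLabs(api_key="YOUR_API_KEY")  # placeholder key
audio_chunks = client.text_to_speech.convert(
    voice_id="21m00Tcm4TlvDq8ikWAM",  # "Rachel", the block's default voice
    text="Hello world",
    model_id="eleven_multilingual_v2",
)
with open("narration.mp3", "wb") as f:
    f.write(b"".join(audio_chunks))  # the SDK yields the audio in chunks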
autogpt_platform/backend/backend/blocks/video/text_overlay.py (new file, 231 lines)
@@ -0,0 +1,231 @@
"""VideoTextOverlayBlock - Add text overlay to video."""

from typing import Literal

from moviepy import CompositeVideoClip, TextClip
from moviepy.video.io.VideoFileClip import VideoFileClip

from backend.blocks.video._utils import (
    extract_source_name,
    get_video_codecs,
    strip_chapters_inplace,
)
from backend.data.block import (
    Block,
    BlockCategory,
    BlockOutput,
    BlockSchemaInput,
    BlockSchemaOutput,
)
from backend.data.execution import ExecutionContext
from backend.data.model import SchemaField
from backend.util.exceptions import BlockExecutionError
from backend.util.file import MediaFileType, get_exec_file_path, store_media_file


class VideoTextOverlayBlock(Block):
    """Add text overlay/caption to video."""

    class Input(BlockSchemaInput):
        video_in: MediaFileType = SchemaField(
            description="Input video (URL, data URI, or local path)"
        )
        text: str = SchemaField(description="Text to overlay on video")
        position: Literal[
            "top",
            "center",
            "bottom",
            "top-left",
            "top-right",
            "bottom-left",
            "bottom-right",
        ] = SchemaField(description="Position of text on screen", default="bottom")
        start_time: float | None = SchemaField(
            description="When to show text (seconds). None = entire video",
            default=None,
            advanced=True,
        )
        end_time: float | None = SchemaField(
            description="When to hide text (seconds). None = until end",
            default=None,
            advanced=True,
        )
        font_size: int = SchemaField(
            description="Font size", default=48, ge=12, le=200, advanced=True
        )
        font_color: str = SchemaField(
            description="Font color (hex or name)", default="white", advanced=True
        )
        bg_color: str | None = SchemaField(
            description="Background color behind text (None for transparent)",
            default=None,
            advanced=True,
        )

    class Output(BlockSchemaOutput):
        video_out: MediaFileType = SchemaField(
            description="Video with text overlay (path or data URI)"
        )

    def __init__(self):
        super().__init__(
            id="8ef14de6-cc90-430a-8cfa-3a003be92454",
            description="Add text overlay/caption to video",
            categories={BlockCategory.MULTIMEDIA},
            input_schema=self.Input,
            output_schema=self.Output,
            disabled=True,  # Disable until we can lockdown imagemagick security policy
            test_input={"video_in": "/tmp/test.mp4", "text": "Hello World"},
            test_output=[("video_out", str)],
            test_mock={
                "_add_text_overlay": lambda *args: None,
                "_store_input_video": lambda *args, **kwargs: "test.mp4",
                "_store_output_video": lambda *args, **kwargs: "overlay_test.mp4",
            },
        )

    async def _store_input_video(
        self, execution_context: ExecutionContext, file: MediaFileType
    ) -> MediaFileType:
        """Store input video. Extracted for testability."""
        return await store_media_file(
            file=file,
            execution_context=execution_context,
            return_format="for_local_processing",
        )

    async def _store_output_video(
        self, execution_context: ExecutionContext, file: MediaFileType
    ) -> MediaFileType:
        """Store output video. Extracted for testability."""
        return await store_media_file(
            file=file,
            execution_context=execution_context,
            return_format="for_block_output",
        )

    def _add_text_overlay(
        self,
        video_abspath: str,
        output_abspath: str,
        text: str,
        position: str,
        start_time: float | None,
        end_time: float | None,
        font_size: int,
        font_color: str,
        bg_color: str | None,
    ) -> None:
        """Add text overlay to video. Extracted for testability."""
        video = None
        final = None
        txt_clip = None
        try:
            strip_chapters_inplace(video_abspath)
            video = VideoFileClip(video_abspath)

            txt_clip = TextClip(
                text=text,
                font_size=font_size,
                color=font_color,
                bg_color=bg_color,
            )

            # Position mapping
            pos_map = {
                "top": ("center", "top"),
                "center": ("center", "center"),
                "bottom": ("center", "bottom"),
                "top-left": ("left", "top"),
                "top-right": ("right", "top"),
                "bottom-left": ("left", "bottom"),
                "bottom-right": ("right", "bottom"),
            }

            txt_clip = txt_clip.with_position(pos_map[position])

            # Set timing
            start = start_time or 0
            end = end_time or video.duration
            duration = max(0, end - start)
            txt_clip = txt_clip.with_start(start).with_end(end).with_duration(duration)

            final = CompositeVideoClip([video, txt_clip])
            video_codec, audio_codec = get_video_codecs(output_abspath)
            final.write_videofile(
                output_abspath, codec=video_codec, audio_codec=audio_codec
            )

        finally:
            if txt_clip:
                txt_clip.close()
            if final:
                final.close()
            if video:
                video.close()

    async def run(
        self,
        input_data: Input,
        *,
        execution_context: ExecutionContext,
        node_exec_id: str,
        **kwargs,
    ) -> BlockOutput:
        # Validate time range if both are provided
        if (
            input_data.start_time is not None
            and input_data.end_time is not None
            and input_data.end_time <= input_data.start_time
        ):
            raise BlockExecutionError(
                message=f"end_time ({input_data.end_time}) must be greater than start_time ({input_data.start_time})",
                block_name=self.name,
                block_id=str(self.id),
            )

        try:
            assert execution_context.graph_exec_id is not None

            # Store the input video locally
            local_video_path = await self._store_input_video(
                execution_context, input_data.video_in
            )
            video_abspath = get_exec_file_path(
                execution_context.graph_exec_id, local_video_path
            )

            # Build output path
            source = extract_source_name(local_video_path)
            output_filename = MediaFileType(f"{node_exec_id}_overlay_{source}.mp4")
            output_abspath = get_exec_file_path(
                execution_context.graph_exec_id, output_filename
            )

            self._add_text_overlay(
                video_abspath,
                output_abspath,
                input_data.text,
                input_data.position,
                input_data.start_time,
                input_data.end_time,
                input_data.font_size,
                input_data.font_color,
                input_data.bg_color,
            )

            # Return as workspace path or data URI based on context
            video_out = await self._store_output_video(
                execution_context, output_filename
            )

            yield "video_out", video_out

        except BlockExecutionError:
            raise
        except Exception as e:
            raise BlockExecutionError(
                message=f"Failed to add text overlay: {e}",
                block_name=self.name,
                block_id=str(self.id),
            ) from e
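The overlay composition above reduces to this standalone sketch, mirroring the block's MoviePy calls (the path and text are placeholders):

from moviepy import CompositeVideoClip, TextClip
from moviepy.video.io.VideoFileClip import VideoFileClip

video = VideoFileClip("input.mp4")  # placeholder path
txt = (
    TextClip(text="Hello World", font_size=48, color="white")
    .with_position(("center", "bottom"))
    .with_start(0)
    .with_duration(video.duration)
)
final = CompositeVideoClip([video, txt])
final.write_videofile("overlay.mp4", codec="libx264", audio_codec="aac")
for c in (final, txt, video):
    c.close()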
@@ -165,10 +165,13 @@ class TranscribeYoutubeVideoBlock(Block):
         credentials: WebshareProxyCredentials,
         **kwargs,
     ) -> BlockOutput:
-        video_id = self.extract_video_id(input_data.youtube_url)
-        yield "video_id", video_id
-
-        transcript = self.get_transcript(video_id, credentials)
-        transcript_text = self.format_transcript(transcript=transcript)
-
-        yield "transcript", transcript_text
+        try:
+            video_id = self.extract_video_id(input_data.youtube_url)
+            transcript = self.get_transcript(video_id, credentials)
+            transcript_text = self.format_transcript(transcript=transcript)
+
+            # Only yield after all operations succeed
+            yield "video_id", video_id
+            yield "transcript", transcript_text
+        except Exception as e:
+            yield "error", str(e)
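Why the reorder matters, in miniature: a generator that yields before a later step fails still delivers that first value to the consumer. A small illustration (names are hypothetical):

def run():
    yield "video_id", "abc123"  # consumer receives this immediately
    raise RuntimeError("transcript fetch failed")

received = []
try:
    for output in run():
        received.append(output)
except RuntimeError:
    pass
print(received)  # [('video_id', 'abc123')] - partial output despite the failure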
@@ -246,7 +246,9 @@ class BlockSchema(BaseModel):
                     f"is not of type {CredentialsMetaInput.__name__}"
                 )

-            credentials_fields[field_name].validate_credentials_field_schema(cls)
+            CredentialsMetaInput.validate_credentials_field_schema(
+                cls.get_field_schema(field_name), field_name
+            )

         elif field_name in credentials_fields:
             raise KeyError(
@@ -317,6 +319,8 @@ class BlockSchema(BaseModel):
                 "credentials_provider": [config.get("provider", "google")],
                 "credentials_types": [config.get("type", "oauth2")],
                 "credentials_scopes": config.get("scopes"),
+                "is_auto_credential": True,
+                "input_field_name": info["field_name"],
             }
             result[kwarg_name] = CredentialsFieldInfo.model_validate(
                 auto_schema, by_alias=True
@@ -36,12 +36,14 @@ from backend.blocks.replicate.replicate_block import ReplicateModelBlock
 from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock
 from backend.blocks.talking_head import CreateTalkingAvatarVideoBlock
 from backend.blocks.text_to_speech_block import UnrealTextToSpeechBlock
+from backend.blocks.video.narration import VideoNarrationBlock
 from backend.data.block import Block, BlockCost, BlockCostType
 from backend.integrations.credentials_store import (
     aiml_api_credentials,
     anthropic_credentials,
     apollo_credentials,
     did_credentials,
+    elevenlabs_credentials,
     enrichlayer_credentials,
     groq_credentials,
     ideogram_credentials,
@@ -78,6 +80,7 @@ MODEL_COST: dict[LlmModel, int] = {
     LlmModel.CLAUDE_4_1_OPUS: 21,
     LlmModel.CLAUDE_4_OPUS: 21,
     LlmModel.CLAUDE_4_SONNET: 5,
+    LlmModel.CLAUDE_4_6_OPUS: 14,
     LlmModel.CLAUDE_4_5_HAIKU: 4,
     LlmModel.CLAUDE_4_5_OPUS: 14,
     LlmModel.CLAUDE_4_5_SONNET: 9,
@@ -639,4 +642,16 @@ BLOCK_COSTS: dict[Type[Block], list[BlockCost]] = {
             },
         ),
     ],
+    VideoNarrationBlock: [
+        BlockCost(
+            cost_amount=5,  # ElevenLabs TTS cost
+            cost_filter={
+                "credentials": {
+                    "id": elevenlabs_credentials.id,
+                    "provider": elevenlabs_credentials.provider,
+                    "type": elevenlabs_credentials.type,
+                }
+            },
+        )
+    ],
 }
@@ -134,6 +134,16 @@ async def test_block_credit_reset(server: SpinTestServer):
     month1 = datetime.now(timezone.utc).replace(month=1, day=1)
     user_credit.time_now = lambda: month1

+    # IMPORTANT: Set updatedAt to December of previous year to ensure it's
+    # in a different month than month1 (January). This fixes a timing bug
+    # where if the test runs in early February, 35 days ago would be January,
+    # matching the mocked month1 and preventing the refill from triggering.
+    dec_previous_year = month1.replace(year=month1.year - 1, month=12, day=15)
+    await UserBalance.prisma().update(
+        where={"userId": DEFAULT_USER_ID},
+        data={"updatedAt": dec_previous_year},
+    )
+
     # First call in month 1 should trigger refill
     balance = await user_credit.get_credits(DEFAULT_USER_ID)
     assert balance == REFILL_VALUE  # Should get 1000 credits
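The boundary condition the new comment describes can be checked in isolation:

from datetime import datetime, timezone

month1 = datetime.now(timezone.utc).replace(month=1, day=1)
dec_previous_year = month1.replace(year=month1.year - 1, month=12, day=15)
# updatedAt now falls in a different (year, month) than the mocked "now",
# so the monthly refill is guaranteed to trigger:
assert (dec_previous_year.year, dec_previous_year.month) != (month1.year, month1.month)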
@@ -3,7 +3,7 @@ import logging
 import uuid
 from collections import defaultdict
 from datetime import datetime, timezone
-from typing import TYPE_CHECKING, Annotated, Any, Literal, Optional, cast
+from typing import TYPE_CHECKING, Annotated, Any, Literal, Optional, Self, cast

 from prisma.enums import SubmissionStatus
 from prisma.models import (
@@ -20,7 +20,7 @@ from prisma.types import (
     AgentNodeLinkCreateInput,
     StoreListingVersionWhereInput,
 )
-from pydantic import BaseModel, BeforeValidator, Field, create_model
+from pydantic import BaseModel, BeforeValidator, Field
 from pydantic.fields import computed_field

 from backend.blocks.agent import AgentExecutorBlock
@@ -30,7 +30,6 @@ from backend.data.db import prisma as db
 from backend.data.dynamic_fields import is_tool_pin, sanitize_pin_name
 from backend.data.includes import MAX_GRAPH_VERSIONS_FETCH
 from backend.data.model import (
-    CredentialsField,
     CredentialsFieldInfo,
     CredentialsMetaInput,
     is_credentials_field_name,
@@ -45,7 +44,6 @@ from .block import (
     AnyBlockSchema,
     Block,
     BlockInput,
-    BlockSchema,
     BlockType,
     EmptySchema,
     get_block,
@@ -113,10 +111,12 @@ class Link(BaseDbModel):

 class Node(BaseDbModel):
     block_id: str
-    input_default: BlockInput = {}  # dict[input_name, default_value]
-    metadata: dict[str, Any] = {}
-    input_links: list[Link] = []
-    output_links: list[Link] = []
+    input_default: BlockInput = Field(  # dict[input_name, default_value]
+        default_factory=dict
+    )
+    metadata: dict[str, Any] = Field(default_factory=dict)
+    input_links: list[Link] = Field(default_factory=list)
+    output_links: list[Link] = Field(default_factory=list)

     @property
     def credentials_optional(self) -> bool:
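The switch to Field(default_factory=...) makes per-instance defaults explicit. For contrast, this is the classic shared-mutable-default pitfall in a plain Python class (Pydantic copies defaults per instance, so the change here is as much about clarity of intent as safety):

class Plain:
    links = []  # one list shared by every instance

a, b = Plain(), Plain()
a.links.append("x")
print(b.links)  # ['x'] - b sees a's mutation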
@@ -221,18 +221,33 @@ class NodeModel(Node):
         return result


-class BaseGraph(BaseDbModel):
+class GraphBaseMeta(BaseDbModel):
+    """
+    Shared base for `GraphMeta` and `BaseGraph`, with core graph metadata fields.
+    """
+
     version: int = 1
     is_active: bool = True
     name: str
     description: str
     instructions: str | None = None
     recommended_schedule_cron: str | None = None
-    nodes: list[Node] = []
-    links: list[Link] = []
     forked_from_id: str | None = None
     forked_from_version: int | None = None

+
+class BaseGraph(GraphBaseMeta):
+    """
+    Graph with nodes, links, and computed I/O schema fields.
+
+    Used to represent sub-graphs within a `Graph`. Contains the full graph
+    structure including nodes and links, plus computed fields for schemas
+    and trigger info. Does NOT include user_id or created_at (see GraphModel).
+    """
+
+    nodes: list[Node] = Field(default_factory=list)
+    links: list[Link] = Field(default_factory=list)
+
     @computed_field
     @property
     def input_schema(self) -> dict[str, Any]:
@@ -361,44 +376,78 @@ class GraphTriggerInfo(BaseModel):


 class Graph(BaseGraph):
-    sub_graphs: list[BaseGraph] = []  # Flattened sub-graphs
+    """Creatable graph model used in API create/update endpoints."""
+
+    sub_graphs: list[BaseGraph] = Field(default_factory=list)  # Flattened sub-graphs
+
+
+class GraphMeta(GraphBaseMeta):
+    """
+    Lightweight graph metadata model representing an existing graph from the database,
+    for use in listings and summaries.
+
+    Lacks `GraphModel`'s nodes, links, and expensive computed fields.
+    Use for list endpoints where full graph data is not needed and performance matters.
+    """
+
+    id: str  # type: ignore
+    version: int  # type: ignore
+    user_id: str
+    created_at: datetime
+
+    @classmethod
+    def from_db(cls, graph: "AgentGraph") -> Self:
+        return cls(
+            id=graph.id,
+            version=graph.version,
+            is_active=graph.isActive,
+            name=graph.name or "",
+            description=graph.description or "",
+            instructions=graph.instructions,
+            recommended_schedule_cron=graph.recommendedScheduleCron,
+            forked_from_id=graph.forkedFromId,
+            forked_from_version=graph.forkedFromVersion,
+            user_id=graph.userId,
+            created_at=graph.createdAt,
+        )
+
+
+class GraphModel(Graph, GraphMeta):
+    """
+    Full graph model representing an existing graph from the database.
+
+    This is the primary model for working with persisted graphs. Includes all
+    graph data (nodes, links, sub_graphs) plus user ownership and timestamps.
+    Provides computed fields (input_schema, output_schema, etc.) used during
+    set-up (frontend) and execution (backend).
+
+    Inherits from:
+    - `Graph`: provides structure (nodes, links, sub_graphs) and computed schemas
+    - `GraphMeta`: provides user_id, created_at for database records
+    """
+
+    nodes: list[NodeModel] = Field(default_factory=list)  # type: ignore

     @property
     def starting_nodes(self) -> list[NodeModel]:
         outbound_nodes = {link.sink_id for link in self.links}
         input_nodes = {
             node.id for node in self.nodes if node.block.block_type == BlockType.INPUT
         }
         return [
             node
             for node in self.nodes
             if node.id not in outbound_nodes or node.id in input_nodes
         ]

     @property
     def webhook_input_node(self) -> NodeModel | None:  # type: ignore
         return cast(NodeModel, super().webhook_input_node)

     @computed_field
     @property
     def credentials_input_schema(self) -> dict[str, Any]:
         schema = self._credentials_input_schema.jsonschema()

         # Determine which credential fields are required based on credentials_optional metadata
         graph_credentials_inputs = self.aggregate_credentials_inputs()
         required_fields = []

         # Build a map of node_id -> node for quick lookup
         all_nodes = {node.id: node for node in self.nodes}
         for sub_graph in self.sub_graphs:
             for node in sub_graph.nodes:
                 all_nodes[node.id] = node

         for field_key, (
             _field_info,
             node_field_pairs,
         ) in graph_credentials_inputs.items():
             # A field is required if ANY node using it has credentials_optional=False
             is_required = False
             for node_id, _field_name in node_field_pairs:
                 node = all_nodes.get(node_id)
                 if node and not node.credentials_optional:
                     is_required = True
                     break

             if is_required:
                 required_fields.append(field_key)

         schema["required"] = required_fields
         return schema

     @property
     def _credentials_input_schema(self) -> type[BlockSchema]:
-        graph_credentials_inputs = self.aggregate_credentials_inputs()
+        graph_credentials_inputs = self.regular_credentials_inputs
         logger.debug(
             f"Combined credentials input fields for graph #{self.id} ({self.name}): "
             f"{graph_credentials_inputs}"
@@ -406,8 +455,8 @@ class Graph(BaseGraph):
|
||||
|
||||
# Warn if same-provider credentials inputs can't be combined (= bad UX)
|
||||
graph_cred_fields = list(graph_credentials_inputs.values())
|
||||
for i, (field, keys) in enumerate(graph_cred_fields):
|
||||
for other_field, other_keys in list(graph_cred_fields)[i + 1 :]:
|
||||
for i, (field, keys, _) in enumerate(graph_cred_fields):
|
||||
for other_field, other_keys, _ in list(graph_cred_fields)[i + 1 :]:
|
||||
if field.provider != other_field.provider:
|
||||
continue
|
||||
if ProviderName.HTTP in field.provider:
|
||||
@@ -423,31 +472,78 @@ class Graph(BaseGraph):
|
||||
f"keys: {keys} <> {other_keys}."
|
||||
)
|
||||
|
||||
fields: dict[str, tuple[type[CredentialsMetaInput], CredentialsMetaInput]] = {
|
||||
agg_field_key: (
|
||||
CredentialsMetaInput[
|
||||
Literal[tuple(field_info.provider)], # type: ignore
|
||||
Literal[tuple(field_info.supported_types)], # type: ignore
|
||||
],
|
||||
CredentialsField(
|
||||
required_scopes=set(field_info.required_scopes or []),
|
||||
discriminator=field_info.discriminator,
|
||||
discriminator_mapping=field_info.discriminator_mapping,
|
||||
discriminator_values=field_info.discriminator_values,
|
||||
),
|
||||
)
|
||||
for agg_field_key, (field_info, _) in graph_credentials_inputs.items()
|
||||
}
|
||||
# Build JSON schema directly to avoid expensive create_model + validation overhead
|
||||
properties = {}
|
||||
required_fields = []
|
||||
|
||||
return create_model(
|
||||
self.name.replace(" ", "") + "CredentialsInputSchema",
|
||||
__base__=BlockSchema,
|
||||
**fields, # type: ignore
|
||||
)
|
||||
for agg_field_key, (
|
||||
field_info,
|
||||
_,
|
||||
is_required,
|
||||
) in graph_credentials_inputs.items():
|
||||
providers = list(field_info.provider)
|
||||
cred_types = list(field_info.supported_types)
|
||||
|
||||
field_schema: dict[str, Any] = {
|
||||
"credentials_provider": providers,
|
||||
"credentials_types": cred_types,
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"id": {"title": "Id", "type": "string"},
|
||||
"title": {
|
||||
"anyOf": [{"type": "string"}, {"type": "null"}],
|
||||
"default": None,
|
||||
"title": "Title",
|
||||
},
|
||||
"provider": {
|
||||
"title": "Provider",
|
||||
"type": "string",
|
||||
**(
|
||||
{"enum": providers}
|
||||
if len(providers) > 1
|
||||
else {"const": providers[0]}
|
||||
),
|
||||
},
|
||||
"type": {
|
||||
"title": "Type",
|
||||
"type": "string",
|
||||
**(
|
||||
{"enum": cred_types}
|
||||
if len(cred_types) > 1
|
||||
else {"const": cred_types[0]}
|
||||
),
|
||||
},
|
||||
},
|
||||
"required": ["id", "provider", "type"],
|
||||
}
|
||||
|
||||
# Add other (optional) field info items
|
||||
field_schema.update(
|
||||
field_info.model_dump(
|
||||
by_alias=True,
|
||||
exclude_defaults=True,
|
||||
exclude={"provider", "supported_types"}, # already included above
|
||||
)
|
||||
)
|
||||
|
||||
# Ensure field schema is well-formed
|
||||
CredentialsMetaInput.validate_credentials_field_schema(
|
||||
field_schema, agg_field_key
|
||||
)
|
||||
|
||||
properties[agg_field_key] = field_schema
|
||||
if is_required:
|
||||
required_fields.append(agg_field_key)
|
||||
|
||||
return {
|
||||
"type": "object",
|
||||
"properties": properties,
|
||||
"required": required_fields,
|
||||
}
|
||||
|
||||
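# Illustrative shape of the dict returned by the schema builder above, for a
# hypothetical single-provider field "github_credentials" (github/api_key,
# required). The field name and values are assumptions for illustration, not
# taken from the diff; model_dump(exclude_defaults=True) may add further keys.
example_credentials_input_schema = {
    "type": "object",
    "properties": {
        "github_credentials": {
            "credentials_provider": ["github"],
            "credentials_types": ["api_key"],
            "type": "object",
            "properties": {
                "id": {"title": "Id", "type": "string"},
                "title": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": None, "title": "Title"},
                "provider": {"title": "Provider", "type": "string", "const": "github"},
                "type": {"title": "Type", "type": "string", "const": "api_key"},
            },
            "required": ["id", "provider", "type"],
        },
    },
    "required": ["github_credentials"],
}
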
def aggregate_credentials_inputs(
self,
) -> dict[str, tuple[CredentialsFieldInfo, set[tuple[str, str]]]]:
) -> dict[str, tuple[CredentialsFieldInfo, set[tuple[str, str]], bool]]:
"""
Returns:
dict[aggregated_field_key, tuple(
@@ -455,13 +551,19 @@ class Graph(BaseGraph):
(now includes discriminator_values from matching nodes)
set[(node_id, field_name)]: Node credentials fields that are
compatible with this aggregated field spec
bool: True if the field is required (any node has credentials_optional=False)
)]
"""
# First collect all credential field data with input defaults
node_credential_data = []
# Track (field_info, (node_id, field_name), is_required) for each credential field
node_credential_data: list[tuple[CredentialsFieldInfo, tuple[str, str]]] = []
node_required_map: dict[str, bool] = {} # node_id -> is_required

for graph in [self] + self.sub_graphs:
for node in graph.nodes:
# Track if this node requires credentials (credentials_optional=False means required)
node_required_map[node.id] = not node.credentials_optional

for (
field_name,
field_info,
@@ -485,37 +587,43 @@ class Graph(BaseGraph):
)

# Combine credential field info (this will merge discriminator_values automatically)
return CredentialsFieldInfo.combine(*node_credential_data)
combined = CredentialsFieldInfo.combine(*node_credential_data)


class GraphModel(Graph):
user_id: str
nodes: list[NodeModel] = [] # type: ignore

created_at: datetime

@property
def starting_nodes(self) -> list[NodeModel]:
outbound_nodes = {link.sink_id for link in self.links}
input_nodes = {
node.id for node in self.nodes if node.block.block_type == BlockType.INPUT
# Add is_required flag to each aggregated field
# A field is required if ANY node using it has credentials_optional=False
return {
key: (
field_info,
node_field_pairs,
any(
node_required_map.get(node_id, True)
for node_id, _ in node_field_pairs
),
)
for key, (field_info, node_field_pairs) in combined.items()
}
return [
node
for node in self.nodes
if node.id not in outbound_nodes or node.id in input_nodes
]

@property
def webhook_input_node(self) -> NodeModel | None: # type: ignore
return cast(NodeModel, super().webhook_input_node)
def regular_credentials_inputs(
self,
) -> dict[str, tuple[CredentialsFieldInfo, set[tuple[str, str]], bool]]:
"""Credentials that need explicit user mapping (CredentialsMetaInput fields)."""
return {
k: v
for k, v in self.aggregate_credentials_inputs().items()
if not v[0].is_auto_credential
}

def meta(self) -> "GraphMeta":
"""
Returns a GraphMeta object with metadata about the graph.
This is used to return metadata about the graph without exposing nodes and links.
"""
return GraphMeta.from_graph(self)
@property
def auto_credentials_inputs(
self,
) -> dict[str, tuple[CredentialsFieldInfo, set[tuple[str, str]], bool]]:
"""Credentials embedded in file fields (_credentials_id), resolved at execution time."""
return {
k: v
for k, v in self.aggregate_credentials_inputs().items()
if v[0].is_auto_credential
}

def reassign_ids(self, user_id: str, reassign_graph_id: bool = False):
"""
@@ -567,6 +675,16 @@ class GraphModel(Graph):
) and graph_id in graph_id_map:
node.input_default["graph_id"] = graph_id_map[graph_id]

# Clear auto-credentials references (e.g., _credentials_id in
# GoogleDriveFile fields) so the new user must re-authenticate
# with their own account
for node in graph.nodes:
if not node.input_default:
continue
for key, value in node.input_default.items():
if isinstance(value, dict) and "_credentials_id" in value:
del value["_credentials_id"]

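# Effect sketch of the clearing loop above on one forked node's input_default
# (field name and IDs are illustrative assumptions):
#   before: {"spreadsheet": {"_credentials_id": "cred-1", "id": "f1", "name": "a.xlsx"}}
#   after:  {"spreadsheet": {"id": "f1", "name": "a.xlsx"}}
# The key is deleted rather than set to None, so downstream auto-credential
# resolution errors loudly instead of treating the value as chained data.
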
def validate_graph(
self,
for_run: bool = False,
@@ -799,13 +917,14 @@ class GraphModel(Graph):
if is_static_output_block(link.source_id):
link.is_static = True # Each value block output should be static.

@staticmethod
def from_db(
@classmethod
def from_db( # type: ignore[reportIncompatibleMethodOverride]
cls,
graph: AgentGraph,
for_export: bool = False,
sub_graphs: list[AgentGraph] | None = None,
) -> "GraphModel":
return GraphModel(
) -> Self:
return cls(
id=graph.id,
user_id=graph.userId if not for_export else "",
version=graph.version,
@@ -831,17 +950,28 @@ class GraphModel(Graph):
],
)

def hide_nodes(self) -> "GraphModelWithoutNodes":
"""
Returns a copy of the `GraphModel` with nodes, links, and sub-graphs hidden
(excluded from serialization). They are still present in the model instance
so all computed fields (e.g. `credentials_input_schema`) still work.
"""
return GraphModelWithoutNodes.model_validate(self, from_attributes=True)

class GraphMeta(Graph):
user_id: str

# Easy work-around to prevent exposing nodes and links in the API response
nodes: list[NodeModel] = Field(default=[], exclude=True) # type: ignore
links: list[Link] = Field(default=[], exclude=True)
class GraphModelWithoutNodes(GraphModel):
"""
GraphModel variant that excludes nodes, links, and sub-graphs from serialization.

@staticmethod
def from_graph(graph: GraphModel) -> "GraphMeta":
return GraphMeta(**graph.model_dump())
Used in contexts like the store where exposing internal graph structure
is not desired. Inherits all computed fields from GraphModel but marks
nodes and links as excluded from JSON output.
"""

nodes: list[NodeModel] = Field(default_factory=list, exclude=True)
links: list[Link] = Field(default_factory=list, exclude=True)

sub_graphs: list[BaseGraph] = Field(default_factory=list, exclude=True)


class GraphsPaginated(BaseModel):
@@ -912,21 +1042,11 @@ async def list_graphs_paginated(
where=where_clause,
distinct=["id"],
order={"version": "desc"},
include=AGENT_GRAPH_INCLUDE,
skip=offset,
take=page_size,
)

graph_models: list[GraphMeta] = []
for graph in graphs:
try:
graph_meta = GraphModel.from_db(graph).meta()
# Trigger serialization to validate that the graph is well formed
graph_meta.model_dump()
graph_models.append(graph_meta)
except Exception as e:
logger.error(f"Error processing graph {graph.id}: {e}")
continue
graph_models = [GraphMeta.from_db(graph) for graph in graphs]

return GraphsPaginated(
graphs=graph_models,

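# Usage sketch (hypothetical `graph: GraphModel` instance) of the new 3-tuple
# shape and the regular/auto split introduced above:
for key, (field_info, node_field_pairs, is_required) in graph.aggregate_credentials_inputs().items():
    print(key, sorted(field_info.provider), len(node_field_pairs), is_required)
# graph.regular_credentials_inputs -> only fields the user must map explicitly
# graph.auto_credentials_inputs    -> fields resolved at run time from _credentials_id
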
@@ -463,3 +463,328 @@ def test_node_credentials_optional_with_other_metadata():
assert node.credentials_optional is True
assert node.metadata["position"] == {"x": 100, "y": 200}
assert node.metadata["customized_name"] == "My Custom Node"


# ============================================================================
# Tests for _reassign_ids credential clearing (Fix 3: SECRT-1772)
def test_combine_preserves_is_auto_credential_flag():
"""
CredentialsFieldInfo.combine() must propagate is_auto_credential and
input_field_name to the combined result. Regression test for reviewer
finding that combine() dropped these fields.
"""
from backend.data.model import CredentialsFieldInfo

auto_field = CredentialsFieldInfo.model_validate(
{
"credentials_provider": ["google"],
"credentials_types": ["oauth2"],
"credentials_scopes": ["drive.readonly"],
"is_auto_credential": True,
"input_field_name": "spreadsheet",
},
by_alias=True,
)

# combine() takes *args of (field_info, key) tuples
combined = CredentialsFieldInfo.combine(
(auto_field, ("node-1", "credentials")),
(auto_field, ("node-2", "credentials")),
)

assert len(combined) == 1
group_key = next(iter(combined))
combined_info, combined_keys = combined[group_key]

assert combined_info.is_auto_credential is True
assert combined_info.input_field_name == "spreadsheet"
assert combined_keys == {("node-1", "credentials"), ("node-2", "credentials")}


def test_combine_preserves_regular_credential_defaults():
"""Regular credentials should have is_auto_credential=False after combine()."""
from backend.data.model import CredentialsFieldInfo

regular_field = CredentialsFieldInfo.model_validate(
{
"credentials_provider": ["github"],
"credentials_types": ["api_key"],
"is_auto_credential": False,
},
by_alias=True,
)

combined = CredentialsFieldInfo.combine(
(regular_field, ("node-1", "credentials")),
)

group_key = next(iter(combined))
combined_info, _ = combined[group_key]

assert combined_info.is_auto_credential is False
assert combined_info.input_field_name is None


# ============================================================================


def test_reassign_ids_clears_credentials_id():
"""
[SECRT-1772] _reassign_ids should clear _credentials_id from
GoogleDriveFile-style input_default fields so forked agents
don't retain the original creator's credential references.
"""
from backend.data.graph import GraphModel

node = Node(
id="node-1",
block_id=StoreValueBlock().id,
input_default={
"spreadsheet": {
"_credentials_id": "original-cred-id",
"id": "file-123",
"name": "test.xlsx",
"mimeType": "application/vnd.google-apps.spreadsheet",
"url": "https://docs.google.com/spreadsheets/d/file-123",
},
},
)

graph = Graph(
id="test-graph",
name="Test",
description="Test",
nodes=[node],
links=[],
)

GraphModel._reassign_ids(graph, user_id="new-user", graph_id_map={})

# _credentials_id key should be removed (not set to None) so that
# _acquire_auto_credentials correctly errors instead of treating it as chained data
assert "_credentials_id" not in graph.nodes[0].input_default["spreadsheet"]


def test_reassign_ids_preserves_non_credential_fields():
"""
Regression guard: _reassign_ids should NOT modify non-credential fields
like name, mimeType, id, url.
"""
from backend.data.graph import GraphModel

node = Node(
id="node-1",
block_id=StoreValueBlock().id,
input_default={
"spreadsheet": {
"_credentials_id": "cred-abc",
"id": "file-123",
"name": "test.xlsx",
"mimeType": "application/vnd.google-apps.spreadsheet",
"url": "https://docs.google.com/spreadsheets/d/file-123",
},
},
)

graph = Graph(
id="test-graph",
name="Test",
description="Test",
nodes=[node],
links=[],
)

GraphModel._reassign_ids(graph, user_id="new-user", graph_id_map={})

field = graph.nodes[0].input_default["spreadsheet"]
assert field["id"] == "file-123"
assert field["name"] == "test.xlsx"
assert field["mimeType"] == "application/vnd.google-apps.spreadsheet"
assert field["url"] == "https://docs.google.com/spreadsheets/d/file-123"


def test_reassign_ids_handles_no_credentials():
"""
Regression guard: _reassign_ids should not error when input_default
has no dict fields with _credentials_id.
"""
from backend.data.graph import GraphModel

node = Node(
id="node-1",
block_id=StoreValueBlock().id,
input_default={
"input": "some value",
"another_input": 42,
},
)

graph = Graph(
id="test-graph",
name="Test",
description="Test",
nodes=[node],
links=[],
)

GraphModel._reassign_ids(graph, user_id="new-user", graph_id_map={})

# Should not error, fields unchanged
assert graph.nodes[0].input_default["input"] == "some value"
assert graph.nodes[0].input_default["another_input"] == 42


def test_reassign_ids_handles_multiple_credential_fields():
"""
[SECRT-1772] When a node has multiple dict fields with _credentials_id,
ALL of them should be cleared.
"""
from backend.data.graph import GraphModel

node = Node(
id="node-1",
block_id=StoreValueBlock().id,
input_default={
"spreadsheet": {
"_credentials_id": "cred-1",
"id": "file-1",
"name": "file1.xlsx",
},
"doc_file": {
"_credentials_id": "cred-2",
"id": "file-2",
"name": "file2.docx",
},
"plain_input": "not a dict",
},
)

graph = Graph(
id="test-graph",
name="Test",
description="Test",
nodes=[node],
links=[],
)

GraphModel._reassign_ids(graph, user_id="new-user", graph_id_map={})

assert "_credentials_id" not in graph.nodes[0].input_default["spreadsheet"]
assert "_credentials_id" not in graph.nodes[0].input_default["doc_file"]
assert graph.nodes[0].input_default["plain_input"] == "not a dict"

# ============================================================================
# Tests for discriminate() field propagation
def test_discriminate_preserves_is_auto_credential_flag():
"""
CredentialsFieldInfo.discriminate() must propagate is_auto_credential and
input_field_name to the discriminated result. Regression test for
discriminate() dropping these fields (same class of bug as combine()).
"""
from backend.data.model import CredentialsFieldInfo

auto_field = CredentialsFieldInfo.model_validate(
{
"credentials_provider": ["google", "openai"],
"credentials_types": ["oauth2"],
"credentials_scopes": ["drive.readonly"],
"is_auto_credential": True,
"input_field_name": "spreadsheet",
"discriminator": "model",
"discriminator_mapping": {"gpt-4": "openai", "gemini": "google"},
},
by_alias=True,
)

discriminated = auto_field.discriminate("gemini")

assert discriminated.is_auto_credential is True
assert discriminated.input_field_name == "spreadsheet"
assert discriminated.provider == frozenset(["google"])


def test_discriminate_preserves_regular_credential_defaults():
"""Regular credentials should have is_auto_credential=False after discriminate()."""
from backend.data.model import CredentialsFieldInfo

regular_field = CredentialsFieldInfo.model_validate(
{
"credentials_provider": ["google", "openai"],
"credentials_types": ["api_key"],
"is_auto_credential": False,
"discriminator": "model",
"discriminator_mapping": {"gpt-4": "openai", "gemini": "google"},
},
by_alias=True,
)

discriminated = regular_field.discriminate("gpt-4")

assert discriminated.is_auto_credential is False
assert discriminated.input_field_name is None
assert discriminated.provider == frozenset(["openai"])


# ============================================================================
# Tests for credentials_input_schema excluding auto_credentials
def test_credentials_input_schema_excludes_auto_creds():
"""
GraphModel.credentials_input_schema should exclude auto_credentials
(is_auto_credential=True) from the schema. Auto_credentials are
transparently resolved at execution time via file picker data.
"""
from datetime import datetime, timezone
from unittest.mock import PropertyMock, patch

from backend.data.graph import GraphModel, NodeModel
from backend.data.model import CredentialsFieldInfo

regular_field_info = CredentialsFieldInfo.model_validate(
{
"credentials_provider": ["github"],
"credentials_types": ["api_key"],
"is_auto_credential": False,
},
by_alias=True,
)

graph = GraphModel(
id="test-graph",
version=1,
name="Test",
description="Test",
user_id="test-user",
created_at=datetime.now(timezone.utc),
nodes=[
NodeModel(
id="node-1",
block_id=StoreValueBlock().id,
input_default={},
graph_id="test-graph",
graph_version=1,
),
],
links=[],
)

# Mock regular_credentials_inputs to return only the non-auto field (3-tuple)
regular_only = {
"github_credentials": (
regular_field_info,
{("node-1", "credentials")},
True,
),
}

with patch.object(
type(graph),
"regular_credentials_inputs",
new_callable=PropertyMock,
return_value=regular_only,
):
schema = graph.credentials_input_schema
field_names = set(schema.get("properties", {}).keys())
# Should include regular credential but NOT auto_credential
assert "github_credentials" in field_names
assert "google_credentials" not in field_names

@@ -19,7 +19,6 @@ from typing import (
cast,
get_args,
)
from urllib.parse import urlparse
from uuid import uuid4

from prisma.enums import CreditTransactionType, OnboardingStep
@@ -42,6 +41,7 @@ from typing_extensions import TypedDict

from backend.integrations.providers import ProviderName
from backend.util.json import loads as json_loads
from backend.util.request import parse_url
from backend.util.settings import Secrets

# Type alias for any provider name (including custom ones)
@@ -163,7 +163,6 @@ class User(BaseModel):
if TYPE_CHECKING:
from prisma.models import User as PrismaUser

from backend.data.block import BlockSchema

T = TypeVar("T")
logger = logging.getLogger(__name__)
@@ -397,19 +396,25 @@ class HostScopedCredentials(_BaseCredentials):
def matches_url(self, url: str) -> bool:
"""Check if this credential should be applied to the given URL."""

parsed_url = urlparse(url)
# Extract hostname without port
request_host = parsed_url.hostname
request_host, request_port = _extract_host_from_url(url)
cred_scope_host, cred_scope_port = _extract_host_from_url(self.host)
if not request_host:
return False

# Simple host matching - exact match or wildcard subdomain match
if self.host == request_host:
# If a port is specified in credential host, the request host port must match
if cred_scope_port is not None and request_port != cred_scope_port:
return False
# Non-standard ports are only allowed if explicitly specified in credential host
elif cred_scope_port is None and request_port not in (80, 443, None):
return False

# Simple host matching
if cred_scope_host == request_host:
return True

# Support wildcard matching (e.g., "*.example.com" matches "api.example.com")
if self.host.startswith("*."):
domain = self.host[2:] # Remove "*."
if cred_scope_host.startswith("*."):
domain = cred_scope_host[2:] # Remove "*."
return request_host.endswith(f".{domain}") or request_host == domain

return False
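
# Condensed sketch of the port rule introduced above (pure-function form;
# the helper name is illustrative, not part of the diff):
def _port_ok(cred_scope_port: int | None, request_port: int | None) -> bool:
    if cred_scope_port is not None:
        return request_port == cred_scope_port  # explicit port must match exactly
    return request_port in (80, 443, None)      # otherwise only standard ports match
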
@@ -502,15 +507,13 @@ class CredentialsMetaInput(BaseModel, Generic[CP, CT]):
def allowed_cred_types(cls) -> tuple[CredentialsType, ...]:
return get_args(cls.model_fields["type"].annotation)

@classmethod
def validate_credentials_field_schema(cls, model: type["BlockSchema"]):
@staticmethod
def validate_credentials_field_schema(
field_schema: dict[str, Any], field_name: str
):
"""Validates the schema of a credentials input field"""
field_name = next(
name for name, type in model.get_credentials_fields().items() if type is cls
)
field_schema = model.jsonschema()["properties"][field_name]
try:
schema_extra = CredentialsFieldInfo[CP, CT].model_validate(field_schema)
field_info = CredentialsFieldInfo[CP, CT].model_validate(field_schema)
except ValidationError as e:
if "Field required [type=missing" not in str(e):
raise
@@ -520,11 +523,11 @@ class CredentialsMetaInput(BaseModel, Generic[CP, CT]):
f"{field_schema}"
) from e

providers = cls.allowed_providers()
providers = field_info.provider
if (
providers is not None
and len(providers) > 1
and not schema_extra.discriminator
and not field_info.discriminator
):
raise TypeError(
f"Multi-provider CredentialsField '{field_name}' "
@@ -551,13 +554,13 @@ class CredentialsMetaInput(BaseModel, Generic[CP, CT]):
)


def _extract_host_from_url(url: str) -> str:
"""Extract host from URL for grouping host-scoped credentials."""
def _extract_host_from_url(url: str) -> tuple[str, int | None]:
"""Extract host and port from URL for grouping host-scoped credentials."""
try:
parsed = urlparse(url)
return parsed.hostname or url
parsed = parse_url(url)
return parsed.hostname or url, parsed.port
except Exception:
return ""
return "", None

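# Expected (host, port) shapes from the new signature above (illustrative
# inputs, assuming parse_url() follows urllib-style hostname/port semantics):
#   _extract_host_from_url("https://api.example.com:8443/v1") -> ("api.example.com", 8443)
#   _extract_host_from_url("http://localhost/x")              -> ("localhost", None)
#   on parse failure                                          -> ("", None)
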
class CredentialsFieldInfo(BaseModel, Generic[CP, CT]):
@@ -568,6 +571,8 @@ class CredentialsFieldInfo(BaseModel, Generic[CP, CT]):
discriminator: Optional[str] = None
discriminator_mapping: Optional[dict[str, CP]] = None
discriminator_values: set[Any] = Field(default_factory=set)
is_auto_credential: bool = False
input_field_name: Optional[str] = None

@classmethod
def combine(
@@ -606,7 +611,7 @@ class CredentialsFieldInfo(BaseModel, Generic[CP, CT]):
providers = frozenset(
[cast(CP, "http")]
+ [
cast(CP, _extract_host_from_url(str(value)))
cast(CP, parse_url(str(value)).netloc)
for value in field.discriminator_values
]
)
@@ -648,6 +653,9 @@ class CredentialsFieldInfo(BaseModel, Generic[CP, CT]):
+ "_credentials"
)

# Propagate is_auto_credential from the combined field.
# All fields in a group should share the same is_auto_credential
# value since auto and regular credentials serve different purposes.
result[group_key] = (
CredentialsFieldInfo[CP, CT](
credentials_provider=combined.provider,
@@ -656,6 +664,8 @@ class CredentialsFieldInfo(BaseModel, Generic[CP, CT]):
discriminator=combined.discriminator,
discriminator_mapping=combined.discriminator_mapping,
discriminator_values=set(all_discriminator_values),
is_auto_credential=combined.is_auto_credential,
input_field_name=combined.input_field_name,
),
combined_keys,
)
@@ -681,6 +691,8 @@ class CredentialsFieldInfo(BaseModel, Generic[CP, CT]):
discriminator=self.discriminator,
discriminator_mapping=self.discriminator_mapping,
discriminator_values=self.discriminator_values,
is_auto_credential=self.is_auto_credential,
input_field_name=self.input_field_name,
)


@@ -79,10 +79,23 @@ class TestHostScopedCredentials:
headers={"Authorization": SecretStr("Bearer token")},
)

assert creds.matches_url("http://localhost:8080/api/v1")
# Non-standard ports require explicit port in credential host
assert not creds.matches_url("http://localhost:8080/api/v1")
assert creds.matches_url("https://localhost:443/secure/endpoint")
assert creds.matches_url("http://localhost/simple")

def test_matches_url_with_explicit_port(self):
"""Test URL matching with explicit port in credential host."""
creds = HostScopedCredentials(
provider="custom",
host="localhost:8080",
headers={"Authorization": SecretStr("Bearer token")},
)

assert creds.matches_url("http://localhost:8080/api/v1")
assert not creds.matches_url("http://localhost:3000/api/v1")
assert not creds.matches_url("http://localhost/simple")

def test_empty_headers_dict(self):
"""Test HostScopedCredentials with empty headers."""
creds = HostScopedCredentials(
@@ -128,8 +141,20 @@ class TestHostScopedCredentials:
("*.example.com", "https://sub.api.example.com/test", True),
("*.example.com", "https://example.com/test", True),
("*.example.com", "https://example.org/test", False),
("localhost", "http://localhost:3000/test", True),
# Non-standard ports require explicit port in credential host
("localhost", "http://localhost:3000/test", False),
("localhost:3000", "http://localhost:3000/test", True),
("localhost", "http://127.0.0.1:3000/test", False),
# IPv6 addresses (frontend stores with brackets via URL.hostname)
("[::1]", "http://[::1]/test", True),
("[::1]", "http://[::1]:80/test", True),
("[::1]", "https://[::1]:443/test", True),
("[::1]", "http://[::1]:8080/test", False), # Non-standard port
("[::1]:8080", "http://[::1]:8080/test", True),
("[::1]:8080", "http://[::1]:9090/test", False),
("[2001:db8::1]", "http://[2001:db8::1]/path", True),
("[2001:db8::1]", "https://[2001:db8::1]:443/path", True),
("[2001:db8::1]", "http://[2001:db8::ff]/path", False),
],
)
def test_url_matching_parametrized(self, host: str, test_url: str, expected: bool):

@@ -172,6 +172,81 @@ def execute_graph(
T = TypeVar("T")


async def _acquire_auto_credentials(
input_model: type[BlockSchema],
input_data: dict[str, Any],
creds_manager: "IntegrationCredentialsManager",
user_id: str,
) -> tuple[dict[str, Any], list[AsyncRedisLock]]:
"""
Resolve auto_credentials from GoogleDriveFileField-style inputs.

Returns:
(extra_exec_kwargs, locks): kwargs to inject into block execution, and
credential locks to release after execution completes.
"""
extra_exec_kwargs: dict[str, Any] = {}
locks: list[AsyncRedisLock] = []

# NOTE: If a block ever has multiple auto-credential fields, a ValueError
# on a later field will strand locks acquired for earlier fields. They'll
# auto-expire via Redis TTL, but add a try/except to release partial locks
# if that becomes a real scenario.
for kwarg_name, info in input_model.get_auto_credentials_fields().items():
field_name = info["field_name"]
field_data = input_data.get(field_name)

if field_data and isinstance(field_data, dict):
# Check if _credentials_id key exists in the field data
if "_credentials_id" in field_data:
cred_id = field_data["_credentials_id"]
if cred_id:
# Credential ID provided - acquire credentials
provider = info.get("config", {}).get(
"provider", "external service"
)
file_name = field_data.get("name", "selected file")
try:
credentials, lock = await creds_manager.acquire(
user_id, cred_id
)
locks.append(lock)
extra_exec_kwargs[kwarg_name] = credentials
except ValueError:
raise ValueError(
f"{provider.capitalize()} credentials for "
f"'{file_name}' in field '{field_name}' are not "
f"available in your account. "
f"This can happen if the agent was created by another "
f"user or the credentials were deleted. "
f"Please open the agent in the builder and re-select "
f"the file to authenticate with your own account."
)
# else: _credentials_id is explicitly None, skip (chained data)
else:
# _credentials_id key missing entirely - this is an error
provider = info.get("config", {}).get("provider", "external service")
file_name = field_data.get("name", "selected file")
raise ValueError(
f"Authentication missing for '{file_name}' in field "
f"'{field_name}'. Please re-select the file to authenticate "
f"with {provider.capitalize()}."
)
elif field_data is None and field_name not in input_data:
# Field not in input_data at all = connected from upstream block, skip
pass
else:
# field_data is None/empty but key IS in input_data = user didn't select
provider = info.get("config", {}).get("provider", "external service")
file_name = field_data.get("name", "selected file")
raise ValueError(
f"No file selected for '{field_name}'. "
f"Please select a file to provide "
f"{provider.capitalize()} authentication."
)

return extra_exec_kwargs, locks

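# Decision sketch for the helper above, for an auto-credential field named
# "spreadsheet" (the field name is illustrative):
#   {"spreadsheet": {"_credentials_id": "c1", ...}}  -> acquire credentials "c1"
#   {"spreadsheet": {"_credentials_id": None, ...}}  -> skip (chained data)
#   {"spreadsheet": {...}}   # key missing from dict -> ValueError: re-auth needed
#   {"spreadsheet": None}    # key present, no file  -> ValueError: no file selected
#   {}                       # field absent entirely -> skip (upstream connection)
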
async def execute_node(
node: Node,
data: NodeExecutionEntry,
@@ -271,41 +346,14 @@ async def execute_node(
extra_exec_kwargs[field_name] = credentials

# Handle auto-generated credentials (e.g., from GoogleDriveFileInput)
for kwarg_name, info in input_model.get_auto_credentials_fields().items():
field_name = info["field_name"]
field_data = input_data.get(field_name)
if field_data and isinstance(field_data, dict):
# Check if _credentials_id key exists in the field data
if "_credentials_id" in field_data:
cred_id = field_data["_credentials_id"]
if cred_id:
# Credential ID provided - acquire credentials
provider = info.get("config", {}).get(
"provider", "external service"
)
file_name = field_data.get("name", "selected file")
try:
credentials, lock = await creds_manager.acquire(
user_id, cred_id
)
creds_locks.append(lock)
extra_exec_kwargs[kwarg_name] = credentials
except ValueError:
# Credential was deleted or doesn't exist
raise ValueError(
f"Authentication expired for '{file_name}' in field '{field_name}'. "
f"The saved {provider.capitalize()} credentials no longer exist. "
f"Please re-select the file to re-authenticate."
)
# else: _credentials_id is explicitly None, skip credentials (for chained data)
else:
# _credentials_id key missing entirely - this is an error
provider = info.get("config", {}).get("provider", "external service")
file_name = field_data.get("name", "selected file")
raise ValueError(
f"Authentication missing for '{file_name}' in field '{field_name}'. "
f"Please re-select the file to authenticate with {provider.capitalize()}."
)
auto_extra_kwargs, auto_locks = await _acquire_auto_credentials(
input_model=input_model,
input_data=input_data,
creds_manager=creds_manager,
user_id=user_id,
)
extra_exec_kwargs.update(auto_extra_kwargs)
creds_locks.extend(auto_locks)

output_size = 0


@@ -0,0 +1,320 @@
"""
Tests for auto_credentials handling in execute_node().

These test the _acquire_auto_credentials() helper function extracted from
execute_node() (manager.py lines 273-308).
"""

import pytest
from pytest_mock import MockerFixture


@pytest.fixture
def google_drive_file_data():
return {
"valid": {
"_credentials_id": "cred-id-123",
"id": "file-123",
"name": "test.xlsx",
"mimeType": "application/vnd.google-apps.spreadsheet",
},
"chained": {
"_credentials_id": None,
"id": "file-456",
"name": "chained.xlsx",
"mimeType": "application/vnd.google-apps.spreadsheet",
},
"missing_key": {
"id": "file-789",
"name": "bad.xlsx",
"mimeType": "application/vnd.google-apps.spreadsheet",
},
}


@pytest.fixture
def mock_input_model(mocker: MockerFixture):
"""Create a mock input model with get_auto_credentials_fields() returning one field."""
input_model = mocker.MagicMock()
input_model.get_auto_credentials_fields.return_value = {
"credentials": {
"field_name": "spreadsheet",
"config": {
"provider": "google",
"type": "oauth2",
"scopes": ["https://www.googleapis.com/auth/drive.readonly"],
},
}
}
return input_model


@pytest.fixture
def mock_creds_manager(mocker: MockerFixture):
manager = mocker.AsyncMock()
mock_lock = mocker.AsyncMock()
mock_creds = mocker.MagicMock()
mock_creds.id = "cred-id-123"
mock_creds.provider = "google"
manager.acquire.return_value = (mock_creds, mock_lock)
return manager, mock_creds, mock_lock


@pytest.mark.asyncio
async def test_auto_credentials_happy_path(
mocker: MockerFixture,
google_drive_file_data,
mock_input_model,
mock_creds_manager,
):
"""When field_data has a valid _credentials_id, credentials should be acquired."""
from backend.executor.manager import _acquire_auto_credentials

manager, mock_creds, mock_lock = mock_creds_manager
input_data = {"spreadsheet": google_drive_file_data["valid"]}

extra_kwargs, locks = await _acquire_auto_credentials(
input_model=mock_input_model,
input_data=input_data,
creds_manager=manager,
user_id="user-1",
)

manager.acquire.assert_called_once_with("user-1", "cred-id-123")
assert extra_kwargs["credentials"] == mock_creds
assert mock_lock in locks


@pytest.mark.asyncio
async def test_auto_credentials_field_none_static_raises(
mocker: MockerFixture,
mock_input_model,
mock_creds_manager,
):
"""
[THE BUG FIX TEST — OPEN-2895]
When field_data is None and the key IS in input_data (user didn't select a file),
should raise ValueError instead of silently skipping.
"""
from backend.executor.manager import _acquire_auto_credentials

manager, _, _ = mock_creds_manager
# Key is present but value is None = user didn't select a file
input_data = {"spreadsheet": None}

with pytest.raises(ValueError, match="No file selected"):
await _acquire_auto_credentials(
input_model=mock_input_model,
input_data=input_data,
creds_manager=manager,
user_id="user-1",
)


@pytest.mark.asyncio
async def test_auto_credentials_field_absent_skips(
mocker: MockerFixture,
mock_input_model,
mock_creds_manager,
):
"""
When the field key is NOT in input_data at all (upstream connection),
should skip without error.
"""
from backend.executor.manager import _acquire_auto_credentials

manager, _, _ = mock_creds_manager
# Key not present = connected from upstream block
input_data = {}

extra_kwargs, locks = await _acquire_auto_credentials(
input_model=mock_input_model,
input_data=input_data,
creds_manager=manager,
user_id="user-1",
)

manager.acquire.assert_not_called()
assert "credentials" not in extra_kwargs
assert locks == []


@pytest.mark.asyncio
async def test_auto_credentials_chained_cred_id_none(
mocker: MockerFixture,
google_drive_file_data,
mock_input_model,
mock_creds_manager,
):
"""
When _credentials_id is explicitly None (chained data from upstream),
should skip credential acquisition.
"""
from backend.executor.manager import _acquire_auto_credentials

manager, _, _ = mock_creds_manager
input_data = {"spreadsheet": google_drive_file_data["chained"]}

extra_kwargs, locks = await _acquire_auto_credentials(
input_model=mock_input_model,
input_data=input_data,
creds_manager=manager,
user_id="user-1",
)

manager.acquire.assert_not_called()
assert "credentials" not in extra_kwargs


@pytest.mark.asyncio
async def test_auto_credentials_missing_cred_id_key_raises(
mocker: MockerFixture,
google_drive_file_data,
mock_input_model,
mock_creds_manager,
):
"""
When _credentials_id key is missing entirely from field_data dict,
should raise ValueError.
"""
from backend.executor.manager import _acquire_auto_credentials

manager, _, _ = mock_creds_manager
input_data = {"spreadsheet": google_drive_file_data["missing_key"]}

with pytest.raises(ValueError, match="Authentication missing"):
await _acquire_auto_credentials(
input_model=mock_input_model,
input_data=input_data,
creds_manager=manager,
user_id="user-1",
)


@pytest.mark.asyncio
async def test_auto_credentials_ownership_mismatch_error(
mocker: MockerFixture,
google_drive_file_data,
mock_input_model,
mock_creds_manager,
):
"""
[SECRT-1772] When acquire() raises ValueError (credential belongs to another user),
the error message should mention 'not available' (not 'expired').
"""
from backend.executor.manager import _acquire_auto_credentials

manager, _, _ = mock_creds_manager
manager.acquire.side_effect = ValueError(
"Credentials #cred-id-123 for user #user-2 not found"
)
input_data = {"spreadsheet": google_drive_file_data["valid"]}

with pytest.raises(ValueError, match="not available in your account"):
await _acquire_auto_credentials(
input_model=mock_input_model,
input_data=input_data,
creds_manager=manager,
user_id="user-2",
)


@pytest.mark.asyncio
async def test_auto_credentials_deleted_credential_error(
mocker: MockerFixture,
google_drive_file_data,
mock_input_model,
mock_creds_manager,
):
"""
[SECRT-1772] When acquire() raises ValueError (credential was deleted),
the error message should mention 'not available' (not 'expired').
"""
from backend.executor.manager import _acquire_auto_credentials

manager, _, _ = mock_creds_manager
manager.acquire.side_effect = ValueError(
"Credentials #cred-id-123 for user #user-1 not found"
)
input_data = {"spreadsheet": google_drive_file_data["valid"]}

with pytest.raises(ValueError, match="not available in your account"):
await _acquire_auto_credentials(
input_model=mock_input_model,
input_data=input_data,
creds_manager=manager,
user_id="user-1",
)


@pytest.mark.asyncio
async def test_auto_credentials_lock_appended(
mocker: MockerFixture,
google_drive_file_data,
mock_input_model,
mock_creds_manager,
):
"""Lock from acquire() should be included in returned locks list."""
from backend.executor.manager import _acquire_auto_credentials

manager, _, mock_lock = mock_creds_manager
input_data = {"spreadsheet": google_drive_file_data["valid"]}

extra_kwargs, locks = await _acquire_auto_credentials(
input_model=mock_input_model,
input_data=input_data,
creds_manager=manager,
user_id="user-1",
)

assert len(locks) == 1
assert locks[0] is mock_lock


@pytest.mark.asyncio
async def test_auto_credentials_multiple_fields(
mocker: MockerFixture,
mock_creds_manager,
):
"""When there are multiple auto_credentials fields, only valid ones should acquire."""
from backend.executor.manager import _acquire_auto_credentials

manager, mock_creds, mock_lock = mock_creds_manager

input_model = mocker.MagicMock()
input_model.get_auto_credentials_fields.return_value = {
"credentials": {
"field_name": "spreadsheet",
"config": {"provider": "google", "type": "oauth2"},
},
"credentials2": {
"field_name": "doc_file",
"config": {"provider": "google", "type": "oauth2"},
},
}

input_data = {
"spreadsheet": {
"_credentials_id": "cred-id-123",
"id": "file-1",
"name": "file1.xlsx",
},
"doc_file": {
"_credentials_id": None,
"id": "file-2",
"name": "chained.doc",
},
}

extra_kwargs, locks = await _acquire_auto_credentials(
input_model=input_model,
input_data=input_data,
creds_manager=manager,
user_id="user-1",
)

# Only the first field should have acquired credentials
manager.acquire.assert_called_once_with("user-1", "cred-id-123")
assert "credentials" in extra_kwargs
assert "credentials2" not in extra_kwargs
assert len(locks) == 1
@@ -259,7 +259,8 @@ async def _validate_node_input_credentials(

# Find any fields of type CredentialsMetaInput
credentials_fields = block.input_schema.get_credentials_fields()
if not credentials_fields:
auto_credentials_fields = block.input_schema.get_auto_credentials_fields()
if not credentials_fields and not auto_credentials_fields:
continue

# Track if any credential field is missing for this node
@@ -339,6 +340,47 @@ async def _validate_node_input_credentials(
] = "Invalid credentials: type/provider mismatch"
continue

# Validate auto-credentials (GoogleDriveFileField-based)
# These have _credentials_id embedded in the file field data
if auto_credentials_fields:
for _kwarg_name, info in auto_credentials_fields.items():
field_name = info["field_name"]
# Check input_default and nodes_input_masks for the field value
field_value = node.input_default.get(field_name)
if nodes_input_masks and node.id in nodes_input_masks:
field_value = nodes_input_masks[node.id].get(
field_name, field_value
)

if field_value and isinstance(field_value, dict):
if "_credentials_id" not in field_value:
# Key removed (e.g., on fork) — needs re-auth
has_missing_credentials = True
credential_errors[node.id][field_name] = (
"Authentication missing for the selected file. "
"Please re-select the file to authenticate with "
"your own account."
)
continue
cred_id = field_value.get("_credentials_id")
if cred_id and isinstance(cred_id, str):
try:
creds_store = get_integration_credentials_store()
creds = await creds_store.get_creds_by_id(user_id, cred_id)
except Exception as e:
has_missing_credentials = True
credential_errors[node.id][
field_name
] = f"Credentials not available: {e}"
continue
if not creds:
has_missing_credentials = True
credential_errors[node.id][field_name] = (
"The saved credentials are not available "
"for your account. Please re-select the file to "
"authenticate with your own account."
)

# If node has optional credentials and any are missing, mark for skipping
# But only if there are no other errors for this node
if (
@@ -370,10 +412,11 @@ def make_node_credentials_input_map(
"""
result: dict[str, dict[str, JsonValue]] = {}

# Get aggregated credentials fields for the graph
graph_cred_inputs = graph.aggregate_credentials_inputs()
# Only map regular credentials (not auto_credentials, which are resolved
# at execution time from _credentials_id in file field data)
graph_cred_inputs = graph.regular_credentials_inputs

for graph_input_name, (_, compatible_node_fields) in graph_cred_inputs.items():
for graph_input_name, (_, compatible_node_fields, _) in graph_cred_inputs.items():
# Best-effort map: skip missing items
if graph_input_name not in graph_credentials_input:
continue

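# Classifier sketch mirroring the auto-credentials validation branches above
# (a hypothetical helper for illustration; names are not from the diff):
def classify_auto_cred_field(field_value) -> str:
    if not field_value or not isinstance(field_value, dict):
        return "skip"                          # unset, or chained from upstream
    if "_credentials_id" not in field_value:
        return "error: re-auth needed"         # key stripped, e.g. on fork
    if field_value["_credentials_id"] is None:
        return "ok: chained data"
    return "lookup: credential must exist for this user"
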
@@ -907,3 +907,335 @@ async def test_stop_graph_execution_cascades_to_child_with_reviews(
|
||||
|
||||
# Verify both parent and child status updates
|
||||
assert mock_execution_db.update_graph_execution_stats.call_count >= 1
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Tests for auto_credentials validation in _validate_node_input_credentials
|
||||
# (Fix 3: SECRT-1772 + Fix 4: Path 4)
|
||||
# ============================================================================
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_validate_node_input_credentials_auto_creds_valid(
|
||||
mocker: MockerFixture,
|
||||
):
|
||||
"""
|
||||
[SECRT-1772] When a node has auto_credentials with a valid _credentials_id
|
||||
that exists in the store, validation should pass without errors.
|
||||
"""
|
||||
from backend.executor.utils import _validate_node_input_credentials
|
||||
|
||||
mock_node = mocker.MagicMock()
|
||||
mock_node.id = "node-with-auto-creds"
|
||||
mock_node.credentials_optional = False
|
||||
mock_node.input_default = {
|
||||
"spreadsheet": {
|
||||
"_credentials_id": "valid-cred-id",
|
||||
"id": "file-123",
|
||||
"name": "test.xlsx",
|
||||
}
|
||||
}
|
||||
|
||||
mock_block = mocker.MagicMock()
|
||||
# No regular credentials fields
|
||||
mock_block.input_schema.get_credentials_fields.return_value = {}
|
||||
# Has auto_credentials fields
|
||||
mock_block.input_schema.get_auto_credentials_fields.return_value = {
|
||||
"credentials": {
|
||||
"field_name": "spreadsheet",
|
||||
"config": {"provider": "google", "type": "oauth2"},
|
||||
}
|
||||
}
|
||||
mock_node.block = mock_block
|
||||
|
||||
mock_graph = mocker.MagicMock()
|
||||
mock_graph.nodes = [mock_node]
|
||||
|
||||
# Mock the credentials store to return valid credentials
|
||||
mock_store = mocker.MagicMock()
|
||||
    mock_creds = mocker.MagicMock()
    mock_creds.id = "valid-cred-id"
    mock_store.get_creds_by_id = mocker.AsyncMock(return_value=mock_creds)
    mocker.patch(
        "backend.executor.utils.get_integration_credentials_store",
        return_value=mock_store,
    )

    errors, nodes_to_skip = await _validate_node_input_credentials(
        graph=mock_graph,
        user_id="test-user",
        nodes_input_masks=None,
    )

    assert mock_node.id not in errors
    assert mock_node.id not in nodes_to_skip


@pytest.mark.asyncio
async def test_validate_node_input_credentials_auto_creds_missing(
    mocker: MockerFixture,
):
    """
    [SECRT-1772] When a node has auto_credentials with a _credentials_id
    that doesn't exist for the current user, validation should report an error.
    """
    from backend.executor.utils import _validate_node_input_credentials

    mock_node = mocker.MagicMock()
    mock_node.id = "node-with-bad-auto-creds"
    mock_node.credentials_optional = False
    mock_node.input_default = {
        "spreadsheet": {
            "_credentials_id": "other-users-cred-id",
            "id": "file-123",
            "name": "test.xlsx",
        }
    }

    mock_block = mocker.MagicMock()
    mock_block.input_schema.get_credentials_fields.return_value = {}
    mock_block.input_schema.get_auto_credentials_fields.return_value = {
        "credentials": {
            "field_name": "spreadsheet",
            "config": {"provider": "google", "type": "oauth2"},
        }
    }
    mock_node.block = mock_block

    mock_graph = mocker.MagicMock()
    mock_graph.nodes = [mock_node]

    # Mock the credentials store to return None (cred not found for this user)
    mock_store = mocker.MagicMock()
    mock_store.get_creds_by_id = mocker.AsyncMock(return_value=None)
    mocker.patch(
        "backend.executor.utils.get_integration_credentials_store",
        return_value=mock_store,
    )

    errors, nodes_to_skip = await _validate_node_input_credentials(
        graph=mock_graph,
        user_id="different-user",
        nodes_input_masks=None,
    )

    assert mock_node.id in errors
    assert "spreadsheet" in errors[mock_node.id]
    assert "not available" in errors[mock_node.id]["spreadsheet"].lower()


@pytest.mark.asyncio
async def test_validate_node_input_credentials_both_regular_and_auto(
    mocker: MockerFixture,
):
    """
    [SECRT-1772] A node that has BOTH regular credentials AND auto_credentials
    should have both validated.
    """
    from backend.executor.utils import _validate_node_input_credentials

    mock_node = mocker.MagicMock()
    mock_node.id = "node-with-both-creds"
    mock_node.credentials_optional = False
    mock_node.input_default = {
        "credentials": {
            "id": "regular-cred-id",
            "provider": "github",
            "type": "api_key",
        },
        "spreadsheet": {
            "_credentials_id": "auto-cred-id",
            "id": "file-123",
            "name": "test.xlsx",
        },
    }

    mock_credentials_field_type = mocker.MagicMock()
    mock_credentials_meta = mocker.MagicMock()
    mock_credentials_meta.id = "regular-cred-id"
    mock_credentials_meta.provider = "github"
    mock_credentials_meta.type = "api_key"
    mock_credentials_field_type.model_validate.return_value = mock_credentials_meta

    mock_block = mocker.MagicMock()
    # Regular credentials field
    mock_block.input_schema.get_credentials_fields.return_value = {
        "credentials": mock_credentials_field_type,
    }
    # Auto-credentials field
    mock_block.input_schema.get_auto_credentials_fields.return_value = {
        "auto_credentials": {
            "field_name": "spreadsheet",
            "config": {"provider": "google", "type": "oauth2"},
        }
    }
    mock_node.block = mock_block

    mock_graph = mocker.MagicMock()
    mock_graph.nodes = [mock_node]

    # Mock the credentials store to return valid credentials for both
    mock_store = mocker.MagicMock()
    mock_regular_creds = mocker.MagicMock()
    mock_regular_creds.id = "regular-cred-id"
    mock_regular_creds.provider = "github"
    mock_regular_creds.type = "api_key"

    mock_auto_creds = mocker.MagicMock()
    mock_auto_creds.id = "auto-cred-id"

    def get_creds_side_effect(user_id, cred_id):
        if cred_id == "regular-cred-id":
            return mock_regular_creds
        elif cred_id == "auto-cred-id":
            return mock_auto_creds
        return None

    mock_store.get_creds_by_id = mocker.AsyncMock(side_effect=get_creds_side_effect)
    mocker.patch(
        "backend.executor.utils.get_integration_credentials_store",
        return_value=mock_store,
    )

    errors, nodes_to_skip = await _validate_node_input_credentials(
        graph=mock_graph,
        user_id="test-user",
        nodes_input_masks=None,
    )

    # Both should validate successfully - no errors
    assert mock_node.id not in errors
    assert mock_node.id not in nodes_to_skip


@pytest.mark.asyncio
async def test_validate_node_input_credentials_auto_creds_skipped_when_none(
    mocker: MockerFixture,
):
    """
    When a node has auto_credentials but the field value has _credentials_id=None
    (e.g., from upstream connection), validation should skip it without error.
    """
    from backend.executor.utils import _validate_node_input_credentials

    mock_node = mocker.MagicMock()
    mock_node.id = "node-with-chained-auto-creds"
    mock_node.credentials_optional = False
    mock_node.input_default = {
        "spreadsheet": {
            "_credentials_id": None,
            "id": "file-123",
            "name": "test.xlsx",
        }
    }

    mock_block = mocker.MagicMock()
    mock_block.input_schema.get_credentials_fields.return_value = {}
    mock_block.input_schema.get_auto_credentials_fields.return_value = {
        "credentials": {
            "field_name": "spreadsheet",
            "config": {"provider": "google", "type": "oauth2"},
        }
    }
    mock_node.block = mock_block

    mock_graph = mocker.MagicMock()
    mock_graph.nodes = [mock_node]

    errors, nodes_to_skip = await _validate_node_input_credentials(
        graph=mock_graph,
        user_id="test-user",
        nodes_input_masks=None,
    )

    # No error - chained data with None cred_id is valid
    assert mock_node.id not in errors

# ============================================================================
# Tests for CredentialsFieldInfo auto_credential tag (Fix 4: Path 4)
# ============================================================================


def test_credentials_field_info_auto_credential_tag():
    """
    [Path 4] CredentialsFieldInfo should support is_auto_credential and
    input_field_name fields for distinguishing auto from regular credentials.
    """
    from backend.data.model import CredentialsFieldInfo

    # Regular credential should have is_auto_credential=False by default
    regular = CredentialsFieldInfo.model_validate(
        {
            "credentials_provider": ["github"],
            "credentials_types": ["api_key"],
        },
        by_alias=True,
    )
    assert regular.is_auto_credential is False
    assert regular.input_field_name is None

    # Auto credential should have is_auto_credential=True
    auto = CredentialsFieldInfo.model_validate(
        {
            "credentials_provider": ["google"],
            "credentials_types": ["oauth2"],
            "is_auto_credential": True,
            "input_field_name": "spreadsheet",
        },
        by_alias=True,
    )
    assert auto.is_auto_credential is True
    assert auto.input_field_name == "spreadsheet"


def test_make_node_credentials_input_map_excludes_auto_creds(
    mocker: MockerFixture,
):
    """
    [Path 4] make_node_credentials_input_map should only include regular credentials,
    not auto_credentials (which are resolved at execution time).
    """
    from backend.data.model import CredentialsFieldInfo, CredentialsMetaInput
    from backend.executor.utils import make_node_credentials_input_map
    from backend.integrations.providers import ProviderName

    # Create a mock graph with aggregate_credentials_inputs that returns
    # both regular and auto credentials
    mock_graph = mocker.MagicMock()

    regular_field_info = CredentialsFieldInfo.model_validate(
        {
            "credentials_provider": ["github"],
            "credentials_types": ["api_key"],
            "is_auto_credential": False,
        },
        by_alias=True,
    )

    # Mock regular_credentials_inputs property (auto_credentials are excluded)
    mock_graph.regular_credentials_inputs = {
        "github_creds": (regular_field_info, {("node-1", "credentials")}, True),
    }

    graph_credentials_input = {
        "github_creds": CredentialsMetaInput(
            id="cred-123",
            provider=ProviderName("github"),
            type="api_key",
        ),
    }

    result = make_node_credentials_input_map(mock_graph, graph_credentials_input)

    # Regular credentials should be mapped
    assert "node-1" in result
    assert "credentials" in result["node-1"]

    # Auto credentials should NOT appear in the result
    # (they would have been mapped to the kwarg_name "credentials" not "spreadsheet")
    for node_id, fields in result.items():
        for field_name, value in fields.items():
            # Verify no auto-credential phantom entries
            if isinstance(value, dict):
                assert "_credentials_id" not in value

@@ -224,6 +224,14 @@ openweathermap_credentials = APIKeyCredentials(
    expires_at=None,
)

elevenlabs_credentials = APIKeyCredentials(
    id="f4a8b6c2-3d1e-4f5a-9b8c-7d6e5f4a3b2c",
    provider="elevenlabs",
    api_key=SecretStr(settings.secrets.elevenlabs_api_key),
    title="Use Credits for ElevenLabs",
    expires_at=None,
)

DEFAULT_CREDENTIALS = [
    ollama_credentials,
    revid_credentials,
@@ -252,6 +260,7 @@ DEFAULT_CREDENTIALS = [
    v0_credentials,
    webshare_proxy_credentials,
    openweathermap_credentials,
    elevenlabs_credentials,
]

SYSTEM_CREDENTIAL_IDS = {cred.id for cred in DEFAULT_CREDENTIALS}
@@ -366,6 +375,8 @@ class IntegrationCredentialsStore:
            all_credentials.append(webshare_proxy_credentials)
        if settings.secrets.openweathermap_api_key:
            all_credentials.append(openweathermap_credentials)
        if settings.secrets.elevenlabs_api_key:
            all_credentials.append(elevenlabs_credentials)
        return all_credentials

    async def get_creds_by_id(

@@ -18,6 +18,7 @@ class ProviderName(str, Enum):
    DISCORD = "discord"
    D_ID = "d_id"
    E2B = "e2b"
    ELEVENLABS = "elevenlabs"
    FAL = "fal"
    GITHUB = "github"
    GOOGLE = "google"

@@ -8,6 +8,8 @@ from pathlib import Path
from typing import TYPE_CHECKING, Literal
from urllib.parse import urlparse

from pydantic import BaseModel

from backend.util.cloud_storage import get_cloud_storage_handler
from backend.util.request import Requests
from backend.util.settings import Config
@@ -17,6 +19,35 @@ from backend.util.virus_scanner import scan_content_safe
if TYPE_CHECKING:
    from backend.data.execution import ExecutionContext


class WorkspaceUri(BaseModel):
    """Parsed workspace:// URI."""

    file_ref: str  # File ID or path (e.g. "abc123" or "/path/to/file.txt")
    mime_type: str | None = None  # MIME type from fragment (e.g. "video/mp4")
    is_path: bool = False  # True if file_ref is a path (starts with "/")


def parse_workspace_uri(uri: str) -> WorkspaceUri:
    """Parse a workspace:// URI into its components.

    Examples:
        "workspace://abc123" → WorkspaceUri(file_ref="abc123", mime_type=None, is_path=False)
        "workspace://abc123#video/mp4" → WorkspaceUri(file_ref="abc123", mime_type="video/mp4", is_path=False)
        "workspace:///path/to/file.txt" → WorkspaceUri(file_ref="/path/to/file.txt", mime_type=None, is_path=True)
    """
    raw = uri.removeprefix("workspace://")
    mime_type: str | None = None
    if "#" in raw:
        raw, fragment = raw.split("#", 1)
        mime_type = fragment or None
    return WorkspaceUri(
        file_ref=raw,
        mime_type=mime_type,
        is_path=raw.startswith("/"),
    )
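
A quick sanity check of the URI rules above (a minimal sketch, assuming the parse_workspace_uri helper and WorkspaceUri model exactly as added in this diff):

ws = parse_workspace_uri("workspace://abc123#video/mp4")
assert ws.file_ref == "abc123" and ws.mime_type == "video/mp4" and not ws.is_path
# A bare trailing "#" yields no MIME type, per the "fragment or None" guard.
assert parse_workspace_uri("workspace://abc123#").mime_type is None
# The triple-slash path form keeps its leading "/" and flips is_path.
ws = parse_workspace_uri("workspace:///path/to/file.txt")
assert ws.file_ref == "/path/to/file.txt" and ws.is_path
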
# Return format options for store_media_file
# - "for_local_processing": Returns local file path - use with ffmpeg, MoviePy, PIL, etc.
# - "for_external_api": Returns data URI (base64) - use when sending content to external APIs
@@ -183,22 +214,20 @@ async def store_media_file(
                "This file type is only available in CoPilot sessions."
            )

        # Parse workspace reference
        # workspace://abc123 - by file ID
        # workspace:///path/to/file.txt - by virtual path
        file_ref = file[12:]  # Remove "workspace://"
        # Parse workspace reference (strips #mimeType fragment from file ID)
        ws = parse_workspace_uri(file)

        if file_ref.startswith("/"):
            # Path reference
            workspace_content = await workspace_manager.read_file(file_ref)
            file_info = await workspace_manager.get_file_info_by_path(file_ref)
        if ws.is_path:
            # Path reference: workspace:///path/to/file.txt
            workspace_content = await workspace_manager.read_file(ws.file_ref)
            file_info = await workspace_manager.get_file_info_by_path(ws.file_ref)
            filename = sanitize_filename(
                file_info.name if file_info else f"{uuid.uuid4()}.bin"
            )
        else:
            # ID reference
            workspace_content = await workspace_manager.read_file_by_id(file_ref)
            file_info = await workspace_manager.get_file_info(file_ref)
            # ID reference: workspace://abc123 or workspace://abc123#video/mp4
            workspace_content = await workspace_manager.read_file_by_id(ws.file_ref)
            file_info = await workspace_manager.get_file_info(ws.file_ref)
            filename = sanitize_filename(
                file_info.name if file_info else f"{uuid.uuid4()}.bin"
            )
@@ -334,7 +363,21 @@ async def store_media_file(

        # Don't re-save if input was already from workspace
        if is_from_workspace:
            # Return original workspace reference
            # Return original workspace reference, ensuring MIME type fragment
            ws = parse_workspace_uri(file)
            if not ws.mime_type:
                # Add MIME type fragment if missing (older refs without it)
                try:
                    if ws.is_path:
                        info = await workspace_manager.get_file_info_by_path(
                            ws.file_ref
                        )
                    else:
                        info = await workspace_manager.get_file_info(ws.file_ref)
                    if info:
                        return MediaFileType(f"{file}#{info.mimeType}")
                except Exception:
                    pass
            return MediaFileType(file)

        # Save new content to workspace
@@ -346,7 +389,7 @@ async def store_media_file(
            filename=filename,
            overwrite=True,
        )
        return MediaFileType(f"workspace://{file_record.id}")
        return MediaFileType(f"workspace://{file_record.id}#{file_record.mimeType}")

    else:
        raise ValueError(f"Invalid return_format: {return_format}")

@@ -157,12 +157,7 @@ async def validate_url(
        is_trusted: Boolean indicating if the hostname is in trusted_origins
        ip_addresses: List of IP addresses for the host; empty if the host is trusted
    """
    # Canonicalize URL
    url = url.strip("/ ").replace("\\", "/")
    parsed = urlparse(url)
    if not parsed.scheme:
        url = f"http://{url}"
        parsed = urlparse(url)
    parsed = parse_url(url)

    # Check scheme
    if parsed.scheme not in ALLOWED_SCHEMES:
@@ -220,6 +215,17 @@ async def validate_url(
    )


def parse_url(url: str) -> URL:
    """Canonicalizes and parses a URL string."""
    url = url.strip("/ ").replace("\\", "/")

    # Ensure scheme is present for proper parsing
    if not re.match(r"[a-z0-9+.\-]+://", url):
        url = f"http://{url}"

    return urlparse(url)
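
For reference, a minimal sketch of how the canonicalization above behaves (assumes parse_url exactly as defined in this hunk; URL here is the urlparse result type this module aliases):

# Backslashes become forward slashes and a missing scheme defaults to
# http://, so the scheme check in validate_url sees a consistent value.
assert parse_url("example.com\\path").geturl() == "http://example.com/path"
assert parse_url("https://example.com/").scheme == "https"
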

def pin_url(url: URL, ip_addresses: Optional[list[str]] = None) -> URL:
    """
    Pins a URL to a specific IP address to prevent DNS rebinding attacks.

@@ -656,6 +656,7 @@ class Secrets(UpdateTrackingModel["Secrets"], BaseSettings):
    e2b_api_key: str = Field(default="", description="E2B API key")
    nvidia_api_key: str = Field(default="", description="Nvidia API key")
    mem0_api_key: str = Field(default="", description="Mem0 API key")
    elevenlabs_api_key: str = Field(default="", description="ElevenLabs API key")

    linear_client_id: str = Field(default="", description="Linear client ID")
    linear_client_secret: str = Field(default="", description="Linear client secret")

@@ -22,6 +22,7 @@ from backend.data.workspace import (
    soft_delete_workspace_file,
)
from backend.util.settings import Config
from backend.util.virus_scanner import scan_content_safe
from backend.util.workspace_storage import compute_file_checksum, get_workspace_storage

logger = logging.getLogger(__name__)
@@ -187,6 +188,9 @@ class WorkspaceManager:
                f"{Config().max_file_size_mb}MB limit"
            )

        # Virus scan content before persisting (defense in depth)
        await scan_content_safe(content, filename=filename)

        # Determine path with session scoping
        if path is None:
            path = f"/{filename}"

47
autogpt_platform/backend/poetry.lock
generated
@@ -1169,6 +1169,29 @@ attrs = ">=21.3.0"
e2b = ">=1.5.4,<2.0.0"
httpx = ">=0.20.0,<1.0.0"

[[package]]
name = "elevenlabs"
version = "1.59.0"
description = ""
optional = false
python-versions = "<4.0,>=3.8"
groups = ["main"]
files = [
    {file = "elevenlabs-1.59.0-py3-none-any.whl", hash = "sha256:468145db81a0bc867708b4a8619699f75583e9481b395ec1339d0b443da771ed"},
    {file = "elevenlabs-1.59.0.tar.gz", hash = "sha256:16e735bd594e86d415dd445d249c8cc28b09996cfd627fbc10102c0a84698859"},
]

[package.dependencies]
httpx = ">=0.21.2"
pydantic = ">=1.9.2"
pydantic-core = ">=2.18.2,<3.0.0"
requests = ">=2.20"
typing_extensions = ">=4.0.0"
websockets = ">=11.0"

[package.extras]
pyaudio = ["pyaudio (>=0.2.14)"]

[[package]]
name = "email-validator"
version = "2.2.0"
@@ -7361,6 +7384,28 @@ files = [
defusedxml = ">=0.7.1,<0.8.0"
requests = "*"

[[package]]
name = "yt-dlp"
version = "2025.12.8"
description = "A feature-rich command-line audio/video downloader"
optional = false
python-versions = ">=3.10"
groups = ["main"]
files = [
    {file = "yt_dlp-2025.12.8-py3-none-any.whl", hash = "sha256:36e2584342e409cfbfa0b5e61448a1c5189e345cf4564294456ee509e7d3e065"},
    {file = "yt_dlp-2025.12.8.tar.gz", hash = "sha256:b773c81bb6b71cb2c111cfb859f453c7a71cf2ef44eff234ff155877184c3e4f"},
]

[package.extras]
build = ["build", "hatchling (>=1.27.0)", "pip", "setuptools (>=71.0.2)", "wheel"]
curl-cffi = ["curl-cffi (>=0.5.10,<0.6.dev0 || >=0.10.dev0,<0.14) ; implementation_name == \"cpython\""]
default = ["brotli ; implementation_name == \"cpython\"", "brotlicffi ; implementation_name != \"cpython\"", "certifi", "mutagen", "pycryptodomex", "requests (>=2.32.2,<3)", "urllib3 (>=2.0.2,<3)", "websockets (>=13.0)", "yt-dlp-ejs (==0.3.2)"]
dev = ["autopep8 (>=2.0,<3.0)", "pre-commit", "pytest (>=8.1,<9.0)", "pytest-rerunfailures (>=14.0,<15.0)", "ruff (>=0.14.0,<0.15.0)"]
pyinstaller = ["pyinstaller (>=6.17.0)"]
secretstorage = ["cffi", "secretstorage"]
static-analysis = ["autopep8 (>=2.0,<3.0)", "ruff (>=0.14.0,<0.15.0)"]
test = ["pytest (>=8.1,<9.0)", "pytest-rerunfailures (>=14.0,<15.0)"]

[[package]]
name = "zerobouncesdk"
version = "1.1.2"
@@ -7512,4 +7557,4 @@ cffi = ["cffi (>=1.11)"]
[metadata]
lock-version = "2.1"
python-versions = ">=3.10,<3.14"
content-hash = "ee5742dc1a9df50dfc06d4b26a1682cbb2b25cab6b79ce5625ec272f93e4f4bf"
content-hash = "8239323f9ae6713224dffd1fe8ba8b449fe88b6c3c7a90940294a74f43a0387a"

@@ -20,6 +20,7 @@ click = "^8.2.0"
cryptography = "^45.0"
discord-py = "^2.5.2"
e2b-code-interpreter = "^1.5.2"
elevenlabs = "^1.50.0"
fastapi = "^0.116.1"
feedparser = "^6.0.11"
flake8 = "^7.3.0"
@@ -71,6 +72,7 @@ tweepy = "^4.16.0"
uvicorn = { extras = ["standard"], version = "^0.35.0" }
websockets = "^15.0"
youtube-transcript-api = "^1.2.1"
yt-dlp = "2025.12.08"
zerobouncesdk = "^1.1.2"
# NOTE: please insert new dependencies in their alphabetical location
pytest-snapshot = "^0.9.0"

@@ -3,7 +3,6 @@
  "credentials_input_schema": {
    "properties": {},
    "required": [],
    "title": "TestGraphCredentialsInputSchema",
    "type": "object"
  },
  "description": "A test graph",

@@ -1,34 +1,14 @@
[
  {
    "credentials_input_schema": {
      "properties": {},
      "required": [],
      "title": "TestGraphCredentialsInputSchema",
      "type": "object"
    },
    "created_at": "2025-09-04T13:37:00",
    "description": "A test graph",
    "forked_from_id": null,
    "forked_from_version": null,
    "has_external_trigger": false,
    "has_human_in_the_loop": false,
    "has_sensitive_action": false,
    "id": "graph-123",
    "input_schema": {
      "properties": {},
      "required": [],
      "type": "object"
    },
    "instructions": null,
    "is_active": true,
    "name": "Test Graph",
    "output_schema": {
      "properties": {},
      "required": [],
      "type": "object"
    },
    "recommended_schedule_cron": null,
    "sub_graphs": [],
    "trigger_setup_info": null,
    "user_id": "3e53486c-cf57-477e-ba2a-cb02dc828e1a",
    "version": 1
  }

@@ -1,5 +1,5 @@
import { CredentialsMetaInput } from "@/app/api/__generated__/models/credentialsMetaInput";
import { GraphMeta } from "@/app/api/__generated__/models/graphMeta";
import { GraphModel } from "@/app/api/__generated__/models/graphModel";
import { CredentialsInput } from "@/components/contextual/CredentialsInput/CredentialsInput";
import { useState } from "react";
import { getSchemaDefaultCredentials } from "../../helpers";
@@ -9,7 +9,7 @@ type Credential = CredentialsMetaInput | undefined;
type Credentials = Record<string, Credential>;

type Props = {
  agent: GraphMeta | null;
  agent: GraphModel | null;
  siblingInputs?: Record<string, any>;
  onCredentialsChange: (
    credentials: Record<string, CredentialsMetaInput>,

@@ -1,9 +1,9 @@
import { CredentialsMetaInput } from "@/app/api/__generated__/models/credentialsMetaInput";
import { GraphMeta } from "@/app/api/__generated__/models/graphMeta";
import { GraphModel } from "@/app/api/__generated__/models/graphModel";
import { BlockIOCredentialsSubSchema } from "@/lib/autogpt-server-api/types";

export function getCredentialFields(
  agent: GraphMeta | null,
  agent: GraphModel | null,
): AgentCredentialsFields {
  if (!agent) return {};


@@ -3,10 +3,10 @@ import type {
  CredentialsMetaInput,
} from "@/lib/autogpt-server-api/types";
import type { InputValues } from "./types";
import { GraphMeta } from "@/app/api/__generated__/models/graphMeta";
import { GraphModel } from "@/app/api/__generated__/models/graphModel";

export function computeInitialAgentInputs(
  agent: GraphMeta | null,
  agent: GraphModel | null,
  existingInputs?: InputValues | null,
): InputValues {
  const properties = agent?.input_schema?.properties || {};
@@ -29,7 +29,7 @@ export function computeInitialAgentInputs(
}

type IsRunDisabledParams = {
  agent: GraphMeta | null;
  agent: GraphModel | null;
  isRunning: boolean;
  agentInputs: InputValues | null | undefined;
};

@@ -1,6 +1,17 @@
import { OAuthPopupResultMessage } from "./types";
import { NextResponse } from "next/server";

/**
 * Safely encode a value as JSON for embedding in a script tag.
 * Escapes characters that could break out of the script context to prevent XSS.
 */
function safeJsonStringify(value: unknown): string {
  return JSON.stringify(value)
    .replace(/</g, "\\u003c")
    .replace(/>/g, "\\u003e")
    .replace(/&/g, "\\u0026");
}
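
The same escaping idea as a standalone Python sketch (illustrative only; the name safe_json_for_script is not part of the codebase):

import json

def safe_json_for_script(value) -> str:
    # Escape <, >, and & as \uXXXX escapes so a payload containing
    # "</script>" cannot terminate the surrounding <script> block
    # (mirrors the TypeScript helper above).
    return (
        json.dumps(value)
        .replace("<", "\\u003c")
        .replace(">", "\\u003e")
        .replace("&", "\\u0026")
    )

assert "</script" not in safe_json_for_script({"msg": "</script><img src=x>"})
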
// This route is intended to be used as the callback for integration OAuth flows,
// controlled by the CredentialsInput component. The CredentialsInput opens the login
// page in a pop-up window, which then redirects to this route to close the loop.
@@ -23,12 +34,13 @@ export async function GET(request: Request) {
  console.debug("Sending message to opener:", message);

  // Return a response with the message as JSON and a script to close the window
  // Use safeJsonStringify to prevent XSS by escaping <, >, and & characters
  return new NextResponse(
    `
    <html>
      <body>
        <script>
          window.opener.postMessage(${JSON.stringify(message)});
          window.opener.postMessage(${safeJsonStringify(message)});
          window.close();
        </script>
      </body>

@@ -30,6 +30,8 @@ import {
} from "@/components/atoms/Tooltip/BaseTooltip";
import { GraphMeta } from "@/lib/autogpt-server-api";
import jaro from "jaro-winkler";
import { getV1GetSpecificGraph } from "@/app/api/__generated__/endpoints/graphs/graphs";
import { okData } from "@/app/api/helpers";

type _Block = Omit<Block, "inputSchema" | "outputSchema"> & {
  uiKey?: string;
@@ -107,6 +109,8 @@ export function BlocksControl({
    .filter((b) => b.uiType !== BlockUIType.AGENT)
    .sort((a, b) => a.name.localeCompare(b.name));

  // Agent blocks are created from GraphMeta which doesn't include schemas.
  // Schemas will be fetched on-demand when the block is actually added.
  const agentBlockList = flows
    .map((flow): _Block => {
      return {
@@ -116,8 +120,9 @@ export function BlocksControl({
          `Ver.${flow.version}` +
          (flow.description ? ` | ${flow.description}` : ""),
        categories: [{ category: "AGENT", description: "" }],
        inputSchema: flow.input_schema,
        outputSchema: flow.output_schema,
        // Empty schemas - will be populated when block is added
        inputSchema: { type: "object", properties: {} },
        outputSchema: { type: "object", properties: {} },
        staticOutput: false,
        uiType: BlockUIType.AGENT,
        costs: [],
@@ -125,8 +130,7 @@ export function BlocksControl({
        hardcodedValues: {
          graph_id: flow.id,
          graph_version: flow.version,
          input_schema: flow.input_schema,
          output_schema: flow.output_schema,
          // Schemas will be fetched on-demand when block is added
        },
      };
    })
@@ -182,6 +186,37 @@ export function BlocksControl({
    setSelectedCategory(null);
  }, []);

  // Handler to add a block, fetching graph data on-demand for agent blocks
  const handleAddBlock = useCallback(
    async (block: _Block & { notAvailable: string | null }) => {
      if (block.notAvailable) return;

      // For agent blocks, fetch the full graph to get schemas
      if (block.uiType === BlockUIType.AGENT && block.hardcodedValues) {
        const graphID = block.hardcodedValues.graph_id as string;
        const graphVersion = block.hardcodedValues.graph_version as number;
        const graphData = okData(
          await getV1GetSpecificGraph(graphID, { version: graphVersion }),
        );

        if (graphData) {
          addBlock(block.id, block.name, {
            ...block.hardcodedValues,
            input_schema: graphData.input_schema,
            output_schema: graphData.output_schema,
          });
        } else {
          // Fallback: add without schemas (will be incomplete)
          console.error("Failed to fetch graph data for agent block");
          addBlock(block.id, block.name, block.hardcodedValues || {});
        }
      } else {
        addBlock(block.id, block.name, block.hardcodedValues || {});
      }
    },
    [addBlock],
  );

  // Extract unique categories from blocks
  const categories = useMemo(() => {
    return Array.from(
@@ -303,10 +338,7 @@ export function BlocksControl({
      }),
    );
  }}
  onClick={() =>
    !block.notAvailable &&
    addBlock(block.id, block.name, block?.hardcodedValues || {})
  }
  onClick={() => handleAddBlock(block)}
  title={block.notAvailable ?? undefined}
>
  <div

@@ -1,6 +1,6 @@
import { beautifyString } from "@/lib/utils";
import { Clipboard, Maximize2 } from "lucide-react";
import React, { useState } from "react";
import React, { useMemo, useState } from "react";
import { Button } from "../../../../../components/__legacy__/ui/button";
import { ContentRenderer } from "../../../../../components/__legacy__/ui/render";
import {
@@ -11,6 +11,12 @@ import {
  TableHeader,
  TableRow,
} from "../../../../../components/__legacy__/ui/table";
import type { OutputMetadata } from "@/components/contextual/OutputRenderers";
import {
  globalRegistry,
  OutputItem,
} from "@/components/contextual/OutputRenderers";
import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag";
import { useToast } from "../../../../../components/molecules/Toast/use-toast";
import ExpandableOutputDialog from "./ExpandableOutputDialog";

@@ -26,6 +32,9 @@ export default function DataTable({
  data,
}: DataTableProps) {
  const { toast } = useToast();
  const enableEnhancedOutputHandling = useGetFlag(
    Flag.ENABLE_ENHANCED_OUTPUT_HANDLING,
  );
  const [expandedDialog, setExpandedDialog] = useState<{
    isOpen: boolean;
    execId: string;
@@ -33,6 +42,15 @@ export default function DataTable({
    data: any[];
  } | null>(null);

  // Prepare renderers for each item when enhanced mode is enabled
  const getItemRenderer = useMemo(() => {
    if (!enableEnhancedOutputHandling) return null;
    return (item: unknown) => {
      const metadata: OutputMetadata = {};
      return globalRegistry.getRenderer(item, metadata);
    };
  }, [enableEnhancedOutputHandling]);

  const copyData = (pin: string, data: string) => {
    navigator.clipboard.writeText(data).then(() => {
      toast({
@@ -102,15 +120,31 @@ export default function DataTable({
            <Clipboard size={18} />
          </Button>
        </div>
        {value.map((item, index) => (
          <React.Fragment key={index}>
            <ContentRenderer
              value={item}
              truncateLongData={truncateLongData}
            />
            {index < value.length - 1 && ", "}
          </React.Fragment>
        ))}
        {value.map((item, index) => {
          const renderer = getItemRenderer?.(item);
          if (enableEnhancedOutputHandling && renderer) {
            const metadata: OutputMetadata = {};
            return (
              <React.Fragment key={index}>
                <OutputItem
                  value={item}
                  metadata={metadata}
                  renderer={renderer}
                />
                {index < value.length - 1 && ", "}
              </React.Fragment>
            );
          }
          return (
            <React.Fragment key={index}>
              <ContentRenderer
                value={item}
                truncateLongData={truncateLongData}
              />
              {index < value.length - 1 && ", "}
            </React.Fragment>
          );
        })}
      </div>
    </TableCell>
  </TableRow>

@@ -29,13 +29,17 @@ import "@xyflow/react/dist/style.css";
import { ConnectedEdge, CustomNode } from "../CustomNode/CustomNode";
import "./flow.css";
import {
  BlockIORootSchema,
  BlockUIType,
  formatEdgeID,
  GraphExecutionID,
  GraphID,
  GraphMeta,
  LibraryAgent,
  SpecialBlockID,
} from "@/lib/autogpt-server-api";
import { getV1GetSpecificGraph } from "@/app/api/__generated__/endpoints/graphs/graphs";
import { okData } from "@/app/api/helpers";
import { IncompatibilityInfo } from "../../../hooks/useSubAgentUpdate/types";
import { Key, storage } from "@/services/storage/local-storage";
import { findNewlyAddedBlockCoordinates, getTypeColor } from "@/lib/utils";
@@ -687,8 +691,94 @@ const FlowEditor: React.FC<{
    [getNode, updateNode, nodes],
  );

  /* Shared helper to create and add a node */
  const createAndAddNode = useCallback(
    async (
      blockID: string,
      blockName: string,
      hardcodedValues: Record<string, any>,
      position: { x: number; y: number },
    ): Promise<CustomNode | null> => {
      const nodeSchema = availableBlocks.find((node) => node.id === blockID);
      if (!nodeSchema) {
        console.error(`Schema not found for block ID: ${blockID}`);
        return null;
      }

      // For agent blocks, fetch the full graph to get schemas
      let inputSchema: BlockIORootSchema = nodeSchema.inputSchema;
      let outputSchema: BlockIORootSchema = nodeSchema.outputSchema;
      let finalHardcodedValues = hardcodedValues;

      if (blockID === SpecialBlockID.AGENT) {
        const graphID = hardcodedValues.graph_id as string;
        const graphVersion = hardcodedValues.graph_version as number;
        const graphData = okData(
          await getV1GetSpecificGraph(graphID, { version: graphVersion }),
        );

        if (graphData) {
          inputSchema = graphData.input_schema as BlockIORootSchema;
          outputSchema = graphData.output_schema as BlockIORootSchema;
          finalHardcodedValues = {
            ...hardcodedValues,
            input_schema: graphData.input_schema,
            output_schema: graphData.output_schema,
          };
        } else {
          console.error("Failed to fetch graph data for agent block");
        }
      }

      const newNode: CustomNode = {
        id: nodeId.toString(),
        type: "custom",
        position,
        data: {
          blockType: blockName,
          blockCosts: nodeSchema.costs || [],
          title: `${blockName} ${nodeId}`,
          description: nodeSchema.description,
          categories: nodeSchema.categories,
          inputSchema: inputSchema,
          outputSchema: outputSchema,
          hardcodedValues: finalHardcodedValues,
          connections: [],
          isOutputOpen: false,
          block_id: blockID,
          isOutputStatic: nodeSchema.staticOutput,
          uiType: nodeSchema.uiType,
        },
      };

      addNodes(newNode);
      setNodeId((prevId) => prevId + 1);
      clearNodesStatusAndOutput();

      history.push({
        type: "ADD_NODE",
        payload: { node: { ...newNode, ...newNode.data } },
        undo: () => deleteElements({ nodes: [{ id: newNode.id }] }),
        redo: () => addNodes(newNode),
      });

      return newNode;
    },
    [
      availableBlocks,
      nodeId,
      addNodes,
      deleteElements,
      clearNodesStatusAndOutput,
    ],
  );

  const addNode = useCallback(
    (blockId: string, nodeType: string, hardcodedValues: any = {}) => {
    async (
      blockId: string,
      nodeType: string,
      hardcodedValues: Record<string, any> = {},
    ) => {
      const nodeSchema = availableBlocks.find((node) => node.id === blockId);
      if (!nodeSchema) {
        console.error(`Schema not found for block ID: ${blockId}`);
@@ -707,73 +797,42 @@ const FlowEditor: React.FC<{
      // Alternative: We could also use D3 force, Intersection for this (React flow Pro examples)

      const { x, y } = getViewport();
      const viewportCoordinates =
      const position =
        nodeDimensions && Object.keys(nodeDimensions).length > 0
          ? // we will get all the dimension of nodes, then store
            findNewlyAddedBlockCoordinates(
          ? findNewlyAddedBlockCoordinates(
              nodeDimensions,
              nodeSchema.uiType == BlockUIType.NOTE ? 300 : 500,
              60,
              1.0,
            )
          : // we will get all the dimension of nodes, then store
            {
          : {
              x: window.innerWidth / 2 - x,
              y: window.innerHeight / 2 - y,
            };

      const newNode: CustomNode = {
        id: nodeId.toString(),
        type: "custom",
        position: viewportCoordinates, // Set the position to the calculated viewport center
        data: {
          blockType: nodeType,
          blockCosts: nodeSchema.costs,
          title: `${nodeType} ${nodeId}`,
          description: nodeSchema.description,
          categories: nodeSchema.categories,
          inputSchema: nodeSchema.inputSchema,
          outputSchema: nodeSchema.outputSchema,
          hardcodedValues: hardcodedValues,
          connections: [],
          isOutputOpen: false,
          block_id: blockId,
          isOutputStatic: nodeSchema.staticOutput,
          uiType: nodeSchema.uiType,
        },
      };

      addNodes(newNode);
      setNodeId((prevId) => prevId + 1);
      clearNodesStatusAndOutput(); // Clear status and output when a new node is added
      const newNode = await createAndAddNode(
        blockId,
        nodeType,
        hardcodedValues,
        position,
      );
      if (!newNode) return;

      setViewport(
        {
          // Rough estimate of the dimension of the node is: 500x400px.
          // Though we skip shifting the X, considering the block menu side-bar.
          x: -viewportCoordinates.x * 0.8 + (window.innerWidth - 0.0) / 2,
          y: -viewportCoordinates.y * 0.8 + (window.innerHeight - 400) / 2,
          x: -position.x * 0.8 + (window.innerWidth - 0.0) / 2,
          y: -position.y * 0.8 + (window.innerHeight - 400) / 2,
          zoom: 0.8,
        },
        { duration: 500 },
      );

      history.push({
        type: "ADD_NODE",
        payload: { node: { ...newNode, ...newNode.data } },
        undo: () => deleteElements({ nodes: [{ id: newNode.id }] }),
        redo: () => addNodes(newNode),
      });
    },
    [
      nodeId,
      getViewport,
      setViewport,
      availableBlocks,
      addNodes,
      nodeDimensions,
      deleteElements,
      clearNodesStatusAndOutput,
      createAndAddNode,
    ],
  );

@@ -920,7 +979,7 @@ const FlowEditor: React.FC<{
  }, []);

  const onDrop = useCallback(
    (event: React.DragEvent) => {
    async (event: React.DragEvent) => {
      event.preventDefault();

      const blockData = event.dataTransfer.getData("application/reactflow");
@@ -935,62 +994,17 @@ const FlowEditor: React.FC<{
          y: event.clientY,
        });

        // Find the block schema
        const nodeSchema = availableBlocks.find((node) => node.id === blockId);
        if (!nodeSchema) {
          console.error(`Schema not found for block ID: ${blockId}`);
          return;
        }

        // Create the new node at the drop position
        const newNode: CustomNode = {
          id: nodeId.toString(),
          type: "custom",
        await createAndAddNode(
          blockId,
          blockName,
          hardcodedValues || {},
          position,
          data: {
            blockType: blockName,
            blockCosts: nodeSchema.costs || [],
            title: `${blockName} ${nodeId}`,
            description: nodeSchema.description,
            categories: nodeSchema.categories,
            inputSchema: nodeSchema.inputSchema,
            outputSchema: nodeSchema.outputSchema,
            hardcodedValues: hardcodedValues,
            connections: [],
            isOutputOpen: false,
            block_id: blockId,
            uiType: nodeSchema.uiType,
          },
        };

        history.push({
          type: "ADD_NODE",
          payload: { node: { ...newNode, ...newNode.data } },
          undo: () => {
            deleteElements({ nodes: [{ id: newNode.id } as any], edges: [] });
          },
          redo: () => {
            addNodes([newNode]);
          },
        });
        addNodes([newNode]);
        clearNodesStatusAndOutput();

        setNodeId((prevId) => prevId + 1);
        );
      } catch (error) {
        console.error("Failed to drop block:", error);
      }
    },
    [
      nodeId,
      availableBlocks,
      nodes,
      edges,
      addNodes,
      screenToFlowPosition,
      deleteElements,
      clearNodesStatusAndOutput,
    ],
    [screenToFlowPosition, createAndAddNode],
  );

  const buildContextValue: BuilderContextType = useMemo(

@@ -1,8 +1,14 @@
import React, { useContext, useState } from "react";
import React, { useContext, useMemo, useState } from "react";
import { Button } from "@/components/__legacy__/ui/button";
import { Maximize2 } from "lucide-react";
import * as Separator from "@radix-ui/react-separator";
import { ContentRenderer } from "@/components/__legacy__/ui/render";
import type { OutputMetadata } from "@/components/contextual/OutputRenderers";
import {
  globalRegistry,
  OutputItem,
} from "@/components/contextual/OutputRenderers";
import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag";

import { beautifyString } from "@/lib/utils";

@@ -21,6 +27,9 @@ export default function NodeOutputs({
  data,
}: NodeOutputsProps) {
  const builderContext = useContext(BuilderContext);
  const enableEnhancedOutputHandling = useGetFlag(
    Flag.ENABLE_ENHANCED_OUTPUT_HANDLING,
  );

  const [expandedDialog, setExpandedDialog] = useState<{
    isOpen: boolean;
@@ -37,6 +46,15 @@ export default function NodeOutputs({

  const { getNodeTitle } = builderContext;

  // Prepare renderers for each item when enhanced mode is enabled
  const getItemRenderer = useMemo(() => {
    if (!enableEnhancedOutputHandling) return null;
    return (item: unknown) => {
      const metadata: OutputMetadata = {};
      return globalRegistry.getRenderer(item, metadata);
    };
  }, [enableEnhancedOutputHandling]);

  const getBeautifiedPinName = (pin: string) => {
    if (!pin.startsWith("tools_^_")) {
      return beautifyString(pin);
@@ -87,15 +105,31 @@ export default function NodeOutputs({
      <div className="mt-2">
        <strong className="mr-2">Data:</strong>
        <div className="mt-1">
          {dataArray.slice(0, 10).map((item, index) => (
            <React.Fragment key={index}>
              <ContentRenderer
                value={item}
                truncateLongData={truncateLongData}
              />
              {index < Math.min(dataArray.length, 10) - 1 && ", "}
            </React.Fragment>
          ))}
          {dataArray.slice(0, 10).map((item, index) => {
            const renderer = getItemRenderer?.(item);
            if (enableEnhancedOutputHandling && renderer) {
              const metadata: OutputMetadata = {};
              return (
                <React.Fragment key={index}>
                  <OutputItem
                    value={item}
                    metadata={metadata}
                    renderer={renderer}
                  />
                  {index < Math.min(dataArray.length, 10) - 1 && ", "}
                </React.Fragment>
              );
            }
            return (
              <React.Fragment key={index}>
                <ContentRenderer
                  value={item}
                  truncateLongData={truncateLongData}
                />
                {index < Math.min(dataArray.length, 10) - 1 && ", "}
              </React.Fragment>
            );
          })}
          {dataArray.length > 10 && (
            <span style={{ color: "#888" }}>
              <br />

@@ -4,13 +4,13 @@ import { AgentRunDraftView } from "@/app/(platform)/library/agents/[id]/componen
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import type {
  CredentialsMetaInput,
  GraphMeta,
  Graph,
} from "@/lib/autogpt-server-api/types";

interface RunInputDialogProps {
  isOpen: boolean;
  doClose: () => void;
  graph: GraphMeta;
  graph: Graph;
  doRun?: (
    inputs: Record<string, any>,
    credentialsInputs: Record<string, CredentialsMetaInput>,

@@ -9,13 +9,13 @@ import { CustomNodeData } from "@/app/(platform)/build/components/legacy-builder
import {
  BlockUIType,
  CredentialsMetaInput,
  GraphMeta,
  Graph,
} from "@/lib/autogpt-server-api/types";
import RunnerOutputUI, { OutputNodeInfo } from "./RunnerOutputUI";
import { RunnerInputDialog } from "./RunnerInputUI";

interface RunnerUIWrapperProps {
  graph: GraphMeta;
  graph: Graph;
  nodes: Node<CustomNodeData>[];
  graphExecutionError?: string | null;
  saveAndRun: (

@@ -1,5 +1,5 @@
import { GraphInputSchema } from "@/lib/autogpt-server-api";
import { GraphMetaLike, IncompatibilityInfo } from "./types";
import { GraphLike, IncompatibilityInfo } from "./types";

// Helper type for schema properties - the generated types are too loose
type SchemaProperties = Record<string, GraphInputSchema["properties"][string]>;
@@ -36,7 +36,7 @@ export function getSchemaRequired(schema: unknown): SchemaRequired {
 */
export function createUpdatedAgentNodeInputs(
  currentInputs: Record<string, unknown>,
  latestSubGraphVersion: GraphMetaLike,
  latestSubGraphVersion: GraphLike,
): Record<string, unknown> {
  return {
    ...currentInputs,

@@ -1,7 +1,11 @@
import type { GraphMeta as LegacyGraphMeta } from "@/lib/autogpt-server-api";
import type {
  Graph as LegacyGraph,
  GraphMeta as LegacyGraphMeta,
} from "@/lib/autogpt-server-api";
import type { GraphModel as GeneratedGraph } from "@/app/api/__generated__/models/graphModel";
import type { GraphMeta as GeneratedGraphMeta } from "@/app/api/__generated__/models/graphMeta";

export type SubAgentUpdateInfo<T extends GraphMetaLike = GraphMetaLike> = {
export type SubAgentUpdateInfo<T extends GraphLike = GraphLike> = {
  hasUpdate: boolean;
  currentVersion: number;
  latestVersion: number;
@@ -10,7 +14,10 @@ export type SubAgentUpdateInfo<T extends GraphMetaLike = GraphMetaLike> = {
  incompatibilities: IncompatibilityInfo | null;
};

// Union type for GraphMeta that works with both legacy and new builder
// Union type for Graph (with schemas) that works with both legacy and new builder
export type GraphLike = LegacyGraph | GeneratedGraph;

// Union type for GraphMeta (without schemas) for version detection
export type GraphMetaLike = LegacyGraphMeta | GeneratedGraphMeta;

export type IncompatibilityInfo = {

@@ -1,5 +1,11 @@
import { useMemo } from "react";
import { GraphInputSchema, GraphOutputSchema } from "@/lib/autogpt-server-api";
import type {
  GraphInputSchema,
  GraphOutputSchema,
} from "@/lib/autogpt-server-api";
import type { GraphModel } from "@/app/api/__generated__/models/graphModel";
import { useGetV1GetSpecificGraph } from "@/app/api/__generated__/endpoints/graphs/graphs";
import { okData } from "@/app/api/helpers";
import { getEffectiveType } from "@/lib/utils";
import { EdgeLike, getSchemaProperties, getSchemaRequired } from "./helpers";
import {
@@ -11,26 +17,38 @@ import {
/**
 * Checks if a newer version of a sub-agent is available and determines compatibility
 */
export function useSubAgentUpdate<T extends GraphMetaLike>(
export function useSubAgentUpdate(
  nodeID: string,
  graphID: string | undefined,
  graphVersion: number | undefined,
  currentInputSchema: GraphInputSchema | undefined,
  currentOutputSchema: GraphOutputSchema | undefined,
  connections: EdgeLike[],
  availableGraphs: T[],
): SubAgentUpdateInfo<T> {
  availableGraphs: GraphMetaLike[],
): SubAgentUpdateInfo<GraphModel> {
  // Find the latest version of the same graph
  const latestGraph = useMemo(() => {
  const latestGraphInfo = useMemo(() => {
    if (!graphID) return null;
    return availableGraphs.find((graph) => graph.id === graphID) || null;
  }, [graphID, availableGraphs]);

  // Check if there's an update available
  // Check if there's a newer version available
  const hasUpdate = useMemo(() => {
    if (!latestGraph || graphVersion === undefined) return false;
    return latestGraph.version! > graphVersion;
  }, [latestGraph, graphVersion]);
    if (!latestGraphInfo || graphVersion === undefined) return false;
    return latestGraphInfo.version! > graphVersion;
  }, [latestGraphInfo, graphVersion]);

  // Fetch full graph IF an update is detected
  const { data: latestGraph } = useGetV1GetSpecificGraph(
    graphID ?? "",
    { version: latestGraphInfo?.version },
    {
      query: {
        enabled: hasUpdate && !!graphID && !!latestGraphInfo?.version,
        select: okData,
      },
    },
  );

  // Get connected input and output handles for this specific node
  const connectedHandles = useMemo(() => {
@@ -152,8 +170,8 @@ export function useSubAgentUpdate<T extends GraphMetaLike>(
  return {
    hasUpdate,
    currentVersion: graphVersion || 0,
    latestVersion: latestGraph?.version || 0,
    latestGraph,
    latestVersion: latestGraphInfo?.version || 0,
    latestGraph: latestGraph || null,
    isCompatible: compatibilityResult.isCompatible,
    incompatibilities: compatibilityResult.incompatibilities,
  };

@@ -18,7 +18,7 @@ interface GraphStore {
    outputSchema: Record<string, any> | null,
  ) => void;

  // Available graphs; used for sub-graph updates
  // Available graphs; used for sub-graph updated version detection
  availableSubGraphs: GraphMeta[];
  setAvailableSubGraphs: (graphs: GraphMeta[]) => void;


@@ -26,8 +26,20 @@ export function buildCopilotChatUrl(prompt: string): string {
|
||||
|
||||
export function getQuickActions(): string[] {
|
||||
return [
|
||||
"Show me what I can automate",
|
||||
"Design a custom workflow",
|
||||
"Help me with content creation",
|
||||
"I don't know where to start, just ask me stuff",
|
||||
"I do the same thing every week and it's killing me",
|
||||
"Help me find where I'm wasting my time",
|
||||
];
|
||||
}
|
||||
|
||||
export function getInputPlaceholder(width?: number) {
|
||||
if (!width) return "What's your role and what eats up most of your day?";
|
||||
|
||||
if (width < 500) {
|
||||
return "I'm a chef and I hate...";
|
||||
}
|
||||
if (width <= 1080) {
|
||||
return "What's your role and what eats up most of your day?";
|
||||
}
|
||||
return "What's your role and what eats up most of your day? e.g. 'I'm a recruiter and I hate...'";
|
||||
}
|
||||
|
||||
@@ -6,7 +6,9 @@ import { Text } from "@/components/atoms/Text/Text";
|
||||
import { Chat } from "@/components/contextual/Chat/Chat";
|
||||
import { ChatInput } from "@/components/contextual/Chat/components/ChatInput/ChatInput";
|
||||
import { Dialog } from "@/components/molecules/Dialog/Dialog";
|
||||
import { useEffect, useState } from "react";
|
||||
import { useCopilotStore } from "./copilot-page-store";
|
||||
import { getInputPlaceholder } from "./helpers";
|
||||
import { useCopilotPage } from "./useCopilotPage";
|
||||
|
||||
export default function CopilotPage() {
|
||||
@@ -14,8 +16,25 @@ export default function CopilotPage() {
|
||||
const isInterruptModalOpen = useCopilotStore((s) => s.isInterruptModalOpen);
|
||||
const confirmInterrupt = useCopilotStore((s) => s.confirmInterrupt);
|
||||
const cancelInterrupt = useCopilotStore((s) => s.cancelInterrupt);
|
||||
|
||||
const [inputPlaceholder, setInputPlaceholder] = useState(
|
||||
getInputPlaceholder(),
|
||||
);
|
||||
|
||||
useEffect(() => {
|
||||
const handleResize = () => {
|
||||
setInputPlaceholder(getInputPlaceholder(window.innerWidth));
|
||||
};
|
||||
|
||||
handleResize();
|
||||
|
||||
window.addEventListener("resize", handleResize);
|
||||
return () => window.removeEventListener("resize", handleResize);
|
||||
}, []);
|
||||
|
||||
const { greetingName, quickActions, isLoading, hasSession, initialPrompt } =
|
||||
state;
|
||||
|
||||
const {
|
||||
handleQuickAction,
|
||||
startChatWithPrompt,
|
||||
@@ -73,7 +92,7 @@ export default function CopilotPage() {
|
||||
}
|
||||
|
||||
return (
|
||||
<div className="flex h-full flex-1 items-center justify-center overflow-y-auto bg-[#f8f8f9] px-6 py-10">
|
||||
<div className="flex h-full flex-1 items-center justify-center overflow-y-auto bg-[#f8f8f9] px-3 py-5 md:px-6 md:py-10">
|
||||
<div className="w-full text-center">
|
||||
{isLoading ? (
|
||||
<div className="mx-auto max-w-2xl">
|
||||
@@ -90,25 +109,25 @@ export default function CopilotPage() {
|
||||
</div>
|
||||
) : (
|
||||
<>
|
||||
<div className="mx-auto max-w-2xl">
|
||||
<div className="mx-auto max-w-3xl">
|
||||
<Text
|
||||
variant="h3"
|
||||
className="mb-3 !text-[1.375rem] text-zinc-700"
|
||||
className="mb-1 !text-[1.375rem] text-zinc-700"
|
||||
>
|
||||
Hey, <span className="text-violet-600">{greetingName}</span>
|
||||
</Text>
|
||||
<Text variant="h3" className="mb-8 !font-normal">
|
||||
What do you want to automate?
|
||||
Tell me about your work — I'll find what to automate.
|
||||
</Text>
|
||||
|
||||
<div className="mb-6">
|
||||
<ChatInput
|
||||
onSend={startChatWithPrompt}
|
||||
placeholder='You can search or just ask - e.g. "create a blog post outline"'
|
||||
placeholder={inputPlaceholder}
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
<div className="flex flex-nowrap items-center justify-center gap-3 overflow-x-auto [-ms-overflow-style:none] [scrollbar-width:none] [&::-webkit-scrollbar]:hidden">
|
||||
<div className="flex flex-wrap items-center justify-center gap-3 overflow-x-auto [-ms-overflow-style:none] [scrollbar-width:none] [&::-webkit-scrollbar]:hidden">
|
||||
{quickActions.map((action) => (
|
||||
<Button
|
||||
key={action}
|
||||
@@ -116,7 +135,7 @@ export default function CopilotPage() {
|
||||
variant="outline"
|
||||
size="small"
|
||||
onClick={() => handleQuickAction(action)}
|
||||
className="h-auto shrink-0 border-zinc-600 !px-4 !py-2 text-[1rem] text-zinc-600"
|
||||
className="h-auto shrink-0 border-zinc-300 px-3 py-2 text-[.9rem] text-zinc-600"
|
||||
>
|
||||
{action}
|
||||
</Button>
|
||||
|
||||
@@ -10,8 +10,8 @@ import React, {
|
||||
import {
|
||||
CredentialsMetaInput,
|
||||
CredentialsType,
|
||||
Graph,
|
||||
GraphExecutionID,
|
||||
GraphMeta,
|
||||
LibraryAgentPreset,
|
||||
LibraryAgentPresetID,
|
||||
LibraryAgentPresetUpdatable,
|
||||
@@ -69,7 +69,7 @@ export function AgentRunDraftView({
|
||||
className,
|
||||
recommendedScheduleCron,
|
||||
}: {
|
||||
graph: GraphMeta;
|
||||
graph: Graph;
|
||||
agentActions?: ButtonAction[];
|
||||
recommendedScheduleCron?: string | null;
|
||||
doRun?: (
|
||||
|
||||
@@ -2,8 +2,8 @@
|
||||
import React, { useCallback, useMemo } from "react";
|
||||
|
||||
import {
|
||||
Graph,
|
||||
GraphExecutionID,
|
||||
GraphMeta,
|
||||
Schedule,
|
||||
ScheduleID,
|
||||
} from "@/lib/autogpt-server-api";
|
||||
@@ -35,7 +35,7 @@ export function AgentScheduleDetailsView({
|
||||
onForcedRun,
|
||||
doDeleteSchedule,
|
||||
}: {
|
||||
graph: GraphMeta;
|
||||
graph: Graph;
|
||||
schedule: Schedule;
|
||||
agentActions: ButtonAction[];
|
||||
onForcedRun: (runID: GraphExecutionID) => void;
|
||||
|
||||
@@ -5629,7 +5629,9 @@
        "description": "Successful Response",
        "content": {
          "application/json": {
            "schema": { "$ref": "#/components/schemas/GraphMeta" }
            "schema": {
              "$ref": "#/components/schemas/GraphModelWithoutNodes"
            }
          }
        }
      },
@@ -6495,18 +6497,6 @@
          "anyOf": [{ "type": "string" }, { "type": "null" }],
          "title": "Recommended Schedule Cron"
        },
        "nodes": {
          "items": { "$ref": "#/components/schemas/Node" },
          "type": "array",
          "title": "Nodes",
          "default": []
        },
        "links": {
          "items": { "$ref": "#/components/schemas/Link" },
          "type": "array",
          "title": "Links",
          "default": []
        },
        "forked_from_id": {
          "anyOf": [{ "type": "string" }, { "type": "null" }],
          "title": "Forked From Id"
@@ -6514,11 +6504,22 @@
        "forked_from_version": {
          "anyOf": [{ "type": "integer" }, { "type": "null" }],
          "title": "Forked From Version"
        },
        "nodes": {
          "items": { "$ref": "#/components/schemas/Node" },
          "type": "array",
          "title": "Nodes"
        },
        "links": {
          "items": { "$ref": "#/components/schemas/Link" },
          "type": "array",
          "title": "Links"
        }
      },
      "type": "object",
      "required": ["name", "description"],
      "title": "BaseGraph"
      "title": "BaseGraph",
      "description": "Graph with nodes, links, and computed I/O schema fields.\n\nUsed to represent sub-graphs within a `Graph`. Contains the full graph\nstructure including nodes and links, plus computed fields for schemas\nand trigger info. Does NOT include user_id or created_at (see GraphModel)."
    },
    "BaseGraph-Output": {
      "properties": {
@@ -6539,18 +6540,6 @@
          "anyOf": [{ "type": "string" }, { "type": "null" }],
          "title": "Recommended Schedule Cron"
        },
        "nodes": {
          "items": { "$ref": "#/components/schemas/Node" },
          "type": "array",
          "title": "Nodes",
          "default": []
        },
        "links": {
          "items": { "$ref": "#/components/schemas/Link" },
          "type": "array",
          "title": "Links",
          "default": []
        },
        "forked_from_id": {
          "anyOf": [{ "type": "string" }, { "type": "null" }],
          "title": "Forked From Id"
@@ -6559,6 +6548,16 @@
          "anyOf": [{ "type": "integer" }, { "type": "null" }],
          "title": "Forked From Version"
        },
        "nodes": {
          "items": { "$ref": "#/components/schemas/Node" },
          "type": "array",
          "title": "Nodes"
        },
        "links": {
          "items": { "$ref": "#/components/schemas/Link" },
          "type": "array",
          "title": "Links"
        },
        "input_schema": {
          "additionalProperties": true,
          "type": "object",
@@ -6605,7 +6604,8 @@
        "has_sensitive_action",
        "trigger_setup_info"
      ],
      "title": "BaseGraph"
      "title": "BaseGraph",
      "description": "Graph with nodes, links, and computed I/O schema fields.\n\nUsed to represent sub-graphs within a `Graph`. Contains the full graph\nstructure including nodes and links, plus computed fields for schemas\nand trigger info. Does NOT include user_id or created_at (see GraphModel)."
    },
    "BlockCategoryResponse": {
      "properties": {
@@ -7399,18 +7399,6 @@
          "anyOf": [{ "type": "string" }, { "type": "null" }],
          "title": "Recommended Schedule Cron"
        },
        "nodes": {
          "items": { "$ref": "#/components/schemas/Node" },
          "type": "array",
          "title": "Nodes",
          "default": []
        },
        "links": {
          "items": { "$ref": "#/components/schemas/Link" },
          "type": "array",
          "title": "Links",
          "default": []
        },
        "forked_from_id": {
          "anyOf": [{ "type": "string" }, { "type": "null" }],
          "title": "Forked From Id"
@@ -7419,16 +7407,26 @@
          "anyOf": [{ "type": "integer" }, { "type": "null" }],
          "title": "Forked From Version"
        },
        "nodes": {
          "items": { "$ref": "#/components/schemas/Node" },
          "type": "array",
          "title": "Nodes"
        },
        "links": {
          "items": { "$ref": "#/components/schemas/Link" },
          "type": "array",
          "title": "Links"
        },
        "sub_graphs": {
          "items": { "$ref": "#/components/schemas/BaseGraph-Input" },
          "type": "array",
          "title": "Sub Graphs",
          "default": []
          "title": "Sub Graphs"
        }
      },
      "type": "object",
      "required": ["name", "description"],
      "title": "Graph"
      "title": "Graph",
      "description": "Creatable graph model used in API create/update endpoints."
    },
    "GraphExecution": {
      "properties": {
@@ -7778,6 +7776,52 @@
      "description": "Response schema for paginated graph executions."
    },
    "GraphMeta": {
      "properties": {
        "id": { "type": "string", "title": "Id" },
        "version": { "type": "integer", "title": "Version" },
        "is_active": {
          "type": "boolean",
          "title": "Is Active",
          "default": true
        },
        "name": { "type": "string", "title": "Name" },
        "description": { "type": "string", "title": "Description" },
        "instructions": {
          "anyOf": [{ "type": "string" }, { "type": "null" }],
          "title": "Instructions"
        },
        "recommended_schedule_cron": {
          "anyOf": [{ "type": "string" }, { "type": "null" }],
          "title": "Recommended Schedule Cron"
        },
        "forked_from_id": {
          "anyOf": [{ "type": "string" }, { "type": "null" }],
          "title": "Forked From Id"
        },
        "forked_from_version": {
          "anyOf": [{ "type": "integer" }, { "type": "null" }],
          "title": "Forked From Version"
        },
        "user_id": { "type": "string", "title": "User Id" },
        "created_at": {
          "type": "string",
          "format": "date-time",
          "title": "Created At"
        }
      },
      "type": "object",
      "required": [
        "id",
        "version",
        "name",
        "description",
        "user_id",
        "created_at"
      ],
      "title": "GraphMeta",
      "description": "Lightweight graph metadata model representing an existing graph from the database,\nfor use in listings and summaries.\n\nLacks `GraphModel`'s nodes, links, and expensive computed fields.\nUse for list endpoints where full graph data is not needed and performance matters."
    },
    "GraphModel": {
      "properties": {
        "id": { "type": "string", "title": "Id" },
        "version": { "type": "integer", "title": "Version", "default": 1 },
@@ -7804,13 +7848,27 @@
          "anyOf": [{ "type": "integer" }, { "type": "null" }],
          "title": "Forked From Version"
        },
        "user_id": { "type": "string", "title": "User Id" },
        "created_at": {
          "type": "string",
          "format": "date-time",
          "title": "Created At"
        },
        "nodes": {
          "items": { "$ref": "#/components/schemas/NodeModel" },
          "type": "array",
          "title": "Nodes"
        },
        "links": {
          "items": { "$ref": "#/components/schemas/Link" },
          "type": "array",
          "title": "Links"
        },
        "sub_graphs": {
          "items": { "$ref": "#/components/schemas/BaseGraph-Output" },
          "type": "array",
          "title": "Sub Graphs",
          "default": []
          "title": "Sub Graphs"
        },
        "user_id": { "type": "string", "title": "User Id" },
        "input_schema": {
          "additionalProperties": true,
          "type": "object",
@@ -7857,6 +7915,7 @@
        "name",
        "description",
        "user_id",
        "created_at",
        "input_schema",
        "output_schema",
        "has_external_trigger",
@@ -7865,9 +7924,10 @@
        "trigger_setup_info",
        "credentials_input_schema"
      ],
      "title": "GraphMeta"
      "title": "GraphModel",
      "description": "Full graph model representing an existing graph from the database.\n\nThis is the primary model for working with persisted graphs. Includes all\ngraph data (nodes, links, sub_graphs) plus user ownership and timestamps.\nProvides computed fields (input_schema, output_schema, etc.) used during\nset-up (frontend) and execution (backend).\n\nInherits from:\n- `Graph`: provides structure (nodes, links, sub_graphs) and computed schemas\n- `GraphMeta`: provides user_id, created_at for database records"
    },
    "GraphModel": {
    "GraphModelWithoutNodes": {
      "properties": {
        "id": { "type": "string", "title": "Id" },
        "version": { "type": "integer", "title": "Version", "default": 1 },
@@ -7886,18 +7946,6 @@
          "anyOf": [{ "type": "string" }, { "type": "null" }],
          "title": "Recommended Schedule Cron"
        },
        "nodes": {
          "items": { "$ref": "#/components/schemas/NodeModel" },
          "type": "array",
          "title": "Nodes",
          "default": []
        },
        "links": {
          "items": { "$ref": "#/components/schemas/Link" },
          "type": "array",
          "title": "Links",
          "default": []
        },
        "forked_from_id": {
          "anyOf": [{ "type": "string" }, { "type": "null" }],
          "title": "Forked From Id"
@@ -7906,12 +7954,6 @@
          "anyOf": [{ "type": "integer" }, { "type": "null" }],
          "title": "Forked From Version"
        },
        "sub_graphs": {
          "items": { "$ref": "#/components/schemas/BaseGraph-Output" },
          "type": "array",
          "title": "Sub Graphs",
          "default": []
        },
        "user_id": { "type": "string", "title": "User Id" },
        "created_at": {
          "type": "string",
@@ -7973,7 +8015,8 @@
        "trigger_setup_info",
        "credentials_input_schema"
      ],
      "title": "GraphModel"
      "title": "GraphModelWithoutNodes",
      "description": "GraphModel variant that excludes nodes, links, and sub-graphs from serialization.\n\nUsed in contexts like the store where exposing internal graph structure\nis not desired. Inherits all computed fields from GraphModel but marks\nnodes and links as excluded from JSON output."
    },
    "GraphSettings": {
      "properties": {
@@ -8613,26 +8656,22 @@
        "input_default": {
          "additionalProperties": true,
          "type": "object",
          "title": "Input Default",
          "default": {}
          "title": "Input Default"
        },
        "metadata": {
          "additionalProperties": true,
          "type": "object",
          "title": "Metadata",
          "default": {}
          "title": "Metadata"
        },
        "input_links": {
          "items": { "$ref": "#/components/schemas/Link" },
          "type": "array",
          "title": "Input Links",
          "default": []
          "title": "Input Links"
        },
        "output_links": {
          "items": { "$ref": "#/components/schemas/Link" },
          "type": "array",
          "title": "Output Links",
          "default": []
          "title": "Output Links"
        }
      },
      "type": "object",
@@ -8712,26 +8751,22 @@
        "input_default": {
          "additionalProperties": true,
          "type": "object",
          "title": "Input Default",
          "default": {}
          "title": "Input Default"
        },
        "metadata": {
          "additionalProperties": true,
          "type": "object",
          "title": "Metadata",
          "default": {}
          "title": "Metadata"
        },
        "input_links": {
          "items": { "$ref": "#/components/schemas/Link" },
          "type": "array",
          "title": "Input Links",
          "default": []
          "title": "Input Links"
        },
        "output_links": {
          "items": { "$ref": "#/components/schemas/Link" },
          "type": "array",
          "title": "Output Links",
          "default": []
          "title": "Output Links"
        },
        "graph_id": { "type": "string", "title": "Graph Id" },
        "graph_version": { "type": "integer", "title": "Graph Version" },
|
||||
if (url.startsWith("data:video")) {
|
||||
return true;
|
||||
}
|
||||
const videoExtensions = /\.(mp4|webm|ogg)$/i;
|
||||
const videoExtensions = /\.(mp4|webm|ogg|mov|avi|mkv|m4v)$/i;
|
||||
const youtubeRegex = /^(https?:\/\/)?(www\.)?(youtube\.com|youtu\.?be)\/.+$/;
|
||||
const cleanedUrl = url.split("?")[0];
|
||||
return (
|
||||
@@ -44,11 +44,29 @@ const isValidAudioUrl = (url: string): boolean => {
|
||||
if (url.startsWith("data:audio")) {
|
||||
return true;
|
||||
}
|
||||
const audioExtensions = /\.(mp3|wav)$/i;
|
||||
const audioExtensions = /\.(mp3|wav|ogg|m4a|aac|flac)$/i;
|
||||
const cleanedUrl = url.split("?")[0];
|
||||
return isValidMediaUri(url) && audioExtensions.test(cleanedUrl);
|
||||
};
|
||||
|
||||
const getVideoMimeType = (url: string): string => {
|
||||
if (url.startsWith("data:video/")) {
|
||||
const match = url.match(/^data:(video\/[^;]+)/);
|
||||
return match?.[1] || "video/mp4";
|
||||
}
|
||||
const extension = url.split("?")[0].split(".").pop()?.toLowerCase();
|
||||
const mimeMap: Record<string, string> = {
|
||||
mp4: "video/mp4",
|
||||
webm: "video/webm",
|
||||
ogg: "video/ogg",
|
||||
mov: "video/quicktime",
|
||||
avi: "video/x-msvideo",
|
||||
mkv: "video/x-matroska",
|
||||
m4v: "video/mp4",
|
||||
};
|
||||
return mimeMap[extension || ""] || "video/mp4";
|
||||
};
|
||||
|
||||
const VideoRenderer: React.FC<{ videoUrl: string }> = ({ videoUrl }) => {
|
||||
const videoId = getYouTubeVideoId(videoUrl);
|
||||
return (
|
||||
@@ -63,7 +81,7 @@ const VideoRenderer: React.FC<{ videoUrl: string }> = ({ videoUrl }) => {
|
||||
></iframe>
|
||||
) : (
|
||||
<video controls width="100%" height="315">
|
||||
<source src={videoUrl} type="video/mp4" />
|
||||
<source src={videoUrl} type={getVideoMimeType(videoUrl)} />
|
||||
Your browser does not support the video tag.
|
||||
</video>
|
||||
)}
|
||||
|
||||
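For reference, the extension-to-MIME mapping introduced here behaves roughly as follows; the URLs are hypothetical, not taken from the codebase:

getVideoMimeType("https://cdn.example.com/clip.mov?sig=abc"); // "video/quicktime" — query string stripped first
getVideoMimeType("data:video/webm;base64,AAAA"); // "video/webm" — read directly from the data URI
getVideoMimeType("https://cdn.example.com/clip.xyz"); // "video/mp4" — fallback for unknown extensions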
@@ -2,7 +2,6 @@ import type { SessionDetailResponse } from "@/app/api/__generated__/models/sessi
import { Button } from "@/components/atoms/Button/Button";
import { Text } from "@/components/atoms/Text/Text";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { useBreakpoint } from "@/lib/hooks/useBreakpoint";
import { cn } from "@/lib/utils";
import { GlobeHemisphereEastIcon } from "@phosphor-icons/react";
import { useEffect } from "react";
@@ -56,10 +55,6 @@ export function ChatContainer({
    onStreamingChange?.(isStreaming);
  }, [isStreaming, onStreamingChange]);

  const breakpoint = useBreakpoint();
  const isMobile =
    breakpoint === "base" || breakpoint === "sm" || breakpoint === "md";

  return (
    <div
      className={cn(
@@ -127,11 +122,7 @@
          disabled={isStreaming || !sessionId}
          isStreaming={isStreaming}
          onStop={stopStreaming}
          placeholder={
            isMobile
              ? "You can search or just ask"
              : 'You can search or just ask — e.g. "create a blog post outline"'
          }
          placeholder="What else can I help with?"
        />
      </div>
    </div>

@@ -74,19 +74,20 @@ export function ChatInput({
          hasMultipleLines ? "rounded-xlarge" : "rounded-full",
        )}
      >
        {!value && !isRecording && (
          <div
            className="pointer-events-none absolute inset-0 top-0.5 flex items-center justify-start pl-14 text-[1rem] text-zinc-400"
            aria-hidden="true"
          >
            {isTranscribing ? "Transcribing..." : placeholder}
          </div>
        )}
        <textarea
          id={inputId}
          aria-label="Chat message input"
          value={value}
          onChange={handleChange}
          onKeyDown={handleKeyDown}
          placeholder={
            isTranscribing
              ? "Transcribing..."
              : isRecording
                ? ""
                : placeholder
          }
          disabled={isInputDisabled}
          rows={1}
          className={cn(
@@ -122,13 +123,14 @@
            size="icon"
            aria-label={isRecording ? "Stop recording" : "Start recording"}
            onClick={toggleRecording}
            disabled={disabled || isTranscribing}
            disabled={disabled || isTranscribing || isStreaming}
            className={cn(
              isRecording
                ? "animate-pulse border-red-500 bg-red-500 text-white hover:border-red-600 hover:bg-red-600"
                : isTranscribing
                  ? "border-zinc-300 bg-zinc-100 text-zinc-400"
                  : "border-zinc-300 bg-white text-zinc-500 hover:border-zinc-400 hover:bg-zinc-50 hover:text-zinc-700",
              isStreaming && "opacity-40",
            )}
          >
            {isTranscribing ? (
@@ -38,8 +38,8 @@ export function AudioWaveform({
    // Create audio context and analyser
    const audioContext = new AudioContext();
    const analyser = audioContext.createAnalyser();
    analyser.fftSize = 512;
    analyser.smoothingTimeConstant = 0.8;
    analyser.fftSize = 256;
    analyser.smoothingTimeConstant = 0.3;

    // Connect the stream to the analyser
    const source = audioContext.createMediaStreamSource(stream);
@@ -73,10 +73,11 @@
        maxAmplitude = Math.max(maxAmplitude, amplitude);
      }

      // Map amplitude (0-128) to bar height
      const normalized = (maxAmplitude / 128) * 255;
      const height =
        minBarHeight + (normalized / 255) * (maxBarHeight - minBarHeight);
      // Normalize amplitude (0-128 range) to 0-1
      const normalized = maxAmplitude / 128;
      // Apply sensitivity boost (multiply by 4) and use sqrt curve to amplify quiet sounds
      const boosted = Math.min(1, Math.sqrt(normalized) * 4);
      const height = minBarHeight + boosted * (maxBarHeight - minBarHeight);
      newBars.push(height);
    }
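The new curve is what makes quiet input visibly move the bars; a worked example of the boost, using hypothetical amplitude values:

// boosted = min(1, sqrt(maxAmplitude / 128) * 4)
// maxAmplitude = 5 (quiet speech): sqrt(5 / 128) * 4 ≈ 0.198 * 4 ≈ 0.79 → ~79% of the bar range
// maxAmplitude = 8: sqrt(8 / 128) * 4 = sqrt(1/16) * 4 = 1 → fully saturated
// So anything at or above ~6% of full scale pegs the bar, where the old
// linear mapping would have drawn it at only ~4% of the bar range.
const boostedFor = (maxAmplitude: number) =>
  Math.min(1, Math.sqrt(maxAmplitude / 128) * 4);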
@@ -224,7 +224,7 @@ export function useVoiceRecording({
    [value, isTranscribing, toggleRecording, baseHandleKeyDown],
  );

  const showMicButton = isSupported && !isStreaming;
  const showMicButton = isSupported;
  const isInputDisabled = disabled || isStreaming || isTranscribing;

  // Cleanup on unmount

@@ -102,18 +102,6 @@ export function ChatMessage({
    }
  }

  function handleClarificationAnswers(answers: Record<string, string>) {
    if (onSendMessage) {
      const contextMessage = Object.entries(answers)
        .map(([keyword, answer]) => `${keyword}: ${answer}`)
        .join("\n");

      onSendMessage(
        `I have the answers to your questions:\n\n${contextMessage}\n\nPlease proceed with creating the agent.`,
      );
    }
  }

  const handleCopy = useCallback(
    async function handleCopy() {
      if (message.type !== "message") return;
@@ -162,6 +150,22 @@
      .slice(index + 1)
      .some((m) => m.type === "message" && m.role === "user");

    const handleClarificationAnswers = (answers: Record<string, string>) => {
      if (onSendMessage) {
        // Iterate over questions (preserves original order) instead of answers
        const contextMessage = message.questions
          .map((q) => {
            const answer = answers[q.keyword] || "";
            return `> ${q.question}\n\n${answer}`;
          })
          .join("\n\n");

        onSendMessage(
          `**Here are my answers:**\n\n${contextMessage}\n\nPlease proceed with creating the agent.`,
        );
      }
    };

    return (
      <ClarificationQuestionsWidget
        questions={message.questions}
@@ -346,6 +350,7 @@
          toolId={message.toolId}
          toolName={message.toolName}
          result={message.result}
          onSendMessage={onSendMessage}
        />
      </div>
    );

@@ -3,7 +3,7 @@
import { getGetWorkspaceDownloadFileByIdUrl } from "@/app/api/__generated__/endpoints/workspace/workspace";
import { cn } from "@/lib/utils";
import { EyeSlash } from "@phosphor-icons/react";
import React from "react";
import React, { useState } from "react";
import ReactMarkdown from "react-markdown";
import remarkGfm from "remark-gfm";

@@ -48,7 +48,9 @@ interface InputProps extends React.InputHTMLAttributes<HTMLInputElement> {
 */
function resolveWorkspaceUrl(src: string): string {
  if (src.startsWith("workspace://")) {
    const fileId = src.replace("workspace://", "");
    // Strip MIME type fragment if present (e.g., workspace://abc123#video/mp4 → abc123)
    const withoutPrefix = src.replace("workspace://", "");
    const fileId = withoutPrefix.split("#")[0];
    // Use the generated API URL helper to get the correct path
    const apiPath = getGetWorkspaceDownloadFileByIdUrl(fileId);
    // Route through the Next.js proxy (same pattern as customMutator for client-side)
@@ -65,13 +67,49 @@ function isWorkspaceImage(src: string | undefined): boolean {
  return src?.includes("/workspace/files/") ?? false;
}

/**
 * Renders a workspace video with controls and an optional "AI cannot see" badge.
 */
function WorkspaceVideo({
  src,
  aiCannotSee,
}: {
  src: string;
  aiCannotSee: boolean;
}) {
  return (
    <span className="relative my-2 inline-block">
      <video
        controls
        className="h-auto max-w-full rounded-md border border-zinc-200"
        preload="metadata"
      >
        <source src={src} />
        Your browser does not support the video tag.
      </video>
      {aiCannotSee && (
        <span
          className="absolute bottom-2 right-2 flex items-center gap-1 rounded bg-black/70 px-2 py-1 text-xs text-white"
          title="The AI cannot see this video"
        >
          <EyeSlash size={14} />
          <span>AI cannot see this video</span>
        </span>
      )}
    </span>
  );
}

/**
 * Custom image component that shows an indicator when the AI cannot see the image.
 * Also handles the "video:" alt-text prefix convention to render <video> elements.
 * For workspace files with unknown types, falls back to <video> if <img> fails.
 * Note: src is already transformed by urlTransform, so workspace:// is now /api/workspace/...
 */
function MarkdownImage(props: Record<string, unknown>) {
  const src = props.src as string | undefined;
  const alt = props.alt as string | undefined;
  const [imgFailed, setImgFailed] = useState(false);

  const aiCannotSee = isWorkspaceImage(src);

@@ -84,6 +122,18 @@ function MarkdownImage(props: Record<string, unknown>) {
    );
  }

  // Detect video: prefix in alt text (set by formatOutputValue in helpers.ts)
  if (alt?.startsWith("video:")) {
    return <WorkspaceVideo src={src} aiCannotSee={aiCannotSee} />;
  }

  // If the <img> failed to load and this is a workspace file, try as video.
  // This handles generic output keys like "file_out" where the MIME type
  // isn't known from the key name alone.
  if (imgFailed && aiCannotSee) {
    return <WorkspaceVideo src={src} aiCannotSee={aiCannotSee} />;
  }

  return (
    <span className="relative my-2 inline-block">
      {/* eslint-disable-next-line @next/next/no-img-element */}
@@ -92,6 +142,9 @@ function MarkdownImage(props: Record<string, unknown>) {
        alt={alt || "Image"}
        className="h-auto max-w-full rounded-md border border-zinc-200"
        loading="lazy"
        onError={() => {
          if (aiCannotSee) setImgFailed(true);
        }}
      />
      {aiCannotSee && (
        <span
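The fragment handling in resolveWorkspaceUrl is easy to sanity-check in isolation; the IDs below are hypothetical:

"workspace://abc123#video/mp4".replace("workspace://", "").split("#")[0]; // "abc123"
"workspace://abc123".replace("workspace://", "").split("#")[0]; // "abc123" — no fragment, unchanged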
@@ -73,6 +73,7 @@ export function MessageList({
              key={index}
              message={message}
              prevMessage={messages[index - 1]}
              onSendMessage={onSendMessage}
            />
          );
        }

@@ -5,11 +5,13 @@ import { shouldSkipAgentOutput } from "../../helpers";
export interface LastToolResponseProps {
  message: ChatMessageData;
  prevMessage: ChatMessageData | undefined;
  onSendMessage?: (content: string) => void;
}

export function LastToolResponse({
  message,
  prevMessage,
  onSendMessage,
}: LastToolResponseProps) {
  if (message.type !== "tool_response") return null;

@@ -21,6 +23,7 @@ export function LastToolResponse({
        toolId={message.toolId}
        toolName={message.toolName}
        result={message.result}
        onSendMessage={onSendMessage}
      />
    </div>
  );

@@ -1,6 +1,8 @@
import { Progress } from "@/components/atoms/Progress/Progress";
import { cn } from "@/lib/utils";
import { useEffect, useRef, useState } from "react";
import { AIChatBubble } from "../AIChatBubble/AIChatBubble";
import { useAsymptoticProgress } from "../ToolCallMessage/useAsymptoticProgress";

export interface ThinkingMessageProps {
  className?: string;
@@ -11,6 +13,7 @@ export function ThinkingMessage({ className }: ThinkingMessageProps) {
  const [showCoffeeMessage, setShowCoffeeMessage] = useState(false);
  const timerRef = useRef<NodeJS.Timeout | null>(null);
  const coffeeTimerRef = useRef<NodeJS.Timeout | null>(null);
  const progress = useAsymptoticProgress(showCoffeeMessage);

  useEffect(() => {
    if (timerRef.current === null) {
@@ -49,9 +52,18 @@ export function ThinkingMessage({ className }: ThinkingMessageProps) {
      <AIChatBubble>
        <div className="transition-all duration-500 ease-in-out">
          {showCoffeeMessage ? (
            <span className="inline-block animate-shimmer bg-gradient-to-r from-neutral-400 via-neutral-600 to-neutral-400 bg-[length:200%_100%] bg-clip-text text-transparent">
              This could take a few minutes, grab a coffee ☕️
            </span>
            <div className="flex flex-col items-center gap-3">
              <div className="flex w-full max-w-[280px] flex-col gap-1.5">
                <div className="flex items-center justify-between text-xs text-neutral-500">
                  <span>Working on it...</span>
                  <span>{Math.round(progress)}%</span>
                </div>
                <Progress value={progress} className="h-2 w-full" />
              </div>
              <span className="inline-block animate-shimmer bg-gradient-to-r from-neutral-400 via-neutral-600 to-neutral-400 bg-[length:200%_100%] bg-clip-text text-transparent">
                This could take a few minutes, grab a coffee ☕️
              </span>
            </div>
          ) : showSlowLoader ? (
            <span className="inline-block animate-shimmer bg-gradient-to-r from-neutral-400 via-neutral-600 to-neutral-400 bg-[length:200%_100%] bg-clip-text text-transparent">
              Taking a bit more time...
@@ -0,0 +1,50 @@
import { useEffect, useRef, useState } from "react";

/**
 * Hook that returns a progress value that starts fast and slows down,
 * asymptotically approaching but never reaching the max value.
 *
 * Uses a half-life formula: progress = max * (1 - 0.5^(time/halfLife))
 * This creates the "game loading bar" effect where:
 * - 50% is reached at halfLifeSeconds
 * - 75% is reached at 2 * halfLifeSeconds
 * - 87.5% is reached at 3 * halfLifeSeconds
 * - and so on...
 *
 * @param isActive - Whether the progress should be animating
 * @param halfLifeSeconds - Time in seconds to reach 50% progress (default: 30)
 * @param maxProgress - Maximum progress value to approach (default: 100)
 * @param intervalMs - Update interval in milliseconds (default: 100)
 * @returns Current progress value (0-maxProgress)
 */
export function useAsymptoticProgress(
  isActive: boolean,
  halfLifeSeconds = 30,
  maxProgress = 100,
  intervalMs = 100,
) {
  const [progress, setProgress] = useState(0);
  const elapsedTimeRef = useRef(0);

  useEffect(() => {
    if (!isActive) {
      setProgress(0);
      elapsedTimeRef.current = 0;
      return;
    }

    const interval = setInterval(() => {
      elapsedTimeRef.current += intervalMs / 1000;
      // Half-life approach: progress = max * (1 - 0.5^(time/halfLife))
      // At t=halfLife: 50%, at t=2*halfLife: 75%, at t=3*halfLife: 87.5%, etc.
      const newProgress =
        maxProgress *
        (1 - Math.pow(0.5, elapsedTimeRef.current / halfLifeSeconds));
      setProgress(newProgress);
    }, intervalMs);

    return () => clearInterval(interval);
  }, [isActive, halfLifeSeconds, maxProgress, intervalMs]);

  return progress;
}
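The checkpoints in the doc comment can be verified by evaluating the formula directly; a standalone sketch of the same curve:

const progressAt = (t: number, halfLife = 30, max = 100) =>
  max * (1 - Math.pow(0.5, t / halfLife));

progressAt(30); // 50
progressAt(60); // 75
progressAt(90); // 87.5
progressAt(300); // ≈ 99.9 — approaches but never reaches 100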
@@ -0,0 +1,128 @@
"use client";

import { useGetV2GetLibraryAgent } from "@/app/api/__generated__/endpoints/library/library";
import { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecutionJobInfo";
import { GraphExecutionMeta } from "@/app/api/__generated__/models/graphExecutionMeta";
import { RunAgentModal } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/RunAgentModal";
import { Button } from "@/components/atoms/Button/Button";
import { Text } from "@/components/atoms/Text/Text";
import {
  CheckCircleIcon,
  PencilLineIcon,
  PlayIcon,
} from "@phosphor-icons/react";
import { AIChatBubble } from "../AIChatBubble/AIChatBubble";

interface Props {
  agentName: string;
  libraryAgentId: string;
  onSendMessage?: (content: string) => void;
}

export function AgentCreatedPrompt({
  agentName,
  libraryAgentId,
  onSendMessage,
}: Props) {
  // Fetch library agent eagerly so modal is ready when user clicks
  const { data: libraryAgentResponse, isLoading } = useGetV2GetLibraryAgent(
    libraryAgentId,
    {
      query: {
        enabled: !!libraryAgentId,
      },
    },
  );

  const libraryAgent =
    libraryAgentResponse?.status === 200 ? libraryAgentResponse.data : null;

  function handleRunWithPlaceholders() {
    onSendMessage?.(
      `Run the agent "${agentName}" with placeholder/example values so I can test it.`,
    );
  }

  function handleRunCreated(execution: GraphExecutionMeta) {
    onSendMessage?.(
      `I've started the agent "${agentName}". The execution ID is ${execution.id}. Please monitor its progress and let me know when it completes.`,
    );
  }

  function handleScheduleCreated(schedule: GraphExecutionJobInfo) {
    const scheduleInfo = schedule.cron
      ? `with cron schedule "${schedule.cron}"`
      : "to run on the specified schedule";
    onSendMessage?.(
      `I've scheduled the agent "${agentName}" ${scheduleInfo}. The schedule ID is ${schedule.id}.`,
    );
  }

  return (
    <AIChatBubble>
      <div className="flex flex-col gap-4">
        <div className="flex items-center gap-2">
          <div className="flex h-8 w-8 items-center justify-center rounded-full bg-green-100">
            <CheckCircleIcon
              size={18}
              weight="fill"
              className="text-green-600"
            />
          </div>
          <div>
            <Text variant="body-medium" className="text-neutral-900">
              Agent Created Successfully
            </Text>
            <Text variant="small" className="text-neutral-500">
              "{agentName}" is ready to test
            </Text>
          </div>
        </div>

        <div className="flex flex-col gap-2">
          <Text variant="small-medium" className="text-neutral-700">
            Ready to test?
          </Text>
          <div className="flex flex-wrap gap-2">
            <Button
              variant="outline"
              size="small"
              onClick={handleRunWithPlaceholders}
              className="gap-2"
            >
              <PlayIcon size={16} />
              Run with example values
            </Button>
            {libraryAgent ? (
              <RunAgentModal
                triggerSlot={
                  <Button variant="outline" size="small" className="gap-2">
                    <PencilLineIcon size={16} />
                    Run with my inputs
                  </Button>
                }
                agent={libraryAgent}
                onRunCreated={handleRunCreated}
                onScheduleCreated={handleScheduleCreated}
              />
            ) : (
              <Button
                variant="outline"
                size="small"
                loading={isLoading}
                disabled
                className="gap-2"
              >
                <PencilLineIcon size={16} />
                Run with my inputs
              </Button>
            )}
          </div>
          <Text variant="small" className="text-neutral-500">
            or just ask me
          </Text>
        </div>
      </div>
    </AIChatBubble>
  );
}
@@ -2,11 +2,13 @@ import { Text } from "@/components/atoms/Text/Text";
import { cn } from "@/lib/utils";
import type { ToolResult } from "@/types/chat";
import { WarningCircleIcon } from "@phosphor-icons/react";
import { AgentCreatedPrompt } from "./AgentCreatedPrompt";
import { AIChatBubble } from "../AIChatBubble/AIChatBubble";
import { MarkdownContent } from "../MarkdownContent/MarkdownContent";
import {
  formatToolResponse,
  getErrorMessage,
  isAgentSavedResponse,
  isErrorResponse,
} from "./helpers";

@@ -16,6 +18,7 @@ export interface ToolResponseMessageProps {
  result?: ToolResult;
  success?: boolean;
  className?: string;
  onSendMessage?: (content: string) => void;
}

export function ToolResponseMessage({
@@ -24,6 +27,7 @@ export function ToolResponseMessage({
  result,
  success: _success,
  className,
  onSendMessage,
}: ToolResponseMessageProps) {
  if (isErrorResponse(result)) {
    const errorMessage = getErrorMessage(result);
@@ -43,6 +47,18 @@
    );
  }

  // Check for agent_saved response - show special prompt
  const agentSavedData = isAgentSavedResponse(result);
  if (agentSavedData.isSaved) {
    return (
      <AgentCreatedPrompt
        agentName={agentSavedData.agentName}
        libraryAgentId={agentSavedData.libraryAgentId}
        onSendMessage={onSendMessage}
      />
    );
  }

  const formattedText = formatToolResponse(result, toolName);

  return (
@@ -6,6 +6,43 @@
    .trim();
}

export interface AgentSavedData {
  isSaved: boolean;
  agentName: string;
  agentId: string;
  libraryAgentId: string;
  libraryAgentLink: string;
}

export function isAgentSavedResponse(result: unknown): AgentSavedData {
  if (typeof result !== "object" || result === null) {
    return {
      isSaved: false,
      agentName: "",
      agentId: "",
      libraryAgentId: "",
      libraryAgentLink: "",
    };
  }
  const response = result as Record<string, unknown>;
  if (response.type === "agent_saved") {
    return {
      isSaved: true,
      agentName: (response.agent_name as string) || "Agent",
      agentId: (response.agent_id as string) || "",
      libraryAgentId: (response.library_agent_id as string) || "",
      libraryAgentLink: (response.library_agent_link as string) || "",
    };
  }
  return {
    isSaved: false,
    agentName: "",
    agentId: "",
    libraryAgentId: "",
    libraryAgentLink: "",
  };
}

export function isErrorResponse(result: unknown): boolean {
  if (typeof result === "string") {
    const lower = result.toLowerCase();
@@ -39,69 +76,101 @@ export function getErrorMessage(result: unknown): string {

/**
 * Check if a value is a workspace file reference.
 * Format: workspace://{fileId} or workspace://{fileId}#{mimeType}
 */
function isWorkspaceRef(value: unknown): value is string {
  return typeof value === "string" && value.startsWith("workspace://");
}

/**
 * Check if a workspace reference appears to be an image based on common patterns.
 * Since workspace refs don't have extensions, we check the context or assume image
 * for certain block types.
 *
 * TODO: Replace keyword matching with MIME type encoded in workspace ref.
 * e.g., workspace://abc123#image/png or workspace://abc123#video/mp4
 * This would let frontend render correctly without fragile keyword matching.
 * Extract MIME type from a workspace reference fragment.
 * e.g., "workspace://abc123#video/mp4" → "video/mp4"
 * Returns undefined if no fragment is present.
 */
function isLikelyImageRef(value: string, outputKey?: string): boolean {
  if (!isWorkspaceRef(value)) return false;

  // Check output key name for video-related hints (these are NOT images)
  const videoKeywords = ["video", "mp4", "mov", "avi", "webm", "movie", "clip"];
  if (outputKey) {
    const lowerKey = outputKey.toLowerCase();
    if (videoKeywords.some((kw) => lowerKey.includes(kw))) {
      return false;
    }
  }

  // Check output key name for image-related hints
  const imageKeywords = [
    "image",
    "img",
    "photo",
    "picture",
    "thumbnail",
    "avatar",
    "icon",
    "screenshot",
  ];
  if (outputKey) {
    const lowerKey = outputKey.toLowerCase();
    if (imageKeywords.some((kw) => lowerKey.includes(kw))) {
      return true;
    }
  }

  // Default to treating workspace refs as potential images
  // since that's the most common case for generated content
  return true;
function getWorkspaceMimeType(value: string): string | undefined {
  const hashIndex = value.indexOf("#");
  if (hashIndex === -1) return undefined;
  return value.slice(hashIndex + 1) || undefined;
}

/**
 * Format a single output value, converting workspace refs to markdown images.
 * Determine the media category of a workspace ref or data URI.
 * Uses the MIME type fragment on workspace refs when available,
 * falls back to output key keyword matching for older refs without it.
 */
function formatOutputValue(value: unknown, outputKey?: string): string {
  if (isWorkspaceRef(value) && isLikelyImageRef(value, outputKey)) {
    // Format as markdown image
    return ``;
function getMediaCategory(
  value: string,
  outputKey?: string,
): "video" | "image" | "audio" | "unknown" {
  // Data URIs carry their own MIME type
  if (value.startsWith("data:video/")) return "video";
  if (value.startsWith("data:image/")) return "image";
  if (value.startsWith("data:audio/")) return "audio";

  // Workspace refs: prefer MIME type fragment
  if (isWorkspaceRef(value)) {
    const mime = getWorkspaceMimeType(value);
    if (mime) {
      if (mime.startsWith("video/")) return "video";
      if (mime.startsWith("image/")) return "image";
      if (mime.startsWith("audio/")) return "audio";
      return "unknown";
    }

    // Fallback: keyword matching on output key for older refs without fragment
    if (outputKey) {
      const lowerKey = outputKey.toLowerCase();

      const videoKeywords = [
        "video",
        "mp4",
        "mov",
        "avi",
        "webm",
        "movie",
        "clip",
      ];
      if (videoKeywords.some((kw) => lowerKey.includes(kw))) return "video";

      const imageKeywords = [
        "image",
        "img",
        "photo",
        "picture",
        "thumbnail",
        "avatar",
        "icon",
        "screenshot",
      ];
      if (imageKeywords.some((kw) => lowerKey.includes(kw))) return "image";
    }

    // Default to image for backward compatibility
    return "image";
  }

  return "unknown";
}

/**
 * Format a single output value, converting workspace refs to markdown images/videos.
 * Videos use a "video:" alt-text prefix so the MarkdownContent renderer can
 * distinguish them from images and render a <video> element.
 */
function formatOutputValue(value: unknown, outputKey?: string): string {
  if (typeof value === "string") {
    // Check for data URIs (images)
    if (value.startsWith("data:image/")) {
    const category = getMediaCategory(value, outputKey);

    if (category === "video") {
      // Format with "video:" prefix so MarkdownContent renders <video>
      return ``;
    }

    if (category === "image") {
      return ``;
    }

    // For audio, unknown workspace refs, data URIs, etc. - return as-is
    return value;
  }
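A few illustrative calls against getMediaCategory as defined above; the file IDs and output keys are hypothetical:

getMediaCategory("workspace://abc123#video/mp4"); // "video" — MIME fragment wins
getMediaCategory("workspace://abc123", "screenshot_url"); // "image" — keyword fallback for older refs
getMediaCategory("workspace://abc123"); // "image" — backward-compatible default
getMediaCategory("data:audio/mpeg;base64,AAAA"); // "audio" — data URIs carry their own type
getMediaCategory("https://example.com/file.bin"); // "unknown"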
@@ -41,7 +41,17 @@ export function HostScopedCredentialsModal({
  const currentHost = currentUrl ? getHostFromUrl(currentUrl) : "";

  const formSchema = z.object({
    host: z.string().min(1, "Host is required"),
    host: z
      .string()
      .min(1, "Host is required")
      .refine((val) => !/^[a-zA-Z][a-zA-Z\d+\-.]*:\/\//.test(val), {
        message: "Enter only the host (e.g. api.example.com), not a full URL",
      })
      .refine((val) => !val.includes("/"), {
        message:
          "Enter only the host (e.g. api.example.com), without a trailing path. " +
          "You may specify a port (e.g. api.example.com:8080) if needed.",
      }),
    title: z.string().optional(),
    headers: z.record(z.string()).optional(),
  });
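Sample inputs against the refined host schema; the values are hypothetical:

formSchema.shape.host.safeParse("api.example.com").success; // true
formSchema.shape.host.safeParse("api.example.com:8080").success; // true — a port is allowed
formSchema.shape.host.safeParse("https://api.example.com").success; // false — rejected by the scheme refinement
formSchema.shape.host.safeParse("api.example.com/v1").success; // false — rejected by the path refinement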
@@ -26,6 +26,7 @@ export const providerIcons: Partial<
  nvidia: fallbackIcon,
  discord: FaDiscord,
  d_id: fallbackIcon,
  elevenlabs: fallbackIcon,
  google_maps: FaGoogle,
  jina: fallbackIcon,
  ideogram: fallbackIcon,
Some files were not shown because too many files have changed in this diff