Mirror of https://github.com/Significant-Gravitas/AutoGPT.git (synced 2026-02-07 21:35:34 -05:00)

Compare commits: ntindle/wa...ntindle/go (21 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | f4f81bc4fc |  |
|  | c5abc01f25 |  |
|  | 8b7053c1de |  |
|  | e00c1202ad |  |
|  | 8fddc9d71f |  |
|  | 3d1cd03fc8 |  |
|  | e7ebe42306 |  |
|  | e0fab7e34e |  |
|  | 29ee85c86f |  |
|  | 85b6520710 |  |
|  | bfa942e032 |  |
|  | 11256076d8 |  |
|  | 3ca2387631 |  |
|  | ed07f02738 |  |
|  | b121030c94 |  |
|  | c22c18374d |  |
|  | e40233a3ac |  |
|  | 3ae5eabf9d |  |
|  | a077ba9f03 |  |
|  | 5401d54eaa |  |
|  | 5ac89d7c0b |  |
.github/workflows/platform-frontend-ci.yml (vendored), 16 changed lines
```diff
@@ -27,11 +27,20 @@ jobs:
     runs-on: ubuntu-latest
     outputs:
       cache-key: ${{ steps.cache-key.outputs.key }}
+      components-changed: ${{ steps.filter.outputs.components }}
 
     steps:
       - name: Checkout repository
         uses: actions/checkout@v4
 
+      - name: Check for component changes
+        uses: dorny/paths-filter@v3
+        id: filter
+        with:
+          filters: |
+            components:
+              - 'autogpt_platform/frontend/src/components/**'
+
       - name: Set up Node.js
         uses: actions/setup-node@v4
         with:
@@ -90,8 +99,11 @@ jobs:
   chromatic:
     runs-on: ubuntu-latest
     needs: setup
-    # Only run on dev branch pushes or PRs targeting dev
-    if: github.ref == 'refs/heads/dev' || github.base_ref == 'dev'
+    # Disabled: to re-enable, remove 'false &&' from the condition below
+    if: >-
+      false
+      && (github.ref == 'refs/heads/dev' || github.base_ref == 'dev')
+      && needs.setup.outputs.components-changed == 'true'
 
     steps:
       - name: Checkout repository
```
```diff
@@ -152,6 +152,7 @@ REPLICATE_API_KEY=
 REVID_API_KEY=
 SCREENSHOTONE_API_KEY=
 UNREAL_SPEECH_API_KEY=
+ELEVENLABS_API_KEY=
 
 # Data & Search Services
 E2B_API_KEY=
```
autogpt_platform/backend/.gitignore (vendored), 3 changed lines
```diff
@@ -19,3 +19,6 @@ load-tests/*.json
 load-tests/*.log
 load-tests/node_modules/*
 migrations/*/rollback*.sql
+
+# Workspace files
+workspaces/
```
```diff
@@ -62,10 +62,12 @@ ENV POETRY_HOME=/opt/poetry \
     DEBIAN_FRONTEND=noninteractive
 ENV PATH=/opt/poetry/bin:$PATH
 
-# Install Python without upgrading system-managed packages
+# Install Python, FFmpeg, and ImageMagick (required for video processing blocks)
 RUN apt-get update && apt-get install -y \
     python3.13 \
     python3-pip \
+    ffmpeg \
+    imagemagick \
     && rm -rf /var/lib/apt/lists/*
 
 # Copy only necessary files from builder
```
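The two new apt packages exist for the video-processing blocks named in the updated comment. As a rough sketch of what such a block might do with the bundled binary (the helper below is illustrative, not AutoGPT's actual block API):

```python
import shutil
import subprocess


def extract_audio(video_path: str, audio_path: str) -> None:
    """Strip the audio track from a video using the ffmpeg binary that the
    Dockerfile above installs. The function name and arguments here are
    hypothetical; only the ffmpeg dependency itself comes from the diff.
    """
    if shutil.which("ffmpeg") is None:
        raise RuntimeError("ffmpeg not on PATH; see the Dockerfile change above")
    subprocess.run(
        # -vn drops the video stream; -acodec copy keeps the audio bytes as-is
        ["ffmpeg", "-y", "-i", video_path, "-vn", "-acodec", "copy", audio_path],
        check=True,
        capture_output=True,
    )
```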
```diff
@@ -1,251 +0,0 @@
-import logging
-
-import autogpt_libs.auth
-import fastapi
-import fastapi.responses
-
-import backend.api.features.store.db as store_db
-import backend.api.features.store.model as store_model
-
-logger = logging.getLogger(__name__)
-
-router = fastapi.APIRouter(
-    prefix="/admin/waitlist",
-    tags=["store", "admin", "waitlist"],
-    dependencies=[fastapi.Security(autogpt_libs.auth.requires_admin_user)],
-)
-
-
-@router.post(
-    "",
-    summary="Create Waitlist",
-    response_model=store_model.WaitlistAdminResponse,
-)
-async def create_waitlist(
-    request: store_model.WaitlistCreateRequest,
-    user_id: str = fastapi.Security(autogpt_libs.auth.get_user_id),
-):
-    """
-    Create a new waitlist (admin only).
-
-    Args:
-        request: Waitlist creation details
-        user_id: Authenticated admin user creating the waitlist
-
-    Returns:
-        WaitlistAdminResponse with the created waitlist details
-    """
-    try:
-        waitlist = await store_db.create_waitlist_admin(
-            admin_user_id=user_id,
-            data=request,
-        )
-        return waitlist
-    except Exception as e:
-        logger.exception("Error creating waitlist: %s", e)
-        return fastapi.responses.JSONResponse(
-            status_code=500,
-            content={"detail": "An error occurred while creating the waitlist"},
-        )
-
-
-@router.get(
-    "",
-    summary="List All Waitlists",
-    response_model=store_model.WaitlistAdminListResponse,
-)
-async def list_waitlists():
-    """
-    Get all waitlists with admin details (admin only).
-
-    Returns:
-        WaitlistAdminListResponse with all waitlists
-    """
-    try:
-        return await store_db.get_waitlists_admin()
-    except Exception as e:
-        logger.exception("Error listing waitlists: %s", e)
-        return fastapi.responses.JSONResponse(
-            status_code=500,
-            content={"detail": "An error occurred while fetching waitlists"},
-        )
-
-
-@router.get(
-    "/{waitlist_id}",
-    summary="Get Waitlist Details",
-    response_model=store_model.WaitlistAdminResponse,
-)
-async def get_waitlist(
-    waitlist_id: str = fastapi.Path(..., description="The ID of the waitlist"),
-):
-    """
-    Get a single waitlist with admin details (admin only).
-
-    Args:
-        waitlist_id: ID of the waitlist to retrieve
-
-    Returns:
-        WaitlistAdminResponse with waitlist details
-    """
-    try:
-        return await store_db.get_waitlist_admin(waitlist_id)
-    except ValueError:
-        logger.warning("Waitlist not found: %s", waitlist_id)
-        return fastapi.responses.JSONResponse(
-            status_code=404,
-            content={"detail": "Waitlist not found"},
-        )
-    except Exception as e:
-        logger.exception("Error fetching waitlist: %s", e)
-        return fastapi.responses.JSONResponse(
-            status_code=500,
-            content={"detail": "An error occurred while fetching the waitlist"},
-        )
-
-
-@router.put(
-    "/{waitlist_id}",
-    summary="Update Waitlist",
-    response_model=store_model.WaitlistAdminResponse,
-)
-async def update_waitlist(
-    request: store_model.WaitlistUpdateRequest,
-    waitlist_id: str = fastapi.Path(..., description="The ID of the waitlist"),
-):
-    """
-    Update a waitlist (admin only).
-
-    Args:
-        waitlist_id: ID of the waitlist to update
-        request: Fields to update
-
-    Returns:
-        WaitlistAdminResponse with updated waitlist details
-    """
-    try:
-        return await store_db.update_waitlist_admin(waitlist_id, request)
-    except ValueError:
-        logger.warning("Waitlist not found for update: %s", waitlist_id)
-        return fastapi.responses.JSONResponse(
-            status_code=404,
-            content={"detail": "Waitlist not found"},
-        )
-    except Exception as e:
-        logger.exception("Error updating waitlist: %s", e)
-        return fastapi.responses.JSONResponse(
-            status_code=500,
-            content={"detail": "An error occurred while updating the waitlist"},
-        )
-
-
-@router.delete(
-    "/{waitlist_id}",
-    summary="Delete Waitlist",
-)
-async def delete_waitlist(
-    waitlist_id: str = fastapi.Path(..., description="The ID of the waitlist"),
-):
-    """
-    Soft delete a waitlist (admin only).
-
-    Args:
-        waitlist_id: ID of the waitlist to delete
-
-    Returns:
-        Success message
-    """
-    try:
-        await store_db.delete_waitlist_admin(waitlist_id)
-        return {"message": "Waitlist deleted successfully"}
-    except ValueError:
-        logger.warning(f"Waitlist not found for deletion: {waitlist_id}")
-        return fastapi.responses.JSONResponse(
-            status_code=404,
-            content={"detail": "Waitlist not found"},
-        )
-    except Exception as e:
-        logger.exception("Error deleting waitlist: %s", e)
-        return fastapi.responses.JSONResponse(
-            status_code=500,
-            content={"detail": "An error occurred while deleting the waitlist"},
-        )
-
-
-@router.get(
-    "/{waitlist_id}/signups",
-    summary="Get Waitlist Signups",
-    response_model=store_model.WaitlistSignupListResponse,
-)
-async def get_waitlist_signups(
-    waitlist_id: str = fastapi.Path(..., description="The ID of the waitlist"),
-):
-    """
-    Get all signups for a waitlist (admin only).
-
-    Args:
-        waitlist_id: ID of the waitlist
-
-    Returns:
-        WaitlistSignupListResponse with all signups
-    """
-    try:
-        return await store_db.get_waitlist_signups_admin(waitlist_id)
-    except ValueError:
-        logger.warning("Waitlist not found for signups: %s", waitlist_id)
-        return fastapi.responses.JSONResponse(
-            status_code=404,
-            content={"detail": "Waitlist not found"},
-        )
-    except Exception as e:
-        logger.exception("Error fetching waitlist signups: %s", e)
-        return fastapi.responses.JSONResponse(
-            status_code=500,
-            content={"detail": "An error occurred while fetching waitlist signups"},
-        )
-
-
-@router.post(
-    "/{waitlist_id}/link",
-    summary="Link Waitlist to Store Listing",
-    response_model=store_model.WaitlistAdminResponse,
-)
-async def link_waitlist_to_listing(
-    waitlist_id: str = fastapi.Path(..., description="The ID of the waitlist"),
-    store_listing_id: str = fastapi.Body(
-        ..., embed=True, description="The ID of the store listing"
-    ),
-):
-    """
-    Link a waitlist to a store listing (admin only).
-
-    When the linked store listing is approved/published, waitlist users
-    will be automatically notified.
-
-    Args:
-        waitlist_id: ID of the waitlist
-        store_listing_id: ID of the store listing to link
-
-    Returns:
-        WaitlistAdminResponse with updated waitlist details
-    """
-    try:
-        return await store_db.link_waitlist_to_listing_admin(
-            waitlist_id, store_listing_id
-        )
-    except ValueError:
-        logger.warning(
-            "Link failed - waitlist or listing not found: %s, %s",
-            waitlist_id,
-            store_listing_id,
-        )
-        return fastapi.responses.JSONResponse(
-            status_code=404,
-            content={"detail": "Waitlist or store listing not found"},
-        )
-    except Exception as e:
-        logger.exception("Error linking waitlist to listing: %s", e)
-        return fastapi.responses.JSONResponse(
-            status_code=500,
-            content={"detail": "An error occurred while linking the waitlist"},
-        )
```
```diff
@@ -11,7 +11,7 @@ class ChatConfig(BaseSettings):
 
     # OpenAI API Configuration
     model: str = Field(
-        default="anthropic/claude-opus-4.5", description="Default model to use"
+        default="anthropic/claude-opus-4.6", description="Default model to use"
     )
     title_model: str = Field(
         default="openai/gpt-4o-mini",
```
```diff
@@ -33,7 +33,7 @@ from backend.data.understanding import (
     get_business_understanding,
 )
 from backend.util.exceptions import NotFoundError
-from backend.util.settings import Settings
+from backend.util.settings import AppEnvironment, Settings
 
 from . import db as chat_db
 from . import stream_registry
@@ -222,8 +222,18 @@ async def _get_system_prompt_template(context: str) -> str:
     try:
         # cache_ttl_seconds=0 disables SDK caching to always get the latest prompt
         # Use asyncio.to_thread to avoid blocking the event loop
+        # In non-production environments, fetch the latest prompt version
+        # instead of the production-labeled version for easier testing
+        label = (
+            None
+            if settings.config.app_env == AppEnvironment.PRODUCTION
+            else "latest"
+        )
         prompt = await asyncio.to_thread(
-            langfuse.get_prompt, config.langfuse_prompt_name, cache_ttl_seconds=0
+            langfuse.get_prompt,
+            config.langfuse_prompt_name,
+            label=label,
+            cache_ttl_seconds=0,
         )
         return prompt.compile(users_information=context)
     except Exception as e:
```
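The effect of the label gate: production keeps serving the production-labeled Langfuse prompt (label=None falls back to the SDK default), while every other environment pulls the newest version for faster prompt iteration. A standalone sketch of the selection rule, with a simplified stand-in for backend.util.settings.AppEnvironment:

```python
from enum import Enum


class AppEnvironment(str, Enum):
    # Simplified stand-in; the real enum lives in backend.util.settings
    LOCAL = "local"
    DEV = "dev"
    PRODUCTION = "production"


def pick_prompt_label(app_env: AppEnvironment) -> str | None:
    """None requests the production-labeled prompt version (the SDK default);
    "latest" requests the newest version regardless of labels."""
    return None if app_env == AppEnvironment.PRODUCTION else "latest"


assert pick_prompt_label(AppEnvironment.PRODUCTION) is None
assert pick_prompt_label(AppEnvironment.DEV) == "latest"
```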
```diff
@@ -618,6 +628,9 @@ async def stream_chat_completion(
                     total_tokens=chunk.totalTokens,
                 )
             )
+        elif isinstance(chunk, StreamHeartbeat):
+            # Pass through heartbeat to keep SSE connection alive
+            yield chunk
         else:
             logger.error(f"Unknown chunk type: {type(chunk)}", exc_info=True)
 
```
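Heartbeats matter because proxies and load balancers drop SSE connections that go quiet. The diff only forwards upstream StreamHeartbeat objects; a generic, self-contained sketch of the same keep-alive pattern over a plain string stream looks like this (the interval is an assumed value, not taken from the diff):

```python
import asyncio
from typing import AsyncIterator

HEARTBEAT_INTERVAL_S = 15.0  # illustrative value


async def with_heartbeat(
    chunks: AsyncIterator[str], interval: float = HEARTBEAT_INTERVAL_S
) -> AsyncIterator[str]:
    """Forward chunks from an upstream stream, emitting an SSE comment line
    whenever the upstream is idle so intermediaries keep the connection open.
    """
    queue: asyncio.Queue[str | None] = asyncio.Queue()

    async def pump() -> None:
        # Drain the upstream iterator into the queue; None marks exhaustion.
        async for chunk in chunks:
            await queue.put(chunk)
        await queue.put(None)

    producer = asyncio.create_task(pump())
    try:
        while True:
            try:
                item = await asyncio.wait_for(queue.get(), timeout=interval)
            except asyncio.TimeoutError:
                yield ": heartbeat\n\n"  # SSE comment; clients ignore it
                continue
            if item is None:
                return
            yield item
    finally:
        producer.cancel()
```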
```diff
@@ -7,15 +7,7 @@ from typing import Any, NotRequired, TypedDict
 
 from backend.api.features.library import db as library_db
 from backend.api.features.store import db as store_db
-from backend.data.graph import (
-    Graph,
-    Link,
-    Node,
-    create_graph,
-    get_graph,
-    get_graph_all_versions,
-    get_store_listed_graphs,
-)
+from backend.data.graph import Graph, Link, Node, get_graph, get_store_listed_graphs
 from backend.util.exceptions import DatabaseError, NotFoundError
 
 from .service import (
@@ -28,8 +20,6 @@ from .service import (
 
 logger = logging.getLogger(__name__)
 
-AGENT_EXECUTOR_BLOCK_ID = "e189baac-8c20-45a1-94a7-55177ea42565"
-
-
 class ExecutionSummary(TypedDict):
     """Summary of a single execution for quality assessment."""
@@ -669,45 +659,6 @@ def json_to_graph(agent_json: dict[str, Any]) -> Graph:
     )
 
 
-def _reassign_node_ids(graph: Graph) -> None:
-    """Reassign all node and link IDs to new UUIDs.
-
-    This is needed when creating a new version to avoid unique constraint violations.
-    """
-    id_map = {node.id: str(uuid.uuid4()) for node in graph.nodes}
-
-    for node in graph.nodes:
-        node.id = id_map[node.id]
-
-    for link in graph.links:
-        link.id = str(uuid.uuid4())
-        if link.source_id in id_map:
-            link.source_id = id_map[link.source_id]
-        if link.sink_id in id_map:
-            link.sink_id = id_map[link.sink_id]
-
-
-def _populate_agent_executor_user_ids(agent_json: dict[str, Any], user_id: str) -> None:
-    """Populate user_id in AgentExecutorBlock nodes.
-
-    The external agent generator creates AgentExecutorBlock nodes with empty user_id.
-    This function fills in the actual user_id so sub-agents run with correct permissions.
-
-    Args:
-        agent_json: Agent JSON dict (modified in place)
-        user_id: User ID to set
-    """
-    for node in agent_json.get("nodes", []):
-        if node.get("block_id") == AGENT_EXECUTOR_BLOCK_ID:
-            input_default = node.get("input_default") or {}
-            if not input_default.get("user_id"):
-                input_default["user_id"] = user_id
-                node["input_default"] = input_default
-                logger.debug(
-                    f"Set user_id for AgentExecutorBlock node {node.get('id')}"
-                )
-
-
 async def save_agent_to_library(
     agent_json: dict[str, Any], user_id: str, is_update: bool = False
 ) -> tuple[Graph, Any]:
@@ -721,35 +672,10 @@ async def save_agent_to_library(
     Returns:
         Tuple of (created Graph, LibraryAgent)
     """
-    # Populate user_id in AgentExecutorBlock nodes before conversion
-    _populate_agent_executor_user_ids(agent_json, user_id)
-
     graph = json_to_graph(agent_json)
 
     if is_update:
-        if graph.id:
-            existing_versions = await get_graph_all_versions(graph.id, user_id)
-            if existing_versions:
-                latest_version = max(v.version for v in existing_versions)
-                graph.version = latest_version + 1
-            _reassign_node_ids(graph)
-            logger.info(f"Updating agent {graph.id} to version {graph.version}")
-        else:
-            graph.id = str(uuid.uuid4())
-            graph.version = 1
-            _reassign_node_ids(graph)
-            logger.info(f"Creating new agent with ID {graph.id}")
-
-    created_graph = await create_graph(graph, user_id)
-
-    library_agents = await library_db.create_library_agent(
-        graph=created_graph,
-        user_id=user_id,
-        sensitive_action_safe_mode=True,
-        create_library_agents_for_sub_graphs=False,
-    )
-
-    return created_graph, library_agents[0]
+        return await library_db.update_graph_in_library(graph, user_id)
+    return await library_db.create_graph_in_library(graph, user_id)
 
 
 def graph_to_json(graph: Graph) -> dict[str, Any]:
```
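Both deleted helpers were folded into the library layer (see create_graph_in_library and update_graph_in_library later in this compare). The invariant the deleted _reassign_node_ids enforced, fresh UUIDs with link endpoints remapped to the renamed nodes, is worth keeping in view; a self-contained sketch with stand-in dataclasses (GraphModel.reassign_ids presumably preserves the same invariant, which is an assumption, since its body is not in this compare):

```python
import uuid
from dataclasses import dataclass


@dataclass
class Node:
    id: str


@dataclass
class Link:
    id: str
    source_id: str
    sink_id: str


def reassign_ids(nodes: list[Node], links: list[Link]) -> None:
    """Give every node and link a fresh UUID while keeping links pointing at
    the same (renamed) nodes, avoiding unique-constraint violations when a
    new graph version is stored."""
    id_map = {n.id: str(uuid.uuid4()) for n in nodes}
    for n in nodes:
        n.id = id_map[n.id]
    for link in links:
        link.id = str(uuid.uuid4())
        # Links may reference nodes outside this graph; remap only known ids.
        link.source_id = id_map.get(link.source_id, link.source_id)
        link.sink_id = id_map.get(link.sink_id, link.sink_id)
```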
```diff
@@ -206,9 +206,9 @@ async def search_agents(
         ]
     )
     no_results_msg = (
-        f"No agents found matching '{query}'. Try different keywords or browse the marketplace."
+        f"No agents found matching '{query}'. Let the user know they can try different keywords or browse the marketplace. Also let them know you can create a custom agent for them based on their needs."
         if source == "marketplace"
-        else f"No agents matching '{query}' found in your library."
+        else f"No agents matching '{query}' found in your library. Let the user know you can create a custom agent for them based on their needs."
     )
     return NoResultsResponse(
         message=no_results_msg, session_id=session_id, suggestions=suggestions
@@ -224,10 +224,10 @@ async def search_agents(
     message = (
         "Now you have found some options for the user to choose from. "
         "You can add a link to a recommended agent at: /marketplace/agent/agent_id "
-        "Please ask the user if they would like to use any of these agents."
+        "Please ask the user if they would like to use any of these agents. Let the user know we can create a custom agent for them based on their needs."
         if source == "marketplace"
         else "Found agents in the user's library. You can provide a link to view an agent at: "
-        "/library/agents/{agent_id}. Use agent_output to get execution results, or run_agent to execute."
+        "/library/agents/{agent_id}. Use agent_output to get execution results, or run_agent to execute. Let the user know we can create a custom agent for them based on their needs."
     )
 
     return AgentsFoundResponse(
```
```diff
@@ -6,7 +6,6 @@ from typing import Any
 from backend.api.features.library import db as library_db
 from backend.api.features.library import model as library_model
 from backend.api.features.store import db as store_db
-from backend.data import graph as graph_db
 from backend.data.graph import GraphModel
 from backend.data.model import (
     CredentialsFieldInfo,
@@ -44,14 +43,8 @@ async def fetch_graph_from_store_slug(
         return None, None
 
     # Get the graph from store listing version
-    graph_meta = await store_db.get_available_graph(
-        store_agent.store_listing_version_id
-    )
-    graph = await graph_db.get_graph(
-        graph_id=graph_meta.id,
-        version=graph_meta.version,
-        user_id=None,  # Public access
-        include_subgraphs=True,
+    graph = await store_db.get_available_graph(
+        store_agent.store_listing_version_id, hide_nodes=False
     )
     return graph, store_agent
 
@@ -124,11 +117,11 @@ def build_missing_credentials_from_graph(
     preserving all supported credential types for each field.
     """
     matched_keys = set(matched_credentials.keys()) if matched_credentials else set()
-    aggregated_fields = graph.aggregate_credentials_inputs()
+    aggregated_fields = graph.regular_credentials_inputs
 
     return {
         field_key: _serialize_missing_credential(field_key, field_info)
-        for field_key, (field_info, _node_fields) in aggregated_fields.items()
+        for field_key, (field_info, _, _) in aggregated_fields.items()
         if field_key not in matched_keys
     }
 
@@ -251,7 +244,7 @@ async def match_user_credentials_to_graph(
     missing_creds: list[str] = []
 
     # Get aggregated credentials requirements from the graph
-    aggregated_creds = graph.aggregate_credentials_inputs()
+    aggregated_creds = graph.regular_credentials_inputs
     logger.debug(
         f"Matching credentials for graph {graph.id}: {len(aggregated_creds)} required"
     )
@@ -269,7 +262,8 @@
     # provider is in the set of acceptable providers.
     for credential_field_name, (
         credential_requirements,
-        _node_fields,
+        _,
+        _,
     ) in aggregated_creds.items():
         # Find first matching credential by provider, type, and scopes
         matching_cred = next(
```
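All four call-site edits above track one shape change: values in the aggregated-credentials mapping grew from a 2-tuple to a 3-tuple, and this compare never shows what the new slots mean, so the consumers simply discard them. A toy illustration of the consumer-side pattern:

```python
from typing import Any

# Values used to be (field_info, node_fields); they are now a 3-tuple whose
# extra slot stays opaque here because the diff does not define it.
aggregated: dict[str, tuple[Any, set[tuple[str, str]], Any]] = {
    "github_api_key": ("field-info", {("node-1", "credentials")}, True),
}

for field_key, (field_info, _node_fields, _extra) in aggregated.items():
    # The callers in the diff only need field_info, so the remaining slots
    # are unpacked as throwaways (`_, _`).
    print(field_key, "->", field_info)
```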
```diff
@@ -0,0 +1,78 @@
+"""Tests for chat tools utility functions."""
+
+from unittest.mock import AsyncMock, MagicMock, patch
+
+import pytest
+
+from backend.data.model import CredentialsFieldInfo
+
+
+def _make_regular_field() -> CredentialsFieldInfo:
+    return CredentialsFieldInfo.model_validate(
+        {
+            "credentials_provider": ["github"],
+            "credentials_types": ["api_key"],
+            "is_auto_credential": False,
+        },
+        by_alias=True,
+    )
+
+
+def test_build_missing_credentials_excludes_auto_creds():
+    """
+    build_missing_credentials_from_graph() should use regular_credentials_inputs
+    and thus exclude auto_credentials from the "missing" set.
+    """
+    from backend.api.features.chat.tools.utils import (
+        build_missing_credentials_from_graph,
+    )
+
+    regular_field = _make_regular_field()
+
+    mock_graph = MagicMock()
+    # regular_credentials_inputs should only return the non-auto field
+    mock_graph.regular_credentials_inputs = {
+        "github_api_key": (regular_field, {("node-1", "credentials")}, True),
+    }
+
+    result = build_missing_credentials_from_graph(mock_graph, matched_credentials=None)
+
+    # Should include the regular credential
+    assert "github_api_key" in result
+    # Should NOT include the auto_credential (not in regular_credentials_inputs)
+    assert "google_oauth2" not in result
+
+
+@pytest.mark.asyncio
+async def test_match_user_credentials_excludes_auto_creds():
+    """
+    match_user_credentials_to_graph() should use regular_credentials_inputs
+    and thus exclude auto_credentials from matching.
+    """
+    from backend.api.features.chat.tools.utils import match_user_credentials_to_graph
+
+    regular_field = _make_regular_field()
+
+    mock_graph = MagicMock()
+    mock_graph.id = "test-graph"
+    # regular_credentials_inputs returns only non-auto fields
+    mock_graph.regular_credentials_inputs = {
+        "github_api_key": (regular_field, {("node-1", "credentials")}, True),
+    }
+
+    # Mock the credentials manager to return no credentials
+    with patch(
+        "backend.api.features.chat.tools.utils.IntegrationCredentialsManager"
+    ) as MockCredsMgr:
+        mock_store = AsyncMock()
+        mock_store.get_all_creds.return_value = []
+        MockCredsMgr.return_value.store = mock_store
+
+        matched, missing = await match_user_credentials_to_graph(
+            user_id="test-user", graph=mock_graph
+        )
+
+        # No credentials available, so github should be missing
+        assert len(matched) == 0
+        assert len(missing) == 1
+        assert "github_api_key" in missing[0]
```
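The _make_regular_field helper builds its fixture through alias-based validation (the explicit by_alias=True on model_validate requires a recent Pydantic 2.x). A self-contained illustration of the same pattern with a stand-in model, since aliased fields are also honored by default during validation:

```python
from pydantic import BaseModel, Field


class FieldInfoToy(BaseModel):
    """Stand-in model (not CredentialsFieldInfo) showing alias-based input."""

    provider: list[str] = Field(alias="credentials_provider")
    types: list[str] = Field(alias="credentials_types")
    is_auto_credential: bool


info = FieldInfoToy.model_validate(
    {
        "credentials_provider": ["github"],
        "credentials_types": ["api_key"],
        "is_auto_credential": False,
    }
)
assert info.provider == ["github"] and not info.is_auto_credential
```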
```diff
@@ -19,7 +19,10 @@ from backend.data.graph import GraphSettings
 from backend.data.includes import AGENT_PRESET_INCLUDE, library_agent_include
 from backend.data.model import CredentialsMetaInput
 from backend.integrations.creds_manager import IntegrationCredentialsManager
-from backend.integrations.webhooks.graph_lifecycle_hooks import on_graph_activate
+from backend.integrations.webhooks.graph_lifecycle_hooks import (
+    on_graph_activate,
+    on_graph_deactivate,
+)
 from backend.util.clients import get_scheduler_client
 from backend.util.exceptions import DatabaseError, InvalidInputError, NotFoundError
 from backend.util.json import SafeJson
@@ -371,7 +374,7 @@ async def get_library_agent_by_graph_id(
 
 
 async def add_generated_agent_image(
-    graph: graph_db.BaseGraph,
+    graph: graph_db.GraphBaseMeta,
     user_id: str,
     library_agent_id: str,
 ) -> Optional[prisma.models.LibraryAgent]:
@@ -537,6 +540,92 @@ async def update_agent_version_in_library(
     return library_model.LibraryAgent.from_db(lib)
 
 
+async def create_graph_in_library(
+    graph: graph_db.Graph,
+    user_id: str,
+) -> tuple[graph_db.GraphModel, library_model.LibraryAgent]:
+    """Create a new graph and add it to the user's library."""
+    graph.version = 1
+    graph_model = graph_db.make_graph_model(graph, user_id)
+    graph_model.reassign_ids(user_id=user_id, reassign_graph_id=True)
+
+    created_graph = await graph_db.create_graph(graph_model, user_id)
+
+    library_agents = await create_library_agent(
+        graph=created_graph,
+        user_id=user_id,
+        sensitive_action_safe_mode=True,
+        create_library_agents_for_sub_graphs=False,
+    )
+
+    if created_graph.is_active:
+        created_graph = await on_graph_activate(created_graph, user_id=user_id)
+
+    return created_graph, library_agents[0]
+
+
+async def update_graph_in_library(
+    graph: graph_db.Graph,
+    user_id: str,
+) -> tuple[graph_db.GraphModel, library_model.LibraryAgent]:
+    """Create a new version of an existing graph and update the library entry."""
+    existing_versions = await graph_db.get_graph_all_versions(graph.id, user_id)
+    current_active_version = (
+        next((v for v in existing_versions if v.is_active), None)
+        if existing_versions
+        else None
+    )
+    graph.version = (
+        max(v.version for v in existing_versions) + 1 if existing_versions else 1
+    )
+
+    graph_model = graph_db.make_graph_model(graph, user_id)
+    graph_model.reassign_ids(user_id=user_id, reassign_graph_id=False)
+
+    created_graph = await graph_db.create_graph(graph_model, user_id)
+
+    library_agent = await get_library_agent_by_graph_id(user_id, created_graph.id)
+    if not library_agent:
+        raise NotFoundError(f"Library agent not found for graph {created_graph.id}")
+
+    library_agent = await update_library_agent_version_and_settings(
+        user_id, created_graph
+    )
+
+    if created_graph.is_active:
+        created_graph = await on_graph_activate(created_graph, user_id=user_id)
+        await graph_db.set_graph_active_version(
+            graph_id=created_graph.id,
+            version=created_graph.version,
+            user_id=user_id,
+        )
+        if current_active_version:
+            await on_graph_deactivate(current_active_version, user_id=user_id)
+
+    return created_graph, library_agent
+
+
+async def update_library_agent_version_and_settings(
+    user_id: str, agent_graph: graph_db.GraphModel
+) -> library_model.LibraryAgent:
+    """Update library agent to point to new graph version and sync settings."""
+    library = await update_agent_version_in_library(
+        user_id, agent_graph.id, agent_graph.version
+    )
+    updated_settings = GraphSettings.from_graph(
+        graph=agent_graph,
+        hitl_safe_mode=library.settings.human_in_the_loop_safe_mode,
+        sensitive_action_safe_mode=library.settings.sensitive_action_safe_mode,
+    )
+    if updated_settings != library.settings:
+        library = await update_library_agent(
+            library_agent_id=library.id,
+            user_id=user_id,
+            settings=updated_settings,
+        )
+    return library
+
+
 async def update_library_agent(
     library_agent_id: str,
     user_id: str,
```
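One detail of update_graph_in_library worth calling out is the hook ordering: the new version is activated, the active-version pointer is flipped, and only then is the previously active version deactivated, so webhook registrations never hit a gap. A minimal ordering sketch with stand-in callables:

```python
from typing import Awaitable, Callable, Optional, TypeVar

V = TypeVar("V")


async def swap_active_version(
    activate: Callable[[V], Awaitable[None]],
    set_active: Callable[[V], Awaitable[None]],
    deactivate: Callable[[V], Awaitable[None]],
    new: V,
    old: Optional[V],
) -> None:
    """Ordering sketch: new hooks come up before the pointer flips, and the
    old version's hooks are torn down last. The callables are stand-ins for
    on_graph_activate / set_graph_active_version / on_graph_deactivate."""
    await activate(new)        # new version ready to receive events
    await set_active(new)      # flip the pointer
    if old is not None:
        await deactivate(old)  # old hooks torn down last
```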
```diff
@@ -1014,7 +1103,7 @@ async def create_preset_from_graph_execution(
         raise NotFoundError(
             f"Graph #{graph_execution.graph_id} not found or accessible"
         )
-    elif len(graph.aggregate_credentials_inputs()) > 0:
+    elif len(graph.regular_credentials_inputs) > 0:
         raise ValueError(
             f"Graph execution #{graph_exec_id} can't be turned into a preset "
             "because it was run before this feature existed "
```
```diff
@@ -1,7 +1,7 @@
 import asyncio
 import logging
 from datetime import datetime, timezone
-from typing import Any, Literal
+from typing import Any, Literal, overload
 
 import fastapi
 import prisma.enums
@@ -11,8 +11,8 @@ import prisma.types
 
 from backend.data.db import transaction
 from backend.data.graph import (
-    GraphMeta,
     GraphModel,
+    GraphModelWithoutNodes,
     get_graph,
     get_graph_as_admin,
     get_sub_graphs,
@@ -22,7 +22,6 @@ from backend.data.notifications import (
     AgentApprovalData,
     AgentRejectionData,
     NotificationEventModel,
-    WaitlistLaunchData,
 )
 from backend.notifications.notifications import queue_notification_async
 from backend.util.exceptions import DatabaseError
@@ -335,7 +334,22 @@ async def get_store_agent_details(
         raise DatabaseError("Failed to fetch agent details") from e
 
 
-async def get_available_graph(store_listing_version_id: str) -> GraphMeta:
+@overload
+async def get_available_graph(
+    store_listing_version_id: str, hide_nodes: Literal[False]
+) -> GraphModel: ...
+
+
+@overload
+async def get_available_graph(
+    store_listing_version_id: str, hide_nodes: Literal[True] = True
+) -> GraphModelWithoutNodes: ...
+
+
+async def get_available_graph(
+    store_listing_version_id: str,
+    hide_nodes: bool = True,
+) -> GraphModelWithoutNodes | GraphModel:
     try:
         # Get avaialble, non-deleted store listing version
         store_listing_version = (
@@ -345,7 +359,7 @@ async def get_available_graph(store_listing_version_id: str) -> GraphMeta:
                     "isAvailable": True,
                     "isDeleted": False,
                 },
-                include={"AgentGraph": {"include": {"Nodes": True}}},
+                include={"AgentGraph": {"include": AGENT_GRAPH_INCLUDE}},
             )
         )
 
@@ -355,7 +369,9 @@ async def get_available_graph(store_listing_version_id: str) -> GraphMeta:
                 detail=f"Store listing version {store_listing_version_id} not found",
             )
 
-        return GraphModel.from_db(store_listing_version.AgentGraph).meta()
+        return (GraphModelWithoutNodes if hide_nodes else GraphModel).from_db(
+            store_listing_version.AgentGraph
+        )
 
     except Exception as e:
         logger.error(f"Error getting agent: {e}")
```
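The overload pair lets type checkers pick the return type from the hide_nodes argument: the default True yields the node-less model, an explicit False yields the full GraphModel (which is what the simplified fetch_graph_from_store_slug relies on). A toy example of the same Literal-driven overload pattern:

```python
from typing import Literal, overload


@overload
def load(path: str, verbose: Literal[True]) -> str: ...
@overload
def load(path: str, verbose: Literal[False] = False) -> bytes: ...


def load(path: str, verbose: bool = False) -> str | bytes:
    """Toy counterpart of get_available_graph's overloads: a Literal-typed
    flag narrows the return type, so callers passing verbose=True see str
    and default callers see bytes under static type checking."""
    data = b"raw bytes"  # placeholder payload
    return data.decode() if verbose else data
```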
```diff
@@ -1714,29 +1730,6 @@ async def review_store_submission(
             # Don't fail the review process if email sending fails
             pass
 
-        # Notify waitlist users if this is an approval and has a linked waitlist
-        if is_approved and submission.StoreListing:
-            try:
-                frontend_base_url = (
-                    settings.config.frontend_base_url
-                    or settings.config.platform_base_url
-                )
-                store_agent = (
-                    await prisma.models.StoreAgent.prisma().find_first_or_raise(
-                        where={"storeListingVersionId": submission.id}
-                    )
-                )
-                creator_username = store_agent.creator_username or "unknown"
-                store_url = f"{frontend_base_url}/marketplace/agent/{creator_username}/{store_agent.slug}"
-                await notify_waitlist_users_on_launch(
-                    store_listing_id=submission.StoreListing.id,
-                    agent_name=submission.name,
-                    store_url=store_url,
-                )
-            except Exception as e:
-                logger.error(f"Failed to notify waitlist users on agent approval: {e}")
-                # Don't fail the approval process
-
     # Convert to Pydantic model for consistency
     return store_model.StoreSubmission(
         listing_id=(submission.StoreListing.id if submission.StoreListing else ""),
@@ -1984,552 +1977,3 @@ async def get_agent_as_admin(
     )
 
     return graph
-
-
-def _waitlist_to_store_entry(
-    waitlist: prisma.models.WaitlistEntry,
-) -> store_model.StoreWaitlistEntry:
-    """Convert a WaitlistEntry to StoreWaitlistEntry for public display."""
-    return store_model.StoreWaitlistEntry(
-        waitlistId=waitlist.id,
-        slug=waitlist.slug,
-        name=waitlist.name,
-        subHeading=waitlist.subHeading,
-        videoUrl=waitlist.videoUrl,
-        agentOutputDemoUrl=waitlist.agentOutputDemoUrl,
-        imageUrls=waitlist.imageUrls or [],
-        description=waitlist.description,
-        categories=waitlist.categories,
-    )
-
-
-async def get_waitlist() -> list[store_model.StoreWaitlistEntry]:
-    """Get all active waitlists for public display."""
-    try:
-        waitlists = await prisma.models.WaitlistEntry.prisma().find_many(
-            where=prisma.types.WaitlistEntryWhereInput(isDeleted=False),
-        )
-
-        # Filter out closed/done waitlists and sort by votes (descending)
-        excluded_statuses = {
-            prisma.enums.WaitlistExternalStatus.CANCELED,
-            prisma.enums.WaitlistExternalStatus.DONE,
-        }
-        active_waitlists = [w for w in waitlists if w.status not in excluded_statuses]
-        sorted_list = sorted(active_waitlists, key=lambda x: x.votes, reverse=True)
-
-        return [_waitlist_to_store_entry(w) for w in sorted_list]
-    except Exception as e:
-        logger.error(f"Error fetching waitlists: {e}")
-        raise DatabaseError("Failed to fetch waitlists") from e
-
-
-async def get_user_waitlist_memberships(user_id: str) -> list[str]:
-    """Get all waitlist IDs that a user has joined."""
-    try:
-        user = await prisma.models.User.prisma().find_unique(
-            where={"id": user_id},
-            include={"joinedWaitlists": True},
-        )
-        if not user or not user.joinedWaitlists:
-            return []
-        return [w.id for w in user.joinedWaitlists]
-    except Exception as e:
-        logger.error(f"Error fetching user waitlist memberships: {e}")
-        raise DatabaseError("Failed to fetch waitlist memberships") from e
-
-
-async def add_user_to_waitlist(
-    waitlist_id: str, user_id: str | None, email: str | None
-) -> store_model.StoreWaitlistEntry:
-    """
-    Add a user to a waitlist.
-
-    For logged-in users: connects via joinedUsers relation
-    For anonymous users: adds email to unaffiliatedEmailUsers array
-    """
-    if not user_id and not email:
-        raise ValueError("Either user_id or email must be provided")
-
-    try:
-        # Find the waitlist
-        waitlist = await prisma.models.WaitlistEntry.prisma().find_unique(
-            where={"id": waitlist_id},
-            include={"joinedUsers": True},
-        )
-
-        if not waitlist:
-            raise ValueError(f"Waitlist {waitlist_id} not found")
-
-        if waitlist.isDeleted:
-            raise ValueError(f"Waitlist {waitlist_id} is no longer available")
-
-        if waitlist.status in [
-            prisma.enums.WaitlistExternalStatus.CANCELED,
-            prisma.enums.WaitlistExternalStatus.DONE,
-        ]:
-            raise ValueError(f"Waitlist {waitlist_id} is closed")
-
-        if user_id:
-            # Check if user already joined
-            joined_user_ids = [u.id for u in (waitlist.joinedUsers or [])]
-            if user_id in joined_user_ids:
-                # Already joined - return waitlist info
-                logger.debug(f"User {user_id} already joined waitlist {waitlist_id}")
-            else:
-                # Connect user to waitlist
-                await prisma.models.WaitlistEntry.prisma().update(
-                    where={"id": waitlist_id},
-                    data={"joinedUsers": {"connect": [{"id": user_id}]}},
-                )
-                logger.info(f"User {user_id} joined waitlist {waitlist_id}")
-
-            # If user was previously in email list, remove them
-            # Use transaction to prevent race conditions
-            if email:
-                async with transaction() as tx:
-                    current_waitlist = await tx.waitlistentry.find_unique(
-                        where={"id": waitlist_id}
-                    )
-                    if current_waitlist and email in (
-                        current_waitlist.unaffiliatedEmailUsers or []
-                    ):
-                        updated_emails: list[str] = [
-                            e
-                            for e in (current_waitlist.unaffiliatedEmailUsers or [])
-                            if e != email
-                        ]
-                        await tx.waitlistentry.update(
-                            where={"id": waitlist_id},
-                            data={"unaffiliatedEmailUsers": updated_emails},
-                        )
-        elif email:
-            # Add email to unaffiliated list if not already present
-            # Use transaction to prevent race conditions with concurrent signups
-            async with transaction() as tx:
-                # Re-fetch within transaction to get latest state
-                current_waitlist = await tx.waitlistentry.find_unique(
-                    where={"id": waitlist_id}
-                )
-                if current_waitlist:
-                    current_emails: list[str] = list(
-                        current_waitlist.unaffiliatedEmailUsers or []
-                    )
-                    if email not in current_emails:
-                        current_emails.append(email)
-                        await tx.waitlistentry.update(
-                            where={"id": waitlist_id},
-                            data={"unaffiliatedEmailUsers": current_emails},
-                        )
-                        logger.info(f"Email {email} added to waitlist {waitlist_id}")
-                    else:
-                        logger.debug(f"Email {email} already on waitlist {waitlist_id}")
-
-        # Re-fetch to return updated data
-        updated_waitlist = await prisma.models.WaitlistEntry.prisma().find_unique(
-            where={"id": waitlist_id}
-        )
-        return _waitlist_to_store_entry(updated_waitlist or waitlist)
-
-    except ValueError:
-        raise
-    except Exception as e:
-        logger.error(f"Error adding user to waitlist: {e}")
-        raise DatabaseError("Failed to add user to waitlist") from e
-
-
-# ============== Admin Waitlist Functions ==============
-
-
-def _waitlist_to_admin_response(
-    waitlist: prisma.models.WaitlistEntry,
-) -> store_model.WaitlistAdminResponse:
-    """Convert a WaitlistEntry to WaitlistAdminResponse."""
-    joined_count = len(waitlist.joinedUsers) if waitlist.joinedUsers else 0
-    email_count = (
-        len(waitlist.unaffiliatedEmailUsers) if waitlist.unaffiliatedEmailUsers else 0
-    )
-
-    return store_model.WaitlistAdminResponse(
-        id=waitlist.id,
-        createdAt=waitlist.createdAt.isoformat() if waitlist.createdAt else "",
-        updatedAt=waitlist.updatedAt.isoformat() if waitlist.updatedAt else "",
-        slug=waitlist.slug,
-        name=waitlist.name,
-        subHeading=waitlist.subHeading,
-        description=waitlist.description,
-        categories=waitlist.categories,
-        imageUrls=waitlist.imageUrls or [],
-        videoUrl=waitlist.videoUrl,
-        agentOutputDemoUrl=waitlist.agentOutputDemoUrl,
-        status=waitlist.status or prisma.enums.WaitlistExternalStatus.NOT_STARTED,
-        votes=waitlist.votes,
-        signupCount=joined_count + email_count,
-        storeListingId=waitlist.storeListingId,
-        owningUserId=waitlist.owningUserId,
-    )
-
-
-async def create_waitlist_admin(
-    admin_user_id: str,
-    data: store_model.WaitlistCreateRequest,
-) -> store_model.WaitlistAdminResponse:
-    """Create a new waitlist (admin only)."""
-    logger.info(f"Admin {admin_user_id} creating waitlist: {data.name}")
-
-    try:
-        waitlist = await prisma.models.WaitlistEntry.prisma().create(
-            data=prisma.types.WaitlistEntryCreateInput(
-                name=data.name,
-                slug=data.slug,
-                subHeading=data.subHeading,
-                description=data.description,
-                categories=data.categories,
-                imageUrls=data.imageUrls,
-                videoUrl=data.videoUrl,
-                agentOutputDemoUrl=data.agentOutputDemoUrl,
-                owningUserId=admin_user_id,
-                status=prisma.enums.WaitlistExternalStatus.NOT_STARTED,
-            ),
-            include={"joinedUsers": True},
-        )
-
-        return _waitlist_to_admin_response(waitlist)
-    except Exception as e:
-        logger.error(f"Error creating waitlist: {e}")
-        raise DatabaseError("Failed to create waitlist") from e
-
-
-async def get_waitlists_admin() -> store_model.WaitlistAdminListResponse:
-    """Get all waitlists with admin details."""
-    try:
-        waitlists = await prisma.models.WaitlistEntry.prisma().find_many(
-            where=prisma.types.WaitlistEntryWhereInput(isDeleted=False),
-            include={"joinedUsers": True},
-            order={"createdAt": "desc"},
-        )
-
-        return store_model.WaitlistAdminListResponse(
-            waitlists=[_waitlist_to_admin_response(w) for w in waitlists],
-            totalCount=len(waitlists),
-        )
-    except Exception as e:
-        logger.error(f"Error fetching waitlists for admin: {e}")
-        raise DatabaseError("Failed to fetch waitlists") from e
-
-
-async def get_waitlist_admin(
-    waitlist_id: str,
-) -> store_model.WaitlistAdminResponse:
-    """Get a single waitlist with admin details."""
-    try:
-        waitlist = await prisma.models.WaitlistEntry.prisma().find_unique(
-            where={"id": waitlist_id},
-            include={"joinedUsers": True},
-        )
-
-        if not waitlist:
-            raise ValueError(f"Waitlist {waitlist_id} not found")
-
-        if waitlist.isDeleted:
-            raise ValueError(f"Waitlist {waitlist_id} has been deleted")
-
-        return _waitlist_to_admin_response(waitlist)
-    except ValueError:
-        raise
-    except Exception as e:
-        logger.error(f"Error fetching waitlist {waitlist_id}: {e}")
-        raise DatabaseError("Failed to fetch waitlist") from e
-
-
-async def update_waitlist_admin(
-    waitlist_id: str,
-    data: store_model.WaitlistUpdateRequest,
-) -> store_model.WaitlistAdminResponse:
-    """Update a waitlist (admin only)."""
-    logger.info(f"Updating waitlist {waitlist_id}")
-
-    try:
-        # Check if waitlist exists first
-        existing = await prisma.models.WaitlistEntry.prisma().find_unique(
-            where={"id": waitlist_id}
-        )
-
-        if not existing:
-            raise ValueError(f"Waitlist {waitlist_id} not found")
-
-        if existing.isDeleted:
-            raise ValueError(f"Waitlist {waitlist_id} has been deleted")
-
-        # Build update data from explicitly provided fields
-        # Use model_fields_set to allow clearing fields by setting them to None
-        field_mappings = {
-            "name": data.name,
-            "slug": data.slug,
-            "subHeading": data.subHeading,
-            "description": data.description,
-            "categories": data.categories,
-            "imageUrls": data.imageUrls,
-            "videoUrl": data.videoUrl,
-            "agentOutputDemoUrl": data.agentOutputDemoUrl,
-            "storeListingId": data.storeListingId,
-        }
-        update_data: dict[str, Any] = {
-            k: v for k, v in field_mappings.items() if k in data.model_fields_set
-        }
-
-        # Add status if provided (already validated as enum by Pydantic)
-        if "status" in data.model_fields_set and data.status is not None:
-            update_data["status"] = data.status
-
-        if not update_data:
-            # No updates, just return current data
-            return await get_waitlist_admin(waitlist_id)
-
-        waitlist = await prisma.models.WaitlistEntry.prisma().update(
-            where={"id": waitlist_id},
-            data=prisma.types.WaitlistEntryUpdateInput(**update_data),
-            include={"joinedUsers": True},
-        )
-
-        # We already verified existence above, so this should never be None
-        assert waitlist is not None
-        return _waitlist_to_admin_response(waitlist)
-    except ValueError:
-        raise
-    except Exception as e:
-        logger.error(f"Error updating waitlist {waitlist_id}: {e}")
-        raise DatabaseError("Failed to update waitlist") from e
-
-
-async def delete_waitlist_admin(waitlist_id: str) -> None:
-    """Soft delete a waitlist (admin only)."""
-    logger.info(f"Soft deleting waitlist {waitlist_id}")
-
-    try:
-        # Check if waitlist exists first
-        waitlist = await prisma.models.WaitlistEntry.prisma().find_unique(
-            where={"id": waitlist_id},
-        )
-
-        if not waitlist:
-            raise ValueError(f"Waitlist {waitlist_id} not found")
-
-        if waitlist.isDeleted:
-            raise ValueError(f"Waitlist {waitlist_id} has already been deleted")
-
-        await prisma.models.WaitlistEntry.prisma().update(
-            where={"id": waitlist_id},
-            data={"isDeleted": True},
-        )
-    except ValueError:
-        raise
-    except Exception as e:
-        logger.error(f"Error deleting waitlist {waitlist_id}: {e}")
-        raise DatabaseError("Failed to delete waitlist") from e
-
-
-async def get_waitlist_signups_admin(
-    waitlist_id: str,
-) -> store_model.WaitlistSignupListResponse:
-    """Get all signups for a waitlist (admin only)."""
-    try:
-        waitlist = await prisma.models.WaitlistEntry.prisma().find_unique(
-            where={"id": waitlist_id},
-            include={"joinedUsers": True},
-        )
-
-        if not waitlist:
-            raise ValueError(f"Waitlist {waitlist_id} not found")
-
-        signups: list[store_model.WaitlistSignup] = []
-
-        # Add user signups
-        for user in waitlist.joinedUsers or []:
-            signups.append(
-                store_model.WaitlistSignup(
-                    type="user",
-                    userId=user.id,
-                    email=user.email,
-                    username=user.name,
-                )
-            )
-
-        # Add email signups
-        for email in waitlist.unaffiliatedEmailUsers or []:
-            signups.append(
-                store_model.WaitlistSignup(
-                    type="email",
-                    email=email,
-                )
-            )
-
-        return store_model.WaitlistSignupListResponse(
-            waitlistId=waitlist_id,
-            signups=signups,
-            totalCount=len(signups),
-        )
-    except ValueError:
-        raise
-    except Exception as e:
-        logger.error(f"Error fetching signups for waitlist {waitlist_id}: {e}")
-        raise DatabaseError("Failed to fetch waitlist signups") from e
-
-
-async def link_waitlist_to_listing_admin(
-    waitlist_id: str,
-    store_listing_id: str,
-) -> store_model.WaitlistAdminResponse:
-    """Link a waitlist to a store listing (admin only)."""
-    logger.info(f"Linking waitlist {waitlist_id} to listing {store_listing_id}")
-
-    try:
-        # Verify the waitlist exists
-        waitlist = await prisma.models.WaitlistEntry.prisma().find_unique(
-            where={"id": waitlist_id}
-        )
-
-        if not waitlist:
-            raise ValueError(f"Waitlist {waitlist_id} not found")
-
-        if waitlist.isDeleted:
-            raise ValueError(f"Waitlist {waitlist_id} has been deleted")
-
-        # Verify the store listing exists
-        listing = await prisma.models.StoreListing.prisma().find_unique(
-            where={"id": store_listing_id}
-        )
-
-        if not listing:
-            raise ValueError(f"Store listing {store_listing_id} not found")
-
-        updated_waitlist = await prisma.models.WaitlistEntry.prisma().update(
-            where={"id": waitlist_id},
-            data={"StoreListing": {"connect": {"id": store_listing_id}}},
-            include={"joinedUsers": True},
-        )
-
-        # We already verified existence above, so this should never be None
-        assert updated_waitlist is not None
-        return _waitlist_to_admin_response(updated_waitlist)
-    except ValueError:
-        raise
-    except Exception as e:
-        logger.error(f"Error linking waitlist to listing: {e}")
-        raise DatabaseError("Failed to link waitlist to listing") from e
-
-
-async def notify_waitlist_users_on_launch(
-    store_listing_id: str,
-    agent_name: str,
-    store_url: str,
-) -> int:
-    """
-    Notify all users on waitlists linked to a store listing when the agent is launched.
-
-    Args:
-        store_listing_id: The ID of the store listing that was approved
-        agent_name: The name of the approved agent
-        store_url: The URL to the agent's store page
-
-    Returns:
-        The number of notifications sent
-    """
-    logger.info(f"Notifying waitlist users for store listing {store_listing_id}")
-
-    try:
-        # Find all active waitlists linked to this store listing
-        # Exclude DONE and CANCELED to prevent duplicate notifications on re-approval
-        waitlists = await prisma.models.WaitlistEntry.prisma().find_many(
-            where={
-                "storeListingId": store_listing_id,
-                "isDeleted": False,
-                "status": {
-                    "not_in": [
-                        prisma.enums.WaitlistExternalStatus.DONE,
-                        prisma.enums.WaitlistExternalStatus.CANCELED,
-                    ]
-                },
-            },
-            include={"joinedUsers": True},
-        )
-
-        if not waitlists:
-            logger.info(
-                f"No active waitlists found for store listing {store_listing_id}"
-            )
-            return 0
-
-        notification_count = 0
-        launched_at = datetime.now(tz=timezone.utc)
-
-        for waitlist in waitlists:
-            # Track notification results for this waitlist
-            users_to_notify = waitlist.joinedUsers or []
-            failed_user_ids: list[str] = []
-
-            # Notify registered users
-            for user in users_to_notify:
-                try:
-                    notification_data = WaitlistLaunchData(
-                        agent_name=agent_name,
-                        waitlist_name=waitlist.name,
-                        store_url=store_url,
-                        launched_at=launched_at,
-                    )
-
-                    notification_event = NotificationEventModel[WaitlistLaunchData](
-                        user_id=user.id,
-                        type=prisma.enums.NotificationType.WAITLIST_LAUNCH,
-                        data=notification_data,
```
|
|
||||||
)
|
|
||||||
|
|
||||||
await queue_notification_async(notification_event)
|
|
||||||
notification_count += 1
|
|
||||||
except Exception as e:
|
|
||||||
logger.error(
|
|
||||||
f"Failed to send waitlist launch notification to user {user.id}: {e}"
|
|
||||||
)
|
|
||||||
failed_user_ids.append(user.id)
|
|
||||||
|
|
||||||
# Note: For unaffiliated email users, you would need to send emails directly
|
|
||||||
# since they don't have user IDs for the notification system.
|
|
||||||
# This could be done via a separate email service.
|
|
||||||
# For now, we log these for potential manual follow-up or future implementation.
|
|
||||||
has_pending_email_users = bool(waitlist.unaffiliatedEmailUsers)
|
|
||||||
if has_pending_email_users:
|
|
||||||
logger.info(
|
|
||||||
f"Waitlist {waitlist.id} has {len(waitlist.unaffiliatedEmailUsers)} "
|
|
||||||
f"unaffiliated email users that need email notifications"
|
|
||||||
)
|
|
||||||
|
|
||||||
# Only mark waitlist as DONE if all registered user notifications succeeded
|
|
||||||
# AND there are no unaffiliated email users still waiting for notifications
|
|
||||||
if not failed_user_ids and not has_pending_email_users:
|
|
||||||
await prisma.models.WaitlistEntry.prisma().update(
|
|
||||||
where={"id": waitlist.id},
|
|
||||||
data={"status": prisma.enums.WaitlistExternalStatus.DONE},
|
|
||||||
)
|
|
||||||
logger.info(f"Updated waitlist {waitlist.id} status to DONE")
|
|
||||||
elif failed_user_ids:
|
|
||||||
logger.warning(
|
|
||||||
f"Waitlist {waitlist.id} not marked as DONE due to "
|
|
||||||
f"{len(failed_user_ids)} failed notifications"
|
|
||||||
)
|
|
||||||
elif has_pending_email_users:
|
|
||||||
logger.warning(
|
|
||||||
f"Waitlist {waitlist.id} not marked as DONE due to "
|
|
||||||
f"{len(waitlist.unaffiliatedEmailUsers)} pending email-only users"
|
|
||||||
)
|
|
||||||
|
|
||||||
logger.info(
|
|
||||||
f"Sent {notification_count} waitlist launch notifications for store listing {store_listing_id}"
|
|
||||||
)
|
|
||||||
return notification_count
|
|
||||||
|
|
||||||
except Exception as e:
|
|
||||||
logger.error(
|
|
||||||
f"Error notifying waitlist users for store listing {store_listing_id}: {e}"
|
|
||||||
)
|
|
||||||
# Don't raise - we don't want to fail the approval process
|
|
||||||
return 0
|
|
||||||
|
|||||||
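The removed helper above was deliberately fire-and-safe: it swallows all errors and returns 0, so a store-listing approval could never fail on notification delivery. A minimal sketch of a call site under that contract; the wrapper name and argument values here are illustrative assumptions, not code from this diff:

async def approve_listing_and_notify(store_listing_id: str) -> None:
    # ... listing approval logic would run first ...
    sent = await notify_waitlist_users_on_launch(
        store_listing_id=store_listing_id,
        agent_name="Example Agent",  # illustrative value
        store_url="https://example.com/store/example-agent",  # illustrative value
    )
    # Returns 0 on any internal failure, so the approval flow proceeds regardless.
    print(f"Queued {sent} launch notifications")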
@@ -16,7 +16,7 @@ from backend.blocks.ideogram import (
     StyleType,
     UpscaleOption,
 )
-from backend.data.graph import BaseGraph
+from backend.data.graph import GraphBaseMeta
 from backend.data.model import CredentialsMetaInput, ProviderName
 from backend.integrations.credentials_store import ideogram_credentials
 from backend.util.request import Requests
@@ -34,14 +34,14 @@ class ImageStyle(str, Enum):
     DIGITAL_ART = "digital art"


-async def generate_agent_image(agent: BaseGraph | AgentGraph) -> io.BytesIO:
+async def generate_agent_image(agent: GraphBaseMeta | AgentGraph) -> io.BytesIO:
     if settings.config.use_agent_image_generation_v2:
         return await generate_agent_image_v2(graph=agent)
     else:
         return await generate_agent_image_v1(agent=agent)


-async def generate_agent_image_v2(graph: BaseGraph | AgentGraph) -> io.BytesIO:
+async def generate_agent_image_v2(graph: GraphBaseMeta | AgentGraph) -> io.BytesIO:
     """
     Generate an image for an agent using Ideogram model.
     Returns:
@@ -54,14 +54,17 @@ async def generate_agent_image_v2(graph: BaseGraph | AgentGraph) -> io.BytesIO:
     description = f"{name} ({graph.description})" if graph.description else name

     prompt = (
-        f"Create a visually striking retro-futuristic vector pop art illustration prominently featuring "
-        f'"{name}" in bold typography. The image clearly and literally depicts a {description}, '
-        f"along with recognizable objects directly associated with the primary function of a {name}. "
-        f"Ensure the imagery is concrete, intuitive, and immediately understandable, clearly conveying the "
-        f"purpose of a {name}. Maintain vibrant, limited-palette colors, sharp vector lines, geometric "
-        f"shapes, flat illustration techniques, and solid colors without gradients or shading. Preserve a "
-        f"retro-futuristic aesthetic influenced by mid-century futurism and 1960s psychedelia, "
-        f"prioritizing clear visual storytelling and thematic clarity above all else."
+        "Create a visually striking retro-futuristic vector pop art illustration "
+        f'prominently featuring "{name}" in bold typography. The image clearly and '
+        f"literally depicts a {description}, along with recognizable objects directly "
+        f"associated with the primary function of a {name}. "
+        f"Ensure the imagery is concrete, intuitive, and immediately understandable, "
+        f"clearly conveying the purpose of a {name}. "
+        "Maintain vibrant, limited-palette colors, sharp vector lines, "
+        "geometric shapes, flat illustration techniques, and solid colors "
+        "without gradients or shading. Preserve a retro-futuristic aesthetic "
+        "influenced by mid-century futurism and 1960s psychedelia, "
+        "prioritizing clear visual storytelling and thematic clarity above all else."
     )

     custom_colors = [
@@ -99,12 +102,12 @@ async def generate_agent_image_v2(graph: BaseGraph | AgentGraph) -> io.BytesIO:
     return io.BytesIO(response.content)


-async def generate_agent_image_v1(agent: BaseGraph | AgentGraph) -> io.BytesIO:
+async def generate_agent_image_v1(agent: GraphBaseMeta | AgentGraph) -> io.BytesIO:
     """
     Generate an image for an agent using Flux model via Replicate API.

     Args:
-        agent (Graph): The agent to generate an image for
+        agent (GraphBaseMeta | AgentGraph): The agent to generate an image for

     Returns:
         io.BytesIO: The generated image as bytes
@@ -114,7 +117,13 @@ async def generate_agent_image_v1(agent: BaseGraph | AgentGraph) -> io.BytesIO:
         raise ValueError("Missing Replicate API key in settings")

     # Construct prompt from agent details
-    prompt = f"Create a visually engaging app store thumbnail for the AI agent that highlights what it does in a clear and captivating way:\n- **Name**: {agent.name}\n- **Description**: {agent.description}\nFocus on showcasing its core functionality with an appealing design."
+    prompt = (
+        "Create a visually engaging app store thumbnail for the AI agent "
+        "that highlights what it does in a clear and captivating way:\n"
+        f"- **Name**: {agent.name}\n"
+        f"- **Description**: {agent.description}\n"
+        f"Focus on showcasing its core functionality with an appealing design."
+    )

     # Set up Replicate client
     client = ReplicateClient(api_token=settings.secrets.replicate_api_key)
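The prompt rewrites in both hunks above lean on Python's implicit concatenation of adjacent string literals: plain and f-string pieces can be mixed inside parentheses, and the result is identical to the original single long f-string. A quick self-contained check:

name = "Agent"
single = f"prefix {name} suffix"
split = (
    "prefix "   # plain literal
    f"{name} "  # f-string piece
    "suffix"    # plain literal
)
assert single == split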
@@ -224,102 +224,6 @@ class ReviewSubmissionRequest(pydantic.BaseModel):
     internal_comments: str | None = None  # Private admin notes


-class StoreWaitlistEntry(pydantic.BaseModel):
-    """Public waitlist entry - no PII fields exposed."""
-
-    waitlistId: str
-    slug: str
-
-    # Content fields
-    name: str
-    subHeading: str
-    videoUrl: str | None = None
-    agentOutputDemoUrl: str | None = None
-    imageUrls: list[str]
-    description: str
-    categories: list[str]
-
-
-class StoreWaitlistsAllResponse(pydantic.BaseModel):
-    listings: list[StoreWaitlistEntry]
-
-
-# Admin Waitlist Models
-
-
-class WaitlistCreateRequest(pydantic.BaseModel):
-    """Request model for creating a new waitlist."""
-
-    name: str
-    slug: str
-    subHeading: str
-    description: str
-    categories: list[str] = []
-    imageUrls: list[str] = []
-    videoUrl: str | None = None
-    agentOutputDemoUrl: str | None = None
-
-
-class WaitlistUpdateRequest(pydantic.BaseModel):
-    """Request model for updating a waitlist."""
-
-    name: str | None = None
-    slug: str | None = None
-    subHeading: str | None = None
-    description: str | None = None
-    categories: list[str] | None = None
-    imageUrls: list[str] | None = None
-    videoUrl: str | None = None
-    agentOutputDemoUrl: str | None = None
-    status: prisma.enums.WaitlistExternalStatus | None = None
-    storeListingId: str | None = None  # Link to a store listing
-
-
-class WaitlistAdminResponse(pydantic.BaseModel):
-    """Admin response model with full waitlist details including internal data."""
-
-    id: str
-    createdAt: str
-    updatedAt: str
-    slug: str
-    name: str
-    subHeading: str
-    description: str
-    categories: list[str]
-    imageUrls: list[str]
-    videoUrl: str | None = None
-    agentOutputDemoUrl: str | None = None
-    status: prisma.enums.WaitlistExternalStatus
-    votes: int
-    signupCount: int  # Total count of joinedUsers + unaffiliatedEmailUsers
-    storeListingId: str | None = None
-    owningUserId: str
-
-
-class WaitlistSignup(pydantic.BaseModel):
-    """Individual signup entry for a waitlist."""
-
-    type: str  # "user" or "email"
-    userId: str | None = None
-    email: str | None = None
-    username: str | None = None  # For user signups
-
-
-class WaitlistSignupListResponse(pydantic.BaseModel):
-    """Response model for listing waitlist signups."""
-
-    waitlistId: str
-    signups: list[WaitlistSignup]
-    totalCount: int
-
-
-class WaitlistAdminListResponse(pydantic.BaseModel):
-    """Response model for listing all waitlists (admin view)."""
-
-    waitlists: list[WaitlistAdminResponse]
-    totalCount: int
-
-
 class UnifiedSearchResult(pydantic.BaseModel):
     """A single result from unified hybrid search across all content types."""

@@ -8,7 +8,6 @@ import autogpt_libs.auth
 import fastapi
 import fastapi.responses
 import prisma.enums
-from autogpt_libs.auth.dependencies import get_optional_user_id

 import backend.data.graph
 import backend.util.json
@@ -82,74 +81,6 @@ async def update_or_create_profile(
     return updated_profile


-##############################################
-############## Waitlist Endpoints ############
-##############################################
-@router.get(
-    "/waitlist",
-    summary="Get the agent waitlist",
-    tags=["store", "public"],
-    response_model=store_model.StoreWaitlistsAllResponse,
-)
-async def get_waitlist():
-    """
-    Get all active waitlists for public display.
-    """
-    waitlists = await store_db.get_waitlist()
-    return store_model.StoreWaitlistsAllResponse(listings=waitlists)
-
-
-@router.get(
-    "/waitlist/my-memberships",
-    summary="Get waitlist IDs the current user has joined",
-    tags=["store", "private"],
-)
-async def get_my_waitlist_memberships(
-    user_id: str = fastapi.Security(autogpt_libs.auth.get_user_id),
-) -> list[str]:
-    """Returns list of waitlist IDs the authenticated user has joined."""
-    return await store_db.get_user_waitlist_memberships(user_id)
-
-
-@router.post(
-    path="/waitlist/{waitlist_id}/join",
-    summary="Add self to the agent waitlist",
-    tags=["store", "public"],
-    response_model=store_model.StoreWaitlistEntry,
-)
-async def add_self_to_waitlist(
-    user_id: str | None = fastapi.Security(get_optional_user_id),
-    waitlist_id: str = fastapi.Path(..., description="The ID of the waitlist to join"),
-    email: str | None = fastapi.Body(
-        default=None, embed=True, description="Email address for unauthenticated users"
-    ),
-):
-    """
-    Add the current user to the agent waitlist.
-    """
-    if not user_id and not email:
-        raise fastapi.HTTPException(
-            status_code=400,
-            detail="Either user authentication or email address is required",
-        )
-
-    try:
-        waitlist_entry = await store_db.add_user_to_waitlist(
-            waitlist_id=waitlist_id, user_id=user_id, email=email
-        )
-        return waitlist_entry
-    except ValueError as e:
-        error_msg = str(e)
-        if "not found" in error_msg:
-            raise fastapi.HTTPException(status_code=404, detail="Waitlist not found")
-        # Waitlist exists but is closed or unavailable
-        raise fastapi.HTTPException(status_code=400, detail=error_msg)
-    except Exception:
-        raise fastapi.HTTPException(
-            status_code=500, detail="An error occurred while joining the waitlist"
-        )
-
-
 ##############################################
 ############### Agent Endpoints ##############
 ##############################################
@@ -347,7 +278,7 @@ async def get_agent(
 )
 async def get_graph_meta_by_store_listing_version_id(
     store_listing_version_id: str,
-) -> backend.data.graph.GraphMeta:
+) -> backend.data.graph.GraphModelWithoutNodes:
     """
     Get Agent Graph from Store Listing Version ID.
     """
@@ -101,7 +101,6 @@ from backend.util.timezone_utils import (
 from backend.util.virus_scanner import scan_content_safe

 from .library import db as library_db
-from .library import model as library_model
 from .store.model import StoreAgentDetails


@@ -823,18 +822,16 @@ async def update_graph(
     graph: graph_db.Graph,
     user_id: Annotated[str, Security(get_user_id)],
 ) -> graph_db.GraphModel:
-    # Sanity check
     if graph.id and graph.id != graph_id:
         raise HTTPException(400, detail="Graph ID does not match ID in URI")

-    # Determine new version
     existing_versions = await graph_db.get_graph_all_versions(graph_id, user_id=user_id)
     if not existing_versions:
         raise HTTPException(404, detail=f"Graph #{graph_id} not found")
-    latest_version_number = max(g.version for g in existing_versions)
-    graph.version = latest_version_number + 1
-
+    graph.version = max(g.version for g in existing_versions) + 1
     current_active_version = next((v for v in existing_versions if v.is_active), None)

     graph = graph_db.make_graph_model(graph, user_id)
     graph.reassign_ids(user_id=user_id, reassign_graph_id=False)
     graph.validate_graph(for_run=False)
@@ -842,27 +839,23 @@ async def update_graph(
     new_graph_version = await graph_db.create_graph(graph, user_id=user_id)

     if new_graph_version.is_active:
-        # Keep the library agent up to date with the new active version
-        await _update_library_agent_version_and_settings(user_id, new_graph_version)
-        # Handle activation of the new graph first to ensure continuity
+        await library_db.update_library_agent_version_and_settings(
+            user_id, new_graph_version
+        )
         new_graph_version = await on_graph_activate(new_graph_version, user_id=user_id)
-        # Ensure new version is the only active version
         await graph_db.set_graph_active_version(
             graph_id=graph_id, version=new_graph_version.version, user_id=user_id
         )
         if current_active_version:
-            # Handle deactivation of the previously active version
             await on_graph_deactivate(current_active_version, user_id=user_id)

-    # Fetch new graph version *with sub-graphs* (needed for credentials input schema)
     new_graph_version_with_subgraphs = await graph_db.get_graph(
         graph_id,
         new_graph_version.version,
         user_id=user_id,
         include_subgraphs=True,
     )
-    assert new_graph_version_with_subgraphs  # make type checker happy
+    assert new_graph_version_with_subgraphs
     return new_graph_version_with_subgraphs


@@ -900,33 +893,15 @@ async def set_graph_active_version(
     )

     # Keep the library agent up to date with the new active version
-    await _update_library_agent_version_and_settings(user_id, new_active_graph)
+    await library_db.update_library_agent_version_and_settings(
+        user_id, new_active_graph
+    )

     if current_active_graph and current_active_graph.version != new_active_version:
         # Handle deactivation of the previously active version
         await on_graph_deactivate(current_active_graph, user_id=user_id)


-async def _update_library_agent_version_and_settings(
-    user_id: str, agent_graph: graph_db.GraphModel
-) -> library_model.LibraryAgent:
-    library = await library_db.update_agent_version_in_library(
-        user_id, agent_graph.id, agent_graph.version
-    )
-    updated_settings = GraphSettings.from_graph(
-        graph=agent_graph,
-        hitl_safe_mode=library.settings.human_in_the_loop_safe_mode,
-        sensitive_action_safe_mode=library.settings.sensitive_action_safe_mode,
-    )
-    if updated_settings != library.settings:
-        library = await library_db.update_library_agent(
-            library_agent_id=library.id,
-            user_id=user_id,
-            settings=updated_settings,
-        )
-    return library
-
-
 @v1_router.patch(
     path="/graphs/{graph_id}/settings",
     summary="Update graph settings",
@@ -19,7 +19,6 @@ from prisma.errors import PrismaError
 import backend.api.features.admin.credit_admin_routes
 import backend.api.features.admin.execution_analytics_routes
 import backend.api.features.admin.store_admin_routes
-import backend.api.features.admin.waitlist_admin_routes
 import backend.api.features.builder
 import backend.api.features.builder.routes
 import backend.api.features.chat.routes as chat_routes
@@ -307,11 +306,6 @@ app.include_router(
     tags=["v2", "admin"],
     prefix="/api/store",
 )
-app.include_router(
-    backend.api.features.admin.waitlist_admin_routes.router,
-    tags=["v2", "admin"],
-    prefix="/api/store",
-)
 app.include_router(
     backend.api.features.admin.credit_admin_routes.router,
     tags=["v2", "admin"],
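The update_graph hunk above folds the two-step version bump into a single expression; a throwaway check that the two forms agree (the version list is an illustrative stand-in for [g.version for g in existing_versions]):

versions = [1, 2, 5]
latest_version_number = max(versions)
assert latest_version_number + 1 == max(versions) + 1 == 6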
28  autogpt_platform/backend/backend/blocks/elevenlabs/_auth.py  Normal file
@@ -0,0 +1,28 @@
+"""ElevenLabs integration blocks - test credentials and shared utilities."""
+
+from typing import Literal
+
+from pydantic import SecretStr
+
+from backend.data.model import APIKeyCredentials, CredentialsMetaInput
+from backend.integrations.providers import ProviderName
+
+TEST_CREDENTIALS = APIKeyCredentials(
+    id="01234567-89ab-cdef-0123-456789abcdef",
+    provider="elevenlabs",
+    api_key=SecretStr("mock-elevenlabs-api-key"),
+    title="Mock ElevenLabs API key",
+    expires_at=None,
+)
+
+TEST_CREDENTIALS_INPUT = {
+    "provider": TEST_CREDENTIALS.provider,
+    "id": TEST_CREDENTIALS.id,
+    "type": TEST_CREDENTIALS.type,
+    "title": TEST_CREDENTIALS.title,
+}
+
+ElevenLabsCredentials = APIKeyCredentials
+ElevenLabsCredentialsInput = CredentialsMetaInput[
+    Literal[ProviderName.ELEVENLABS], Literal["api_key"]
+]
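A sketch of how a block's input schema might consume these aliases, following the credentials pattern used by other provider blocks on the platform; CredentialsField and the schema below are assumptions, since no ElevenLabs block appears in this part of the diff:

from backend.blocks.elevenlabs._auth import ElevenLabsCredentialsInput
from backend.data.block import BlockSchemaInput
from backend.data.model import CredentialsField  # assumed helper, per platform convention


class ExampleSpeechInput(BlockSchemaInput):
    # Hypothetical schema for an ElevenLabs-backed block.
    credentials: ElevenLabsCredentialsInput = CredentialsField(
        description="ElevenLabs API key used to call the speech endpoints."
    )

TEST_CREDENTIALS_INPUT would then serve as the credentials value in such a block's test_input.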
77  autogpt_platform/backend/backend/blocks/encoder_block.py  Normal file
@@ -0,0 +1,77 @@
+"""Text encoding block for converting special characters to escape sequences."""
+
+import codecs
+
+from backend.data.block import (
+    Block,
+    BlockCategory,
+    BlockOutput,
+    BlockSchemaInput,
+    BlockSchemaOutput,
+)
+from backend.data.model import SchemaField
+
+
+class TextEncoderBlock(Block):
+    """
+    Encodes a string by converting special characters into escape sequences.
+
+    This block is the inverse of TextDecoderBlock. It takes text containing
+    special characters (like newlines, tabs, etc.) and converts them into
+    their escape sequence representations (e.g., newline becomes \\n).
+    """
+
+    class Input(BlockSchemaInput):
+        """Input schema for TextEncoderBlock."""
+
+        text: str = SchemaField(
+            description="A string containing special characters to be encoded",
+            placeholder="Your text with newlines and quotes to encode",
+        )
+
+    class Output(BlockSchemaOutput):
+        """Output schema for TextEncoderBlock."""
+
+        encoded_text: str = SchemaField(
+            description="The encoded text with special characters converted to escape sequences"
+        )
+        error: str = SchemaField(description="Error message if encoding fails")
+
+    def __init__(self):
+        super().__init__(
+            id="5185f32e-4b65-4ecf-8fbb-873f003f09d6",
+            description="Encodes a string by converting special characters into escape sequences",
+            categories={BlockCategory.TEXT},
+            input_schema=TextEncoderBlock.Input,
+            output_schema=TextEncoderBlock.Output,
+            test_input={
+                "text": """Hello
+World!
+This is a "quoted" string."""
+            },
+            test_output=[
+                (
+                    "encoded_text",
+                    """Hello\\nWorld!\\nThis is a "quoted" string.""",
+                )
+            ],
+        )
+
+    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
+        """
+        Encode the input text by converting special characters to escape sequences.
+
+        Args:
+            input_data: The input containing the text to encode.
+            **kwargs: Additional keyword arguments (unused).
+
+        Yields:
+            The encoded text with escape sequences, or an error message if encoding fails.
+        """
+        try:
+            encoded_text = codecs.encode(input_data.text, "unicode_escape").decode(
+                "utf-8"
+            )
+            yield "encoded_text", encoded_text
+        except Exception as e:
+            yield "error", f"Encoding error: {str(e)}"
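What the block's codecs call produces, shown standalone (this mirrors the test file added later in this diff):

import codecs

raw = 'Hello\nWorld!\tTab "quoted"'
encoded = codecs.encode(raw, "unicode_escape").decode("utf-8")
# Control characters become two-character escape sequences; quotes are untouched.
assert "\\n" in encoded and "\\t" in encoded and "\n" not in encoded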
@@ -115,6 +115,7 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta):
     CLAUDE_4_5_OPUS = "claude-opus-4-5-20251101"
     CLAUDE_4_5_SONNET = "claude-sonnet-4-5-20250929"
     CLAUDE_4_5_HAIKU = "claude-haiku-4-5-20251001"
+    CLAUDE_4_6_OPUS = "claude-opus-4-6"
     CLAUDE_3_HAIKU = "claude-3-haiku-20240307"
     # AI/ML API models
     AIML_API_QWEN2_5_72B = "Qwen/Qwen2.5-72B-Instruct-Turbo"
@@ -270,6 +271,9 @@ MODEL_METADATA = {
     LlmModel.CLAUDE_4_SONNET: ModelMetadata(
         "anthropic", 200000, 64000, "Claude Sonnet 4", "Anthropic", "Anthropic", 2
     ),  # claude-4-sonnet-20250514
+    LlmModel.CLAUDE_4_6_OPUS: ModelMetadata(
+        "anthropic", 200000, 128000, "Claude Opus 4.6", "Anthropic", "Anthropic", 3
+    ),  # claude-opus-4-6
     LlmModel.CLAUDE_4_5_OPUS: ModelMetadata(
         "anthropic", 200000, 64000, "Claude Opus 4.5", "Anthropic", "Anthropic", 3
     ),  # claude-opus-4-5-20251101
@@ -1,246 +0,0 @@
-import os
-import tempfile
-from typing import Optional
-
-from moviepy.audio.io.AudioFileClip import AudioFileClip
-from moviepy.video.fx.Loop import Loop
-from moviepy.video.io.VideoFileClip import VideoFileClip
-
-from backend.data.block import (
-    Block,
-    BlockCategory,
-    BlockOutput,
-    BlockSchemaInput,
-    BlockSchemaOutput,
-)
-from backend.data.execution import ExecutionContext
-from backend.data.model import SchemaField
-from backend.util.file import MediaFileType, get_exec_file_path, store_media_file
-
-
-class MediaDurationBlock(Block):
-
-    class Input(BlockSchemaInput):
-        media_in: MediaFileType = SchemaField(
-            description="Media input (URL, data URI, or local path)."
-        )
-        is_video: bool = SchemaField(
-            description="Whether the media is a video (True) or audio (False).",
-            default=True,
-        )
-
-    class Output(BlockSchemaOutput):
-        duration: float = SchemaField(
-            description="Duration of the media file (in seconds)."
-        )
-
-    def __init__(self):
-        super().__init__(
-            id="d8b91fd4-da26-42d4-8ecb-8b196c6d84b6",
-            description="Block to get the duration of a media file.",
-            categories={BlockCategory.MULTIMEDIA},
-            input_schema=MediaDurationBlock.Input,
-            output_schema=MediaDurationBlock.Output,
-        )
-
-    async def run(
-        self,
-        input_data: Input,
-        *,
-        execution_context: ExecutionContext,
-        **kwargs,
-    ) -> BlockOutput:
-        # 1) Store the input media locally
-        local_media_path = await store_media_file(
-            file=input_data.media_in,
-            execution_context=execution_context,
-            return_format="for_local_processing",
-        )
-        assert execution_context.graph_exec_id is not None
-        media_abspath = get_exec_file_path(
-            execution_context.graph_exec_id, local_media_path
-        )
-
-        # 2) Load the clip
-        if input_data.is_video:
-            clip = VideoFileClip(media_abspath)
-        else:
-            clip = AudioFileClip(media_abspath)
-
-        yield "duration", clip.duration
-
-
-class LoopVideoBlock(Block):
-    """
-    Block for looping (repeating) a video clip until a given duration or number of loops.
-    """
-
-    class Input(BlockSchemaInput):
-        video_in: MediaFileType = SchemaField(
-            description="The input video (can be a URL, data URI, or local path)."
-        )
-        # Provide EITHER a `duration` or `n_loops` or both. We'll demonstrate `duration`.
-        duration: Optional[float] = SchemaField(
-            description="Target duration (in seconds) to loop the video to. If omitted, defaults to no looping.",
-            default=None,
-            ge=0.0,
-        )
-        n_loops: Optional[int] = SchemaField(
-            description="Number of times to repeat the video. If omitted, defaults to 1 (no repeat).",
-            default=None,
-            ge=1,
-        )
-
-    class Output(BlockSchemaOutput):
-        video_out: str = SchemaField(
-            description="Looped video returned either as a relative path or a data URI."
-        )
-
-    def __init__(self):
-        super().__init__(
-            id="8bf9eef6-5451-4213-b265-25306446e94b",
-            description="Block to loop a video to a given duration or number of repeats.",
-            categories={BlockCategory.MULTIMEDIA},
-            input_schema=LoopVideoBlock.Input,
-            output_schema=LoopVideoBlock.Output,
-        )
-
-    async def run(
-        self,
-        input_data: Input,
-        *,
-        execution_context: ExecutionContext,
-        **kwargs,
-    ) -> BlockOutput:
-        assert execution_context.graph_exec_id is not None
-        assert execution_context.node_exec_id is not None
-        graph_exec_id = execution_context.graph_exec_id
-        node_exec_id = execution_context.node_exec_id
-
-        # 1) Store the input video locally
-        local_video_path = await store_media_file(
-            file=input_data.video_in,
-            execution_context=execution_context,
-            return_format="for_local_processing",
-        )
-        input_abspath = get_exec_file_path(graph_exec_id, local_video_path)
-
-        # 2) Load the clip
-        clip = VideoFileClip(input_abspath)
-
-        # 3) Apply the loop effect
-        looped_clip = clip
-        if input_data.duration:
-            # Loop until we reach the specified duration
-            looped_clip = looped_clip.with_effects([Loop(duration=input_data.duration)])
-        elif input_data.n_loops:
-            looped_clip = looped_clip.with_effects([Loop(n=input_data.n_loops)])
-        else:
-            raise ValueError("Either 'duration' or 'n_loops' must be provided.")
-
-        assert isinstance(looped_clip, VideoFileClip)
-
-        # 4) Save the looped output
-        output_filename = MediaFileType(
-            f"{node_exec_id}_looped_{os.path.basename(local_video_path)}"
-        )
-        output_abspath = get_exec_file_path(graph_exec_id, output_filename)
-
-        looped_clip = looped_clip.with_audio(clip.audio)
-        looped_clip.write_videofile(output_abspath, codec="libx264", audio_codec="aac")
-
-        # Return output - for_block_output returns workspace:// if available, else data URI
-        video_out = await store_media_file(
-            file=output_filename,
-            execution_context=execution_context,
-            return_format="for_block_output",
-        )
-
-        yield "video_out", video_out
-
-
-class AddAudioToVideoBlock(Block):
-    """
-    Block that adds (attaches) an audio track to an existing video.
-    Optionally scale the volume of the new track.
-    """
-
-    class Input(BlockSchemaInput):
-        video_in: MediaFileType = SchemaField(
-            description="Video input (URL, data URI, or local path)."
-        )
-        audio_in: MediaFileType = SchemaField(
-            description="Audio input (URL, data URI, or local path)."
-        )
-        volume: float = SchemaField(
-            description="Volume scale for the newly attached audio track (1.0 = original).",
-            default=1.0,
-        )
-
-    class Output(BlockSchemaOutput):
-        video_out: MediaFileType = SchemaField(
-            description="Final video (with attached audio), as a path or data URI."
-        )
-
-    def __init__(self):
-        super().__init__(
-            id="3503748d-62b6-4425-91d6-725b064af509",
-            description="Block to attach an audio file to a video file using moviepy.",
-            categories={BlockCategory.MULTIMEDIA},
-            input_schema=AddAudioToVideoBlock.Input,
-            output_schema=AddAudioToVideoBlock.Output,
-        )
-
-    async def run(
-        self,
-        input_data: Input,
-        *,
-        execution_context: ExecutionContext,
-        **kwargs,
-    ) -> BlockOutput:
-        assert execution_context.graph_exec_id is not None
-        assert execution_context.node_exec_id is not None
-        graph_exec_id = execution_context.graph_exec_id
-        node_exec_id = execution_context.node_exec_id
-
-        # 1) Store the inputs locally
-        local_video_path = await store_media_file(
-            file=input_data.video_in,
-            execution_context=execution_context,
-            return_format="for_local_processing",
-        )
-        local_audio_path = await store_media_file(
-            file=input_data.audio_in,
-            execution_context=execution_context,
-            return_format="for_local_processing",
-        )
-
-        abs_temp_dir = os.path.join(tempfile.gettempdir(), "exec_file", graph_exec_id)
-        video_abspath = os.path.join(abs_temp_dir, local_video_path)
-        audio_abspath = os.path.join(abs_temp_dir, local_audio_path)
-
-        # 2) Load video + audio with moviepy
-        video_clip = VideoFileClip(video_abspath)
-        audio_clip = AudioFileClip(audio_abspath)
-        # Optionally scale volume
-        if input_data.volume != 1.0:
-            audio_clip = audio_clip.with_volume_scaled(input_data.volume)
-
-        # 3) Attach the new audio track
-        final_clip = video_clip.with_audio(audio_clip)
-
-        # 4) Write to output file
-        output_filename = MediaFileType(
-            f"{node_exec_id}_audio_attached_{os.path.basename(local_video_path)}"
-        )
-        output_abspath = os.path.join(abs_temp_dir, output_filename)
-        final_clip.write_videofile(output_abspath, codec="libx264", audio_codec="aac")
-
-        # 5) Return output - for_block_output returns workspace:// if available, else data URI
-        video_out = await store_media_file(
-            file=output_filename,
-            execution_context=execution_context,
-            return_format="for_block_output",
-        )
-
-        yield "video_out", video_out
@@ -0,0 +1,77 @@
+import pytest
+
+from backend.blocks.encoder_block import TextEncoderBlock
+
+
+@pytest.mark.asyncio
+async def test_text_encoder_basic():
+    """Test basic encoding of newlines and special characters."""
+    block = TextEncoderBlock()
+    result = []
+    async for output in block.run(TextEncoderBlock.Input(text="Hello\nWorld")):
+        result.append(output)
+
+    assert len(result) == 1
+    assert result[0][0] == "encoded_text"
+    assert result[0][1] == "Hello\\nWorld"
+
+
+@pytest.mark.asyncio
+async def test_text_encoder_multiple_escapes():
+    """Test encoding of multiple escape sequences."""
+    block = TextEncoderBlock()
+    result = []
+    async for output in block.run(
+        TextEncoderBlock.Input(text="Line1\nLine2\tTabbed\rCarriage")
+    ):
+        result.append(output)
+
+    assert len(result) == 1
+    assert result[0][0] == "encoded_text"
+    assert "\\n" in result[0][1]
+    assert "\\t" in result[0][1]
+    assert "\\r" in result[0][1]
+
+
+@pytest.mark.asyncio
+async def test_text_encoder_unicode():
+    """Test that unicode characters are handled correctly."""
+    block = TextEncoderBlock()
+    result = []
+    async for output in block.run(TextEncoderBlock.Input(text="Hello 世界\n")):
+        result.append(output)
+
+    assert len(result) == 1
+    assert result[0][0] == "encoded_text"
+    # Unicode characters should be escaped as \uXXXX sequences
+    assert "\\n" in result[0][1]
+
+
+@pytest.mark.asyncio
+async def test_text_encoder_empty_string():
+    """Test encoding of an empty string."""
+    block = TextEncoderBlock()
+    result = []
+    async for output in block.run(TextEncoderBlock.Input(text="")):
+        result.append(output)
+
+    assert len(result) == 1
+    assert result[0][0] == "encoded_text"
+    assert result[0][1] == ""
+
+
+@pytest.mark.asyncio
+async def test_text_encoder_error_handling():
+    """Test that encoding errors are handled gracefully."""
+    from unittest.mock import patch
+
+    block = TextEncoderBlock()
+    result = []
+
+    with patch("codecs.encode", side_effect=Exception("Mocked encoding error")):
+        async for output in block.run(TextEncoderBlock.Input(text="test")):
+            result.append(output)
+
+    assert len(result) == 1
+    assert result[0][0] == "error"
+    assert "Mocked encoding error" in result[0][1]
37  autogpt_platform/backend/backend/blocks/video/__init__.py  Normal file
@@ -0,0 +1,37 @@
+"""Video editing blocks for AutoGPT Platform.
+
+This module provides blocks for:
+- Downloading videos from URLs (YouTube, Vimeo, news sites, direct links)
+- Clipping/trimming video segments
+- Concatenating multiple videos
+- Adding text overlays
+- Adding AI-generated narration
+- Getting media duration
+- Looping videos
+- Adding audio to videos
+
+Dependencies:
+- yt-dlp: For video downloading
+- moviepy: For video editing operations
+- elevenlabs: For AI narration (optional)
+"""
+
+from backend.blocks.video.add_audio import AddAudioToVideoBlock
+from backend.blocks.video.clip import VideoClipBlock
+from backend.blocks.video.concat import VideoConcatBlock
+from backend.blocks.video.download import VideoDownloadBlock
+from backend.blocks.video.duration import MediaDurationBlock
+from backend.blocks.video.loop import LoopVideoBlock
+from backend.blocks.video.narration import VideoNarrationBlock
+from backend.blocks.video.text_overlay import VideoTextOverlayBlock
+
+__all__ = [
+    "AddAudioToVideoBlock",
+    "LoopVideoBlock",
+    "MediaDurationBlock",
+    "VideoClipBlock",
+    "VideoConcatBlock",
+    "VideoDownloadBlock",
+    "VideoNarrationBlock",
+    "VideoTextOverlayBlock",
+]
131  autogpt_platform/backend/backend/blocks/video/_utils.py  Normal file
@@ -0,0 +1,131 @@
+"""Shared utilities for video blocks."""
+
+from __future__ import annotations
+
+import logging
+import os
+import re
+import subprocess
+from pathlib import Path
+
+logger = logging.getLogger(__name__)
+
+# Known operation tags added by video blocks
+_VIDEO_OPS = (
+    r"(?:clip|overlay|narrated|looped|concat|audio_attached|with_audio|narration)"
+)
+
+# Matches: {node_exec_id}_{operation}_ where node_exec_id contains a UUID
+_BLOCK_PREFIX_RE = re.compile(
+    r"^[a-zA-Z0-9_-]*"
+    r"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
+    r"[a-zA-Z0-9_-]*"
+    r"_" + _VIDEO_OPS + r"_"
+)
+
+# Matches: a lone {node_exec_id}_ prefix (no operation keyword, e.g. download output)
+_UUID_PREFIX_RE = re.compile(
+    r"^[a-zA-Z0-9_-]*"
+    r"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
+    r"[a-zA-Z0-9_-]*_"
+)
+
+
+def extract_source_name(input_path: str, max_length: int = 50) -> str:
+    """Extract the original source filename by stripping block-generated prefixes.
+
+    Iteratively removes {node_exec_id}_{operation}_ prefixes that accumulate
+    when chaining video blocks, recovering the original human-readable name.
+
+    Safe for plain filenames (no UUID -> no stripping).
+    Falls back to "video" if everything is stripped.
+    """
+    stem = Path(input_path).stem
+
+    # Pass 1: strip {node_exec_id}_{operation}_ prefixes iteratively
+    while _BLOCK_PREFIX_RE.match(stem):
+        stem = _BLOCK_PREFIX_RE.sub("", stem, count=1)
+
+    # Pass 2: strip a lone {node_exec_id}_ prefix (e.g. from download block)
+    if _UUID_PREFIX_RE.match(stem):
+        stem = _UUID_PREFIX_RE.sub("", stem, count=1)
+
+    if not stem:
+        return "video"
+
+    return stem[:max_length]
+
+
+def get_video_codecs(output_path: str) -> tuple[str, str]:
+    """Get appropriate video and audio codecs based on output file extension.
+
+    Args:
+        output_path: Path to the output file (used to determine extension)
+
+    Returns:
+        Tuple of (video_codec, audio_codec)
+
+    Codec mappings:
+    - .mp4: H.264 + AAC (universal compatibility)
+    - .webm: VP8 + Vorbis (web streaming)
+    - .mkv: H.264 + AAC (container supports many codecs)
+    - .mov: H.264 + AAC (Apple QuickTime, widely compatible)
+    - .m4v: H.264 + AAC (Apple iTunes/devices)
+    - .avi: MPEG-4 + MP3 (legacy Windows)
+    """
+    ext = os.path.splitext(output_path)[1].lower()
+
+    codec_map: dict[str, tuple[str, str]] = {
+        ".mp4": ("libx264", "aac"),
+        ".webm": ("libvpx", "libvorbis"),
+        ".mkv": ("libx264", "aac"),
+        ".mov": ("libx264", "aac"),
+        ".m4v": ("libx264", "aac"),
+        ".avi": ("mpeg4", "libmp3lame"),
+    }
+
+    return codec_map.get(ext, ("libx264", "aac"))
+
+
+def strip_chapters_inplace(video_path: str) -> None:
+    """Strip chapter metadata from a media file in-place using ffmpeg.
+
+    MoviePy 2.x crashes with IndexError when parsing files with embedded
+    chapter metadata (https://github.com/Zulko/moviepy/issues/2419).
+    This strips chapters without re-encoding.
+
+    Args:
+        video_path: Absolute path to the media file to strip chapters from.
+    """
+    base, ext = os.path.splitext(video_path)
+    tmp_path = base + ".tmp" + ext
+    try:
+        result = subprocess.run(
+            [
+                "ffmpeg",
+                "-y",
+                "-i",
+                video_path,
+                "-map_chapters",
+                "-1",
+                "-codec",
+                "copy",
+                tmp_path,
+            ],
+            capture_output=True,
+            text=True,
+            timeout=300,
+        )
+        if result.returncode != 0:
+            logger.warning(
+                "ffmpeg chapter strip failed (rc=%d): %s",
+                result.returncode,
+                result.stderr,
+            )
+            return
+        os.replace(tmp_path, video_path)
+    except FileNotFoundError:
+        logger.warning("ffmpeg not found; skipping chapter strip")
+    finally:
+        if os.path.exists(tmp_path):
+            os.unlink(tmp_path)
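How extract_source_name behaves once the definitions above are imported; the UUID and filenames are arbitrary example values:

chained = "abc_0e8f2d44-1111-2222-3333-444455556666_clip_my_holiday_video.mp4"
assert extract_source_name(chained) == "my_holiday_video"
# A plain filename without a UUID is left untouched (minus its extension):
assert extract_source_name("report.mp4") == "report"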
113
autogpt_platform/backend/backend/blocks/video/add_audio.py
Normal file
113
autogpt_platform/backend/backend/blocks/video/add_audio.py
Normal file
@@ -0,0 +1,113 @@
|
|||||||
|
"""AddAudioToVideoBlock - Attach an audio track to a video file."""
|
||||||
|
|
||||||
|
from moviepy.audio.io.AudioFileClip import AudioFileClip
|
||||||
|
from moviepy.video.io.VideoFileClip import VideoFileClip
|
||||||
|
|
||||||
|
from backend.blocks.video._utils import extract_source_name, strip_chapters_inplace
|
||||||
|
from backend.data.block import (
|
||||||
|
Block,
|
||||||
|
BlockCategory,
|
||||||
|
BlockOutput,
|
||||||
|
BlockSchemaInput,
|
||||||
|
BlockSchemaOutput,
|
||||||
|
)
|
||||||
|
from backend.data.execution import ExecutionContext
|
||||||
|
from backend.data.model import SchemaField
|
||||||
|
from backend.util.file import MediaFileType, get_exec_file_path, store_media_file
|
||||||
|
|
||||||
|
|
||||||
|
class AddAudioToVideoBlock(Block):
    """Add (attach) an audio track to an existing video."""

    class Input(BlockSchemaInput):
        video_in: MediaFileType = SchemaField(
            description="Video input (URL, data URI, or local path)."
        )
        audio_in: MediaFileType = SchemaField(
            description="Audio input (URL, data URI, or local path)."
        )
        volume: float = SchemaField(
            description="Volume scale for the newly attached audio track (1.0 = original).",
            default=1.0,
        )

    class Output(BlockSchemaOutput):
        video_out: MediaFileType = SchemaField(
            description="Final video (with attached audio), as a path or data URI."
        )

    def __init__(self):
        super().__init__(
            id="3503748d-62b6-4425-91d6-725b064af509",
            description="Block to attach an audio file to a video file using moviepy.",
            categories={BlockCategory.MULTIMEDIA},
            input_schema=AddAudioToVideoBlock.Input,
            output_schema=AddAudioToVideoBlock.Output,
        )

    async def run(
        self,
        input_data: Input,
        *,
        execution_context: ExecutionContext,
        **kwargs,
    ) -> BlockOutput:
        assert execution_context.graph_exec_id is not None
        assert execution_context.node_exec_id is not None
        graph_exec_id = execution_context.graph_exec_id
        node_exec_id = execution_context.node_exec_id

        # 1) Store the inputs locally
        local_video_path = await store_media_file(
            file=input_data.video_in,
            execution_context=execution_context,
            return_format="for_local_processing",
        )
        local_audio_path = await store_media_file(
            file=input_data.audio_in,
            execution_context=execution_context,
            return_format="for_local_processing",
        )

        video_abspath = get_exec_file_path(graph_exec_id, local_video_path)
        audio_abspath = get_exec_file_path(graph_exec_id, local_audio_path)

        # 2) Load video + audio with moviepy
        strip_chapters_inplace(video_abspath)
        strip_chapters_inplace(audio_abspath)
        video_clip = None
        audio_clip = None
        final_clip = None
        try:
            video_clip = VideoFileClip(video_abspath)
            audio_clip = AudioFileClip(audio_abspath)
            # Optionally scale volume
            if input_data.volume != 1.0:
                audio_clip = audio_clip.with_volume_scaled(input_data.volume)

            # 3) Attach the new audio track
            final_clip = video_clip.with_audio(audio_clip)

            # 4) Write to output file
            source = extract_source_name(local_video_path)
            output_filename = MediaFileType(f"{node_exec_id}_with_audio_{source}.mp4")
            output_abspath = get_exec_file_path(graph_exec_id, output_filename)
            final_clip.write_videofile(
                output_abspath, codec="libx264", audio_codec="aac"
            )
        finally:
            if final_clip:
                final_clip.close()
            if audio_clip:
                audio_clip.close()
            if video_clip:
                video_clip.close()

        # 5) Return output - for_block_output returns workspace:// if available, else data URI
        video_out = await store_media_file(
            file=output_filename,
            execution_context=execution_context,
            return_format="for_block_output",
        )

        yield "video_out", video_out
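All of the video blocks in this diff share the same I/O pattern: persist the input into the graph's execution directory, process it with moviepy, then persist the result for downstream blocks. A minimal sketch of that round trip, using only the helpers imported above (the comments on the return formats restate what the block code implies, nothing more):

from backend.util.file import MediaFileType, get_exec_file_path, store_media_file

async def roundtrip(execution_context, media_in: MediaFileType) -> MediaFileType:
    # "for_local_processing" -> a path relative to the graph's exec directory
    local_path = await store_media_file(
        file=media_in,
        execution_context=execution_context,
        return_format="for_local_processing",
    )
    abspath = get_exec_file_path(execution_context.graph_exec_id, local_path)
    # ... process abspath with moviepy, writing a new file beside it ...
    # "for_block_output" -> workspace:// reference if available, else a data URI
    return await store_media_file(
        file=local_path,
        execution_context=execution_context,
        return_format="for_block_output",
    )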
167 autogpt_platform/backend/backend/blocks/video/clip.py (new file)
@@ -0,0 +1,167 @@
"""VideoClipBlock - Extract a segment from a video file."""
|
||||||
|
|
||||||
|
from typing import Literal
|
||||||
|
|
||||||
|
from moviepy.video.io.VideoFileClip import VideoFileClip
|
||||||
|
|
||||||
|
from backend.blocks.video._utils import (
|
||||||
|
extract_source_name,
|
||||||
|
get_video_codecs,
|
||||||
|
strip_chapters_inplace,
|
||||||
|
)
|
||||||
|
from backend.data.block import (
|
||||||
|
Block,
|
||||||
|
BlockCategory,
|
||||||
|
BlockOutput,
|
||||||
|
BlockSchemaInput,
|
||||||
|
BlockSchemaOutput,
|
||||||
|
)
|
||||||
|
from backend.data.execution import ExecutionContext
|
||||||
|
from backend.data.model import SchemaField
|
||||||
|
from backend.util.exceptions import BlockExecutionError
|
||||||
|
from backend.util.file import MediaFileType, get_exec_file_path, store_media_file
|
||||||
|
|
||||||
|
|
||||||
|
class VideoClipBlock(Block):
|
||||||
|
"""Extract a time segment from a video."""
|
||||||
|
|
||||||
|
class Input(BlockSchemaInput):
|
||||||
|
video_in: MediaFileType = SchemaField(
|
||||||
|
description="Input video (URL, data URI, or local path)"
|
||||||
|
)
|
||||||
|
start_time: float = SchemaField(description="Start time in seconds", ge=0.0)
|
||||||
|
end_time: float = SchemaField(description="End time in seconds", ge=0.0)
|
||||||
|
output_format: Literal["mp4", "webm", "mkv", "mov"] = SchemaField(
|
||||||
|
description="Output format", default="mp4", advanced=True
|
||||||
|
)
|
||||||
|
|
||||||
|
class Output(BlockSchemaOutput):
|
||||||
|
video_out: MediaFileType = SchemaField(
|
||||||
|
description="Clipped video file (path or data URI)"
|
||||||
|
)
|
||||||
|
duration: float = SchemaField(description="Clip duration in seconds")
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
super().__init__(
|
||||||
|
id="8f539119-e580-4d86-ad41-86fbcb22abb1",
|
||||||
|
description="Extract a time segment from a video",
|
||||||
|
categories={BlockCategory.MULTIMEDIA},
|
||||||
|
input_schema=self.Input,
|
||||||
|
output_schema=self.Output,
|
||||||
|
test_input={
|
||||||
|
"video_in": "/tmp/test.mp4",
|
||||||
|
"start_time": 0.0,
|
||||||
|
"end_time": 10.0,
|
||||||
|
},
|
||||||
|
test_output=[("video_out", str), ("duration", float)],
|
||||||
|
test_mock={
|
||||||
|
"_clip_video": lambda *args: 10.0,
|
||||||
|
"_store_input_video": lambda *args, **kwargs: "test.mp4",
|
||||||
|
"_store_output_video": lambda *args, **kwargs: "clip_test.mp4",
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
async def _store_input_video(
|
||||||
|
self, execution_context: ExecutionContext, file: MediaFileType
|
||||||
|
) -> MediaFileType:
|
||||||
|
"""Store input video. Extracted for testability."""
|
||||||
|
return await store_media_file(
|
||||||
|
file=file,
|
||||||
|
execution_context=execution_context,
|
||||||
|
return_format="for_local_processing",
|
||||||
|
)
|
||||||
|
|
||||||
|
async def _store_output_video(
|
||||||
|
self, execution_context: ExecutionContext, file: MediaFileType
|
||||||
|
) -> MediaFileType:
|
||||||
|
"""Store output video. Extracted for testability."""
|
||||||
|
return await store_media_file(
|
||||||
|
file=file,
|
||||||
|
execution_context=execution_context,
|
||||||
|
return_format="for_block_output",
|
||||||
|
)
|
||||||
|
|
||||||
|
def _clip_video(
|
||||||
|
self,
|
||||||
|
video_abspath: str,
|
||||||
|
output_abspath: str,
|
||||||
|
start_time: float,
|
||||||
|
end_time: float,
|
||||||
|
) -> float:
|
||||||
|
"""Extract a clip from a video. Extracted for testability."""
|
||||||
|
clip = None
|
||||||
|
subclip = None
|
||||||
|
try:
|
||||||
|
strip_chapters_inplace(video_abspath)
|
||||||
|
clip = VideoFileClip(video_abspath)
|
||||||
|
subclip = clip.subclipped(start_time, end_time)
|
||||||
|
video_codec, audio_codec = get_video_codecs(output_abspath)
|
||||||
|
subclip.write_videofile(
|
||||||
|
output_abspath, codec=video_codec, audio_codec=audio_codec
|
||||||
|
)
|
||||||
|
return subclip.duration
|
||||||
|
finally:
|
||||||
|
if subclip:
|
||||||
|
subclip.close()
|
||||||
|
if clip:
|
||||||
|
clip.close()
|
||||||
|
|
||||||
|
async def run(
|
||||||
|
self,
|
||||||
|
input_data: Input,
|
||||||
|
*,
|
||||||
|
execution_context: ExecutionContext,
|
||||||
|
node_exec_id: str,
|
||||||
|
**kwargs,
|
||||||
|
) -> BlockOutput:
|
||||||
|
# Validate time range
|
||||||
|
if input_data.end_time <= input_data.start_time:
|
||||||
|
raise BlockExecutionError(
|
||||||
|
message=f"end_time ({input_data.end_time}) must be greater than start_time ({input_data.start_time})",
|
||||||
|
block_name=self.name,
|
||||||
|
block_id=str(self.id),
|
||||||
|
)
|
||||||
|
|
||||||
|
try:
|
||||||
|
assert execution_context.graph_exec_id is not None
|
||||||
|
|
||||||
|
# Store the input video locally
|
||||||
|
local_video_path = await self._store_input_video(
|
||||||
|
execution_context, input_data.video_in
|
||||||
|
)
|
||||||
|
video_abspath = get_exec_file_path(
|
||||||
|
execution_context.graph_exec_id, local_video_path
|
||||||
|
)
|
||||||
|
|
||||||
|
# Build output path
|
||||||
|
source = extract_source_name(local_video_path)
|
||||||
|
output_filename = MediaFileType(
|
||||||
|
f"{node_exec_id}_clip_{source}.{input_data.output_format}"
|
||||||
|
)
|
||||||
|
output_abspath = get_exec_file_path(
|
||||||
|
execution_context.graph_exec_id, output_filename
|
||||||
|
)
|
||||||
|
|
||||||
|
duration = self._clip_video(
|
||||||
|
video_abspath,
|
||||||
|
output_abspath,
|
||||||
|
input_data.start_time,
|
||||||
|
input_data.end_time,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Return as workspace path or data URI based on context
|
||||||
|
video_out = await self._store_output_video(
|
||||||
|
execution_context, output_filename
|
||||||
|
)
|
||||||
|
|
||||||
|
yield "video_out", video_out
|
||||||
|
yield "duration", duration
|
||||||
|
|
||||||
|
except BlockExecutionError:
|
||||||
|
raise
|
||||||
|
except Exception as e:
|
||||||
|
raise BlockExecutionError(
|
||||||
|
message=f"Failed to clip video: {e}",
|
||||||
|
block_name=self.name,
|
||||||
|
block_id=str(self.id),
|
||||||
|
) from e
|
||||||
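`get_video_codecs` comes from `_utils`, which is not part of this diff. Judging from the call sites (output path in, `(video_codec, audio_codec)` out), one plausible sketch of it — the mapping below is an assumption, not the actual helper — is:

import os

def get_video_codecs(output_path: str) -> tuple[str, str]:
    # Hypothetical reconstruction of the unshown helper: pick ffmpeg codec
    # names based on the output container's extension.
    ext = os.path.splitext(output_path)[1].lower()
    if ext == ".webm":
        return "libvpx-vp9", "libvorbis"  # WebM only allows VP8/VP9/AV1 + Vorbis/Opus
    return "libx264", "aac"  # mp4/mkv/mov all accept H.264 + AAC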
227 autogpt_platform/backend/backend/blocks/video/concat.py (new file)
@@ -0,0 +1,227 @@
"""VideoConcatBlock - Concatenate multiple video clips into one."""
|
||||||
|
|
||||||
|
from typing import Literal
|
||||||
|
|
||||||
|
from moviepy import concatenate_videoclips
|
||||||
|
from moviepy.video.fx import CrossFadeIn, CrossFadeOut, FadeIn, FadeOut
|
||||||
|
from moviepy.video.io.VideoFileClip import VideoFileClip
|
||||||
|
|
||||||
|
from backend.blocks.video._utils import (
|
||||||
|
extract_source_name,
|
||||||
|
get_video_codecs,
|
||||||
|
strip_chapters_inplace,
|
||||||
|
)
|
||||||
|
from backend.data.block import (
|
||||||
|
Block,
|
||||||
|
BlockCategory,
|
||||||
|
BlockOutput,
|
||||||
|
BlockSchemaInput,
|
||||||
|
BlockSchemaOutput,
|
||||||
|
)
|
||||||
|
from backend.data.execution import ExecutionContext
|
||||||
|
from backend.data.model import SchemaField
|
||||||
|
from backend.util.exceptions import BlockExecutionError
|
||||||
|
from backend.util.file import MediaFileType, get_exec_file_path, store_media_file
|
||||||
|
|
||||||
|
|
||||||
|
class VideoConcatBlock(Block):
|
||||||
|
"""Merge multiple video clips into one continuous video."""
|
||||||
|
|
||||||
|
class Input(BlockSchemaInput):
|
||||||
|
videos: list[MediaFileType] = SchemaField(
|
||||||
|
description="List of video files to concatenate (in order)"
|
||||||
|
)
|
||||||
|
transition: Literal["none", "crossfade", "fade_black"] = SchemaField(
|
||||||
|
description="Transition between clips", default="none"
|
||||||
|
)
|
||||||
|
transition_duration: int = SchemaField(
|
||||||
|
description="Transition duration in seconds",
|
||||||
|
default=1,
|
||||||
|
ge=0,
|
||||||
|
advanced=True,
|
||||||
|
)
|
||||||
|
output_format: Literal["mp4", "webm", "mkv", "mov"] = SchemaField(
|
||||||
|
description="Output format", default="mp4", advanced=True
|
||||||
|
)
|
||||||
|
|
||||||
|
class Output(BlockSchemaOutput):
|
||||||
|
video_out: MediaFileType = SchemaField(
|
||||||
|
description="Concatenated video file (path or data URI)"
|
||||||
|
)
|
||||||
|
total_duration: float = SchemaField(description="Total duration in seconds")
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
super().__init__(
|
||||||
|
id="9b0f531a-1118-487f-aeec-3fa63ea8900a",
|
||||||
|
description="Merge multiple video clips into one continuous video",
|
||||||
|
categories={BlockCategory.MULTIMEDIA},
|
||||||
|
input_schema=self.Input,
|
||||||
|
output_schema=self.Output,
|
||||||
|
test_input={
|
||||||
|
"videos": ["/tmp/a.mp4", "/tmp/b.mp4"],
|
||||||
|
},
|
||||||
|
test_output=[
|
||||||
|
("video_out", str),
|
||||||
|
("total_duration", float),
|
||||||
|
],
|
||||||
|
test_mock={
|
||||||
|
"_concat_videos": lambda *args: 20.0,
|
||||||
|
"_store_input_video": lambda *args, **kwargs: "test.mp4",
|
||||||
|
"_store_output_video": lambda *args, **kwargs: "concat_test.mp4",
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
async def _store_input_video(
|
||||||
|
self, execution_context: ExecutionContext, file: MediaFileType
|
||||||
|
) -> MediaFileType:
|
||||||
|
"""Store input video. Extracted for testability."""
|
||||||
|
return await store_media_file(
|
||||||
|
file=file,
|
||||||
|
execution_context=execution_context,
|
||||||
|
return_format="for_local_processing",
|
||||||
|
)
|
||||||
|
|
||||||
|
async def _store_output_video(
|
||||||
|
self, execution_context: ExecutionContext, file: MediaFileType
|
||||||
|
) -> MediaFileType:
|
||||||
|
"""Store output video. Extracted for testability."""
|
||||||
|
return await store_media_file(
|
||||||
|
file=file,
|
||||||
|
execution_context=execution_context,
|
||||||
|
return_format="for_block_output",
|
||||||
|
)
|
||||||
|
|
||||||
|
def _concat_videos(
|
||||||
|
self,
|
||||||
|
video_abspaths: list[str],
|
||||||
|
output_abspath: str,
|
||||||
|
transition: str,
|
||||||
|
transition_duration: int,
|
||||||
|
) -> float:
|
||||||
|
"""Concatenate videos. Extracted for testability.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Total duration of the concatenated video.
|
||||||
|
"""
|
||||||
|
clips = []
|
||||||
|
faded_clips = []
|
||||||
|
final = None
|
||||||
|
try:
|
||||||
|
# Load clips
|
||||||
|
for v in video_abspaths:
|
||||||
|
strip_chapters_inplace(v)
|
||||||
|
clips.append(VideoFileClip(v))
|
||||||
|
|
||||||
|
# Validate transition_duration against shortest clip
|
||||||
|
if transition in {"crossfade", "fade_black"} and transition_duration > 0:
|
||||||
|
min_duration = min(c.duration for c in clips)
|
||||||
|
if transition_duration >= min_duration:
|
||||||
|
raise BlockExecutionError(
|
||||||
|
message=(
|
||||||
|
f"transition_duration ({transition_duration}s) must be "
|
||||||
|
f"shorter than the shortest clip ({min_duration:.2f}s)"
|
||||||
|
),
|
||||||
|
block_name=self.name,
|
||||||
|
block_id=str(self.id),
|
||||||
|
)
|
||||||
|
|
||||||
|
if transition == "crossfade":
|
||||||
|
for i, clip in enumerate(clips):
|
||||||
|
effects = []
|
||||||
|
if i > 0:
|
||||||
|
effects.append(CrossFadeIn(transition_duration))
|
||||||
|
if i < len(clips) - 1:
|
||||||
|
effects.append(CrossFadeOut(transition_duration))
|
||||||
|
if effects:
|
||||||
|
clip = clip.with_effects(effects)
|
||||||
|
faded_clips.append(clip)
|
||||||
|
final = concatenate_videoclips(
|
||||||
|
faded_clips,
|
||||||
|
method="compose",
|
||||||
|
padding=-transition_duration,
|
||||||
|
)
|
||||||
|
elif transition == "fade_black":
|
||||||
|
for clip in clips:
|
||||||
|
faded = clip.with_effects(
|
||||||
|
[FadeIn(transition_duration), FadeOut(transition_duration)]
|
||||||
|
)
|
||||||
|
faded_clips.append(faded)
|
||||||
|
final = concatenate_videoclips(faded_clips)
|
||||||
|
else:
|
||||||
|
final = concatenate_videoclips(clips)
|
||||||
|
|
||||||
|
video_codec, audio_codec = get_video_codecs(output_abspath)
|
||||||
|
final.write_videofile(
|
||||||
|
output_abspath, codec=video_codec, audio_codec=audio_codec
|
||||||
|
)
|
||||||
|
|
||||||
|
return final.duration
|
||||||
|
finally:
|
||||||
|
if final:
|
||||||
|
final.close()
|
||||||
|
for clip in faded_clips:
|
||||||
|
clip.close()
|
||||||
|
for clip in clips:
|
||||||
|
clip.close()
|
||||||
|
|
||||||
|
async def run(
|
||||||
|
self,
|
||||||
|
input_data: Input,
|
||||||
|
*,
|
||||||
|
execution_context: ExecutionContext,
|
||||||
|
node_exec_id: str,
|
||||||
|
**kwargs,
|
||||||
|
) -> BlockOutput:
|
||||||
|
# Validate minimum clips
|
||||||
|
if len(input_data.videos) < 2:
|
||||||
|
raise BlockExecutionError(
|
||||||
|
message="At least 2 videos are required for concatenation",
|
||||||
|
block_name=self.name,
|
||||||
|
block_id=str(self.id),
|
||||||
|
)
|
||||||
|
|
||||||
|
try:
|
||||||
|
assert execution_context.graph_exec_id is not None
|
||||||
|
|
||||||
|
# Store all input videos locally
|
||||||
|
video_abspaths = []
|
||||||
|
for video in input_data.videos:
|
||||||
|
local_path = await self._store_input_video(execution_context, video)
|
||||||
|
video_abspaths.append(
|
||||||
|
get_exec_file_path(execution_context.graph_exec_id, local_path)
|
||||||
|
)
|
||||||
|
|
||||||
|
# Build output path
|
||||||
|
source = (
|
||||||
|
extract_source_name(video_abspaths[0]) if video_abspaths else "video"
|
||||||
|
)
|
||||||
|
output_filename = MediaFileType(
|
||||||
|
f"{node_exec_id}_concat_{source}.{input_data.output_format}"
|
||||||
|
)
|
||||||
|
output_abspath = get_exec_file_path(
|
||||||
|
execution_context.graph_exec_id, output_filename
|
||||||
|
)
|
||||||
|
|
||||||
|
total_duration = self._concat_videos(
|
||||||
|
video_abspaths,
|
||||||
|
output_abspath,
|
||||||
|
input_data.transition,
|
||||||
|
input_data.transition_duration,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Return as workspace path or data URI based on context
|
||||||
|
video_out = await self._store_output_video(
|
||||||
|
execution_context, output_filename
|
||||||
|
)
|
||||||
|
|
||||||
|
yield "video_out", video_out
|
||||||
|
yield "total_duration", total_duration
|
||||||
|
|
||||||
|
except BlockExecutionError:
|
||||||
|
raise
|
||||||
|
except Exception as e:
|
||||||
|
raise BlockExecutionError(
|
||||||
|
message=f"Failed to concatenate videos: {e}",
|
||||||
|
block_name=self.name,
|
||||||
|
block_id=str(self.id),
|
||||||
|
) from e
|
||||||
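In the crossfade branch above, `method="compose"` with `padding=-transition_duration` starts each clip `transition_duration` seconds before the previous one ends, so the output should run `sum(durations) - (n - 1) * transition_duration` seconds. A quick worked example (illustration only, not part of the diff):

durations = [10.0, 8.0, 12.0]      # three input clips, in seconds
transition_duration = 1
total = sum(durations) - (len(durations) - 1) * transition_duration
print(total)  # 28.0 -- each of the two joins overlaps by 1 second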
172 autogpt_platform/backend/backend/blocks/video/download.py (new file)
@@ -0,0 +1,172 @@
"""VideoDownloadBlock - Download video from URL (YouTube, Vimeo, news sites, direct links)."""
|
||||||
|
|
||||||
|
import os
|
||||||
|
import typing
|
||||||
|
from typing import Literal
|
||||||
|
|
||||||
|
import yt_dlp
|
||||||
|
|
||||||
|
if typing.TYPE_CHECKING:
|
||||||
|
from yt_dlp import _Params
|
||||||
|
|
||||||
|
from backend.data.block import (
|
||||||
|
Block,
|
||||||
|
BlockCategory,
|
||||||
|
BlockOutput,
|
||||||
|
BlockSchemaInput,
|
||||||
|
BlockSchemaOutput,
|
||||||
|
)
|
||||||
|
from backend.data.execution import ExecutionContext
|
||||||
|
from backend.data.model import SchemaField
|
||||||
|
from backend.util.exceptions import BlockExecutionError
|
||||||
|
from backend.util.file import MediaFileType, get_exec_file_path, store_media_file
|
||||||
|
|
||||||
|
|
||||||
|
class VideoDownloadBlock(Block):
|
||||||
|
"""Download video from URL using yt-dlp."""
|
||||||
|
|
||||||
|
class Input(BlockSchemaInput):
|
||||||
|
url: str = SchemaField(
|
||||||
|
description="URL of the video to download (YouTube, Vimeo, direct link, etc.)",
|
||||||
|
placeholder="https://www.youtube.com/watch?v=...",
|
||||||
|
)
|
||||||
|
quality: Literal["best", "1080p", "720p", "480p", "audio_only"] = SchemaField(
|
||||||
|
description="Video quality preference", default="720p"
|
||||||
|
)
|
||||||
|
output_format: Literal["mp4", "webm", "mkv"] = SchemaField(
|
||||||
|
description="Output video format", default="mp4", advanced=True
|
||||||
|
)
|
||||||
|
|
||||||
|
class Output(BlockSchemaOutput):
|
||||||
|
video_file: MediaFileType = SchemaField(
|
||||||
|
description="Downloaded video (path or data URI)"
|
||||||
|
)
|
||||||
|
duration: float = SchemaField(description="Video duration in seconds")
|
||||||
|
title: str = SchemaField(description="Video title from source")
|
||||||
|
source_url: str = SchemaField(description="Original source URL")
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
super().__init__(
|
||||||
|
id="c35daabb-cd60-493b-b9ad-51f1fe4b50c4",
|
||||||
|
description="Download video from URL (YouTube, Vimeo, news sites, direct links)",
|
||||||
|
categories={BlockCategory.MULTIMEDIA},
|
||||||
|
input_schema=self.Input,
|
||||||
|
output_schema=self.Output,
|
||||||
|
disabled=True, # Disable until we can sandbox yt-dlp and handle security implications
|
||||||
|
test_input={
|
||||||
|
"url": "https://www.youtube.com/watch?v=dQw4w9WgXcQ",
|
||||||
|
"quality": "480p",
|
||||||
|
},
|
||||||
|
test_output=[
|
||||||
|
("video_file", str),
|
||||||
|
("duration", float),
|
||||||
|
("title", str),
|
||||||
|
("source_url", str),
|
||||||
|
],
|
||||||
|
test_mock={
|
||||||
|
"_download_video": lambda *args: (
|
||||||
|
"video.mp4",
|
||||||
|
212.0,
|
||||||
|
"Test Video",
|
||||||
|
),
|
||||||
|
"_store_output_video": lambda *args, **kwargs: "video.mp4",
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
async def _store_output_video(
|
||||||
|
self, execution_context: ExecutionContext, file: MediaFileType
|
||||||
|
) -> MediaFileType:
|
||||||
|
"""Store output video. Extracted for testability."""
|
||||||
|
return await store_media_file(
|
||||||
|
file=file,
|
||||||
|
execution_context=execution_context,
|
||||||
|
return_format="for_block_output",
|
||||||
|
)
|
||||||
|
|
||||||
|
def _get_format_string(self, quality: str) -> str:
|
||||||
|
formats = {
|
||||||
|
"best": "bestvideo+bestaudio/best",
|
||||||
|
"1080p": "bestvideo[height<=1080]+bestaudio/best[height<=1080]",
|
||||||
|
"720p": "bestvideo[height<=720]+bestaudio/best[height<=720]",
|
||||||
|
"480p": "bestvideo[height<=480]+bestaudio/best[height<=480]",
|
||||||
|
"audio_only": "bestaudio/best",
|
||||||
|
}
|
||||||
|
return formats.get(quality, formats["720p"])
|
||||||
|
|
||||||
|
def _download_video(
|
||||||
|
self,
|
||||||
|
url: str,
|
||||||
|
quality: str,
|
||||||
|
output_format: str,
|
||||||
|
output_dir: str,
|
||||||
|
node_exec_id: str,
|
||||||
|
) -> tuple[str, float, str]:
|
||||||
|
"""Download video. Extracted for testability."""
|
||||||
|
output_template = os.path.join(
|
||||||
|
output_dir, f"{node_exec_id}_%(title).50s.%(ext)s"
|
||||||
|
)
|
||||||
|
|
||||||
|
ydl_opts: "_Params" = {
|
||||||
|
"format": f"{self._get_format_string(quality)}/best",
|
||||||
|
"outtmpl": output_template,
|
||||||
|
"merge_output_format": output_format,
|
||||||
|
"quiet": True,
|
||||||
|
"no_warnings": True,
|
||||||
|
}
|
||||||
|
|
||||||
|
with yt_dlp.YoutubeDL(ydl_opts) as ydl:
|
||||||
|
info = ydl.extract_info(url, download=True)
|
||||||
|
video_path = ydl.prepare_filename(info)
|
||||||
|
|
||||||
|
# Handle format conversion in filename
|
||||||
|
if not video_path.endswith(f".{output_format}"):
|
||||||
|
video_path = video_path.rsplit(".", 1)[0] + f".{output_format}"
|
||||||
|
|
||||||
|
# Return just the filename, not the full path
|
||||||
|
filename = os.path.basename(video_path)
|
||||||
|
|
||||||
|
return (
|
||||||
|
filename,
|
||||||
|
info.get("duration") or 0.0,
|
||||||
|
info.get("title") or "Unknown",
|
||||||
|
)
|
||||||
|
|
||||||
|
async def run(
|
||||||
|
self,
|
||||||
|
input_data: Input,
|
||||||
|
*,
|
||||||
|
execution_context: ExecutionContext,
|
||||||
|
node_exec_id: str,
|
||||||
|
**kwargs,
|
||||||
|
) -> BlockOutput:
|
||||||
|
try:
|
||||||
|
assert execution_context.graph_exec_id is not None
|
||||||
|
|
||||||
|
# Get the exec file directory
|
||||||
|
output_dir = get_exec_file_path(execution_context.graph_exec_id, "")
|
||||||
|
os.makedirs(output_dir, exist_ok=True)
|
||||||
|
|
||||||
|
filename, duration, title = self._download_video(
|
||||||
|
input_data.url,
|
||||||
|
input_data.quality,
|
||||||
|
input_data.output_format,
|
||||||
|
output_dir,
|
||||||
|
node_exec_id,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Return as workspace path or data URI based on context
|
||||||
|
video_out = await self._store_output_video(
|
||||||
|
execution_context, MediaFileType(filename)
|
||||||
|
)
|
||||||
|
|
||||||
|
yield "video_file", video_out
|
||||||
|
yield "duration", duration
|
||||||
|
yield "title", title
|
||||||
|
yield "source_url", input_data.url
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
raise BlockExecutionError(
|
||||||
|
message=f"Failed to download video: {e}",
|
||||||
|
block_name=self.name,
|
||||||
|
block_id=str(self.id),
|
||||||
|
) from e
|
||||||
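The selectors built by `_get_format_string` use yt-dlp's standard format syntax: `bestvideo[height<=720]+bestaudio` requests the best video stream capped at 720p merged with the best audio stream, and each `/`-separated alternative is a fallback tried in order. A standalone sketch with the same options (the URL is a placeholder):

import yt_dlp

ydl_opts = {
    "format": "bestvideo[height<=720]+bestaudio/best[height<=720]/best",
    "outtmpl": "%(title).50s.%(ext)s",   # truncate the title to 50 characters
    "merge_output_format": "mp4",        # merge/remux the streams into mp4 via ffmpeg
    "quiet": True,
}
with yt_dlp.YoutubeDL(ydl_opts) as ydl:
    info = ydl.extract_info("https://example.com/watch?v=PLACEHOLDER", download=True)
    print(info.get("title"), info.get("duration"))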
77 autogpt_platform/backend/backend/blocks/video/duration.py (new file)
@@ -0,0 +1,77 @@
"""MediaDurationBlock - Get the duration of a media file."""
|
||||||
|
|
||||||
|
from moviepy.audio.io.AudioFileClip import AudioFileClip
|
||||||
|
from moviepy.video.io.VideoFileClip import VideoFileClip
|
||||||
|
|
||||||
|
from backend.blocks.video._utils import strip_chapters_inplace
|
||||||
|
from backend.data.block import (
|
||||||
|
Block,
|
||||||
|
BlockCategory,
|
||||||
|
BlockOutput,
|
||||||
|
BlockSchemaInput,
|
||||||
|
BlockSchemaOutput,
|
||||||
|
)
|
||||||
|
from backend.data.execution import ExecutionContext
|
||||||
|
from backend.data.model import SchemaField
|
||||||
|
from backend.util.file import MediaFileType, get_exec_file_path, store_media_file
|
||||||
|
|
||||||
|
|
||||||
|
class MediaDurationBlock(Block):
|
||||||
|
"""Get the duration of a media file (video or audio)."""
|
||||||
|
|
||||||
|
class Input(BlockSchemaInput):
|
||||||
|
media_in: MediaFileType = SchemaField(
|
||||||
|
description="Media input (URL, data URI, or local path)."
|
||||||
|
)
|
||||||
|
is_video: bool = SchemaField(
|
||||||
|
description="Whether the media is a video (True) or audio (False).",
|
||||||
|
default=True,
|
||||||
|
)
|
||||||
|
|
||||||
|
class Output(BlockSchemaOutput):
|
||||||
|
duration: float = SchemaField(
|
||||||
|
description="Duration of the media file (in seconds)."
|
||||||
|
)
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
super().__init__(
|
||||||
|
id="d8b91fd4-da26-42d4-8ecb-8b196c6d84b6",
|
||||||
|
description="Block to get the duration of a media file.",
|
||||||
|
categories={BlockCategory.MULTIMEDIA},
|
||||||
|
input_schema=MediaDurationBlock.Input,
|
||||||
|
output_schema=MediaDurationBlock.Output,
|
||||||
|
)
|
||||||
|
|
||||||
|
async def run(
|
||||||
|
self,
|
||||||
|
input_data: Input,
|
||||||
|
*,
|
||||||
|
execution_context: ExecutionContext,
|
||||||
|
**kwargs,
|
||||||
|
) -> BlockOutput:
|
||||||
|
# 1) Store the input media locally
|
||||||
|
local_media_path = await store_media_file(
|
||||||
|
file=input_data.media_in,
|
||||||
|
execution_context=execution_context,
|
||||||
|
return_format="for_local_processing",
|
||||||
|
)
|
||||||
|
assert execution_context.graph_exec_id is not None
|
||||||
|
media_abspath = get_exec_file_path(
|
||||||
|
execution_context.graph_exec_id, local_media_path
|
||||||
|
)
|
||||||
|
|
||||||
|
# 2) Strip chapters to avoid MoviePy crash, then load the clip
|
||||||
|
strip_chapters_inplace(media_abspath)
|
||||||
|
clip = None
|
||||||
|
try:
|
||||||
|
if input_data.is_video:
|
||||||
|
clip = VideoFileClip(media_abspath)
|
||||||
|
else:
|
||||||
|
clip = AudioFileClip(media_abspath)
|
||||||
|
|
||||||
|
duration = clip.duration
|
||||||
|
finally:
|
||||||
|
if clip:
|
||||||
|
clip.close()
|
||||||
|
|
||||||
|
yield "duration", duration
|
||||||
115 autogpt_platform/backend/backend/blocks/video/loop.py (new file)
@@ -0,0 +1,115 @@
"""LoopVideoBlock - Loop a video to a given duration or number of repeats."""
|
||||||
|
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
from moviepy.video.fx.Loop import Loop
|
||||||
|
from moviepy.video.io.VideoFileClip import VideoFileClip
|
||||||
|
|
||||||
|
from backend.blocks.video._utils import extract_source_name, strip_chapters_inplace
|
||||||
|
from backend.data.block import (
|
||||||
|
Block,
|
||||||
|
BlockCategory,
|
||||||
|
BlockOutput,
|
||||||
|
BlockSchemaInput,
|
||||||
|
BlockSchemaOutput,
|
||||||
|
)
|
||||||
|
from backend.data.execution import ExecutionContext
|
||||||
|
from backend.data.model import SchemaField
|
||||||
|
from backend.util.file import MediaFileType, get_exec_file_path, store_media_file
|
||||||
|
|
||||||
|
|
||||||
|
class LoopVideoBlock(Block):
|
||||||
|
"""Loop (repeat) a video clip until a given duration or number of loops."""
|
||||||
|
|
||||||
|
class Input(BlockSchemaInput):
|
||||||
|
video_in: MediaFileType = SchemaField(
|
||||||
|
description="The input video (can be a URL, data URI, or local path)."
|
||||||
|
)
|
||||||
|
duration: Optional[float] = SchemaField(
|
||||||
|
description="Target duration (in seconds) to loop the video to. Either duration or n_loops must be provided.",
|
||||||
|
default=None,
|
||||||
|
ge=0.0,
|
||||||
|
le=3600.0, # Max 1 hour to prevent disk exhaustion
|
||||||
|
)
|
||||||
|
n_loops: Optional[int] = SchemaField(
|
||||||
|
description="Number of times to repeat the video. Either n_loops or duration must be provided.",
|
||||||
|
default=None,
|
||||||
|
ge=1,
|
||||||
|
le=10, # Max 10 loops to prevent disk exhaustion
|
||||||
|
)
|
||||||
|
|
||||||
|
class Output(BlockSchemaOutput):
|
||||||
|
video_out: MediaFileType = SchemaField(
|
||||||
|
description="Looped video returned either as a relative path or a data URI."
|
||||||
|
)
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
super().__init__(
|
||||||
|
id="8bf9eef6-5451-4213-b265-25306446e94b",
|
||||||
|
description="Block to loop a video to a given duration or number of repeats.",
|
||||||
|
categories={BlockCategory.MULTIMEDIA},
|
||||||
|
input_schema=LoopVideoBlock.Input,
|
||||||
|
output_schema=LoopVideoBlock.Output,
|
||||||
|
)
|
||||||
|
|
||||||
|
async def run(
|
||||||
|
self,
|
||||||
|
input_data: Input,
|
||||||
|
*,
|
||||||
|
execution_context: ExecutionContext,
|
||||||
|
**kwargs,
|
||||||
|
) -> BlockOutput:
|
||||||
|
assert execution_context.graph_exec_id is not None
|
||||||
|
assert execution_context.node_exec_id is not None
|
||||||
|
graph_exec_id = execution_context.graph_exec_id
|
||||||
|
node_exec_id = execution_context.node_exec_id
|
||||||
|
|
||||||
|
# 1) Store the input video locally
|
||||||
|
local_video_path = await store_media_file(
|
||||||
|
file=input_data.video_in,
|
||||||
|
execution_context=execution_context,
|
||||||
|
return_format="for_local_processing",
|
||||||
|
)
|
||||||
|
input_abspath = get_exec_file_path(graph_exec_id, local_video_path)
|
||||||
|
|
||||||
|
# 2) Load the clip
|
||||||
|
strip_chapters_inplace(input_abspath)
|
||||||
|
clip = None
|
||||||
|
looped_clip = None
|
||||||
|
try:
|
||||||
|
clip = VideoFileClip(input_abspath)
|
||||||
|
|
||||||
|
# 3) Apply the loop effect
|
||||||
|
if input_data.duration:
|
||||||
|
# Loop until we reach the specified duration
|
||||||
|
looped_clip = clip.with_effects([Loop(duration=input_data.duration)])
|
||||||
|
elif input_data.n_loops:
|
||||||
|
looped_clip = clip.with_effects([Loop(n=input_data.n_loops)])
|
||||||
|
else:
|
||||||
|
raise ValueError("Either 'duration' or 'n_loops' must be provided.")
|
||||||
|
|
||||||
|
assert isinstance(looped_clip, VideoFileClip)
|
||||||
|
|
||||||
|
# 4) Save the looped output
|
||||||
|
source = extract_source_name(local_video_path)
|
||||||
|
output_filename = MediaFileType(f"{node_exec_id}_looped_{source}.mp4")
|
||||||
|
output_abspath = get_exec_file_path(graph_exec_id, output_filename)
|
||||||
|
|
||||||
|
looped_clip = looped_clip.with_audio(clip.audio)
|
||||||
|
looped_clip.write_videofile(
|
||||||
|
output_abspath, codec="libx264", audio_codec="aac"
|
||||||
|
)
|
||||||
|
finally:
|
||||||
|
if looped_clip:
|
||||||
|
looped_clip.close()
|
||||||
|
if clip:
|
||||||
|
clip.close()
|
||||||
|
|
||||||
|
# Return output - for_block_output returns workspace:// if available, else data URI
|
||||||
|
video_out = await store_media_file(
|
||||||
|
file=output_filename,
|
||||||
|
execution_context=execution_context,
|
||||||
|
return_format="for_block_output",
|
||||||
|
)
|
||||||
|
|
||||||
|
yield "video_out", video_out
|
||||||
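One subtlety above: the block reattaches the original clip's audio (`looped_clip.with_audio(clip.audio)`) rather than a looped copy, so the audio keeps its source length while the video repeats. The resulting durations, illustrated:

clip_duration = 4.0                        # seconds of source video (and audio)
n_loops = 3
video_duration = clip_duration * n_loops   # 12.0 s of looped video
audio_duration = clip_duration             # 4.0 s -- the audio track is not looped
print(video_duration, audio_duration)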
267 autogpt_platform/backend/backend/blocks/video/narration.py (new file)
@@ -0,0 +1,267 @@
"""VideoNarrationBlock - Generate AI voice narration and add to video."""
|
||||||
|
|
||||||
|
import os
|
||||||
|
from typing import Literal
|
||||||
|
|
||||||
|
from elevenlabs import ElevenLabs
|
||||||
|
from moviepy import CompositeAudioClip
|
||||||
|
from moviepy.audio.io.AudioFileClip import AudioFileClip
|
||||||
|
from moviepy.video.io.VideoFileClip import VideoFileClip
|
||||||
|
|
||||||
|
from backend.blocks.elevenlabs._auth import (
|
||||||
|
TEST_CREDENTIALS,
|
||||||
|
TEST_CREDENTIALS_INPUT,
|
||||||
|
ElevenLabsCredentials,
|
||||||
|
ElevenLabsCredentialsInput,
|
||||||
|
)
|
||||||
|
from backend.blocks.video._utils import (
|
||||||
|
extract_source_name,
|
||||||
|
get_video_codecs,
|
||||||
|
strip_chapters_inplace,
|
||||||
|
)
|
||||||
|
from backend.data.block import (
|
||||||
|
Block,
|
||||||
|
BlockCategory,
|
||||||
|
BlockOutput,
|
||||||
|
BlockSchemaInput,
|
||||||
|
BlockSchemaOutput,
|
||||||
|
)
|
||||||
|
from backend.data.execution import ExecutionContext
|
||||||
|
from backend.data.model import CredentialsField, SchemaField
|
||||||
|
from backend.util.exceptions import BlockExecutionError
|
||||||
|
from backend.util.file import MediaFileType, get_exec_file_path, store_media_file
|
||||||
|
|
||||||
|
|
||||||
|
class VideoNarrationBlock(Block):
|
||||||
|
"""Generate AI narration and add to video."""
|
||||||
|
|
||||||
|
class Input(BlockSchemaInput):
|
||||||
|
credentials: ElevenLabsCredentialsInput = CredentialsField(
|
||||||
|
description="ElevenLabs API key for voice synthesis"
|
||||||
|
)
|
||||||
|
video_in: MediaFileType = SchemaField(
|
||||||
|
description="Input video (URL, data URI, or local path)"
|
||||||
|
)
|
||||||
|
script: str = SchemaField(description="Narration script text")
|
||||||
|
voice_id: str = SchemaField(
|
||||||
|
description="ElevenLabs voice ID", default="21m00Tcm4TlvDq8ikWAM" # Rachel
|
||||||
|
)
|
||||||
|
model_id: Literal[
|
||||||
|
"eleven_multilingual_v2",
|
||||||
|
"eleven_flash_v2_5",
|
||||||
|
"eleven_turbo_v2_5",
|
||||||
|
"eleven_turbo_v2",
|
||||||
|
] = SchemaField(
|
||||||
|
description="ElevenLabs TTS model",
|
||||||
|
default="eleven_multilingual_v2",
|
||||||
|
)
|
||||||
|
mix_mode: Literal["replace", "mix", "ducking"] = SchemaField(
|
||||||
|
description="How to combine with original audio. 'ducking' applies stronger attenuation than 'mix'.",
|
||||||
|
default="ducking",
|
||||||
|
)
|
||||||
|
narration_volume: float = SchemaField(
|
||||||
|
description="Narration volume (0.0 to 2.0)",
|
||||||
|
default=1.0,
|
||||||
|
ge=0.0,
|
||||||
|
le=2.0,
|
||||||
|
advanced=True,
|
||||||
|
)
|
||||||
|
original_volume: float = SchemaField(
|
||||||
|
description="Original audio volume when mixing (0.0 to 1.0)",
|
||||||
|
default=0.3,
|
||||||
|
ge=0.0,
|
||||||
|
le=1.0,
|
||||||
|
advanced=True,
|
||||||
|
)
|
||||||
|
|
||||||
|
class Output(BlockSchemaOutput):
|
||||||
|
video_out: MediaFileType = SchemaField(
|
||||||
|
description="Video with narration (path or data URI)"
|
||||||
|
)
|
||||||
|
audio_file: MediaFileType = SchemaField(
|
||||||
|
description="Generated audio file (path or data URI)"
|
||||||
|
)
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
super().__init__(
|
||||||
|
id="3d036b53-859c-4b17-9826-ca340f736e0e",
|
||||||
|
description="Generate AI narration and add to video",
|
||||||
|
categories={BlockCategory.MULTIMEDIA, BlockCategory.AI},
|
||||||
|
input_schema=self.Input,
|
||||||
|
output_schema=self.Output,
|
||||||
|
test_input={
|
||||||
|
"video_in": "/tmp/test.mp4",
|
||||||
|
"script": "Hello world",
|
||||||
|
"credentials": TEST_CREDENTIALS_INPUT,
|
||||||
|
},
|
||||||
|
test_credentials=TEST_CREDENTIALS,
|
||||||
|
test_output=[("video_out", str), ("audio_file", str)],
|
||||||
|
test_mock={
|
||||||
|
"_generate_narration_audio": lambda *args: b"mock audio content",
|
||||||
|
"_add_narration_to_video": lambda *args: None,
|
||||||
|
"_store_input_video": lambda *args, **kwargs: "test.mp4",
|
||||||
|
"_store_output_video": lambda *args, **kwargs: "narrated_test.mp4",
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
async def _store_input_video(
|
||||||
|
self, execution_context: ExecutionContext, file: MediaFileType
|
||||||
|
) -> MediaFileType:
|
||||||
|
"""Store input video. Extracted for testability."""
|
||||||
|
return await store_media_file(
|
||||||
|
file=file,
|
||||||
|
execution_context=execution_context,
|
||||||
|
return_format="for_local_processing",
|
||||||
|
)
|
||||||
|
|
||||||
|
async def _store_output_video(
|
||||||
|
self, execution_context: ExecutionContext, file: MediaFileType
|
||||||
|
) -> MediaFileType:
|
||||||
|
"""Store output video. Extracted for testability."""
|
||||||
|
return await store_media_file(
|
||||||
|
file=file,
|
||||||
|
execution_context=execution_context,
|
||||||
|
return_format="for_block_output",
|
||||||
|
)
|
||||||
|
|
||||||
|
def _generate_narration_audio(
|
||||||
|
self, api_key: str, script: str, voice_id: str, model_id: str
|
||||||
|
) -> bytes:
|
||||||
|
"""Generate narration audio via ElevenLabs API."""
|
||||||
|
client = ElevenLabs(api_key=api_key)
|
||||||
|
audio_generator = client.text_to_speech.convert(
|
||||||
|
voice_id=voice_id,
|
||||||
|
text=script,
|
||||||
|
model_id=model_id,
|
||||||
|
)
|
||||||
|
# The SDK returns a generator, collect all chunks
|
||||||
|
return b"".join(audio_generator)
|
||||||
|
|
||||||
|
def _add_narration_to_video(
|
||||||
|
self,
|
||||||
|
video_abspath: str,
|
||||||
|
audio_abspath: str,
|
||||||
|
output_abspath: str,
|
||||||
|
mix_mode: str,
|
||||||
|
narration_volume: float,
|
||||||
|
original_volume: float,
|
||||||
|
) -> None:
|
||||||
|
"""Add narration audio to video. Extracted for testability."""
|
||||||
|
video = None
|
||||||
|
final = None
|
||||||
|
narration_original = None
|
||||||
|
narration_scaled = None
|
||||||
|
original = None
|
||||||
|
|
||||||
|
try:
|
||||||
|
strip_chapters_inplace(video_abspath)
|
||||||
|
video = VideoFileClip(video_abspath)
|
||||||
|
narration_original = AudioFileClip(audio_abspath)
|
||||||
|
narration_scaled = narration_original.with_volume_scaled(narration_volume)
|
||||||
|
narration = narration_scaled
|
||||||
|
|
||||||
|
if mix_mode == "replace":
|
||||||
|
final_audio = narration
|
||||||
|
elif mix_mode == "mix":
|
||||||
|
if video.audio:
|
||||||
|
original = video.audio.with_volume_scaled(original_volume)
|
||||||
|
final_audio = CompositeAudioClip([original, narration])
|
||||||
|
else:
|
||||||
|
final_audio = narration
|
||||||
|
else: # ducking - apply stronger attenuation
|
||||||
|
if video.audio:
|
||||||
|
# Ducking uses a much lower volume for original audio
|
||||||
|
ducking_volume = original_volume * 0.3
|
||||||
|
original = video.audio.with_volume_scaled(ducking_volume)
|
||||||
|
final_audio = CompositeAudioClip([original, narration])
|
||||||
|
else:
|
||||||
|
final_audio = narration
|
||||||
|
|
||||||
|
final = video.with_audio(final_audio)
|
||||||
|
video_codec, audio_codec = get_video_codecs(output_abspath)
|
||||||
|
final.write_videofile(
|
||||||
|
output_abspath, codec=video_codec, audio_codec=audio_codec
|
||||||
|
)
|
||||||
|
|
||||||
|
finally:
|
||||||
|
if original:
|
||||||
|
original.close()
|
||||||
|
if narration_scaled:
|
||||||
|
narration_scaled.close()
|
||||||
|
if narration_original:
|
||||||
|
narration_original.close()
|
||||||
|
if final:
|
||||||
|
final.close()
|
||||||
|
if video:
|
||||||
|
video.close()
|
||||||
|
|
||||||
|
async def run(
|
||||||
|
self,
|
||||||
|
input_data: Input,
|
||||||
|
*,
|
||||||
|
credentials: ElevenLabsCredentials,
|
||||||
|
execution_context: ExecutionContext,
|
||||||
|
node_exec_id: str,
|
||||||
|
**kwargs,
|
||||||
|
) -> BlockOutput:
|
||||||
|
try:
|
||||||
|
assert execution_context.graph_exec_id is not None
|
||||||
|
|
||||||
|
# Store the input video locally
|
||||||
|
local_video_path = await self._store_input_video(
|
||||||
|
execution_context, input_data.video_in
|
||||||
|
)
|
||||||
|
video_abspath = get_exec_file_path(
|
||||||
|
execution_context.graph_exec_id, local_video_path
|
||||||
|
)
|
||||||
|
|
||||||
|
# Generate narration audio via ElevenLabs
|
||||||
|
audio_content = self._generate_narration_audio(
|
||||||
|
credentials.api_key.get_secret_value(),
|
||||||
|
input_data.script,
|
||||||
|
input_data.voice_id,
|
||||||
|
input_data.model_id,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Save audio to exec file path
|
||||||
|
audio_filename = MediaFileType(f"{node_exec_id}_narration.mp3")
|
||||||
|
audio_abspath = get_exec_file_path(
|
||||||
|
execution_context.graph_exec_id, audio_filename
|
||||||
|
)
|
||||||
|
os.makedirs(os.path.dirname(audio_abspath), exist_ok=True)
|
||||||
|
with open(audio_abspath, "wb") as f:
|
||||||
|
f.write(audio_content)
|
||||||
|
|
||||||
|
# Add narration to video
|
||||||
|
source = extract_source_name(local_video_path)
|
||||||
|
output_filename = MediaFileType(f"{node_exec_id}_narrated_{source}.mp4")
|
||||||
|
output_abspath = get_exec_file_path(
|
||||||
|
execution_context.graph_exec_id, output_filename
|
||||||
|
)
|
||||||
|
|
||||||
|
self._add_narration_to_video(
|
||||||
|
video_abspath,
|
||||||
|
audio_abspath,
|
||||||
|
output_abspath,
|
||||||
|
input_data.mix_mode,
|
||||||
|
input_data.narration_volume,
|
||||||
|
input_data.original_volume,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Return as workspace path or data URI based on context
|
||||||
|
video_out = await self._store_output_video(
|
||||||
|
execution_context, output_filename
|
||||||
|
)
|
||||||
|
audio_out = await self._store_output_video(
|
||||||
|
execution_context, audio_filename
|
||||||
|
)
|
||||||
|
|
||||||
|
yield "video_out", video_out
|
||||||
|
yield "audio_file", audio_out
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
raise BlockExecutionError(
|
||||||
|
message=f"Failed to add narration: {e}",
|
||||||
|
block_name=self.name,
|
||||||
|
block_id=str(self.id),
|
||||||
|
) from e
|
||||||
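The `"ducking"` branch attenuates the original track to `original_volume * 0.3`, so with the defaults (`original_volume=0.3`, `narration_volume=1.0`) the background plays at 9% of its source level under full-volume narration:

original_volume = 0.3
narration_volume = 1.0
ducked = original_volume * 0.3   # 0.09 -> background at 9% of its source level
print(f"background={ducked:.2f}, narration={narration_volume:.2f}")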
231 autogpt_platform/backend/backend/blocks/video/text_overlay.py (new file)
@@ -0,0 +1,231 @@
"""VideoTextOverlayBlock - Add text overlay to video."""
|
||||||
|
|
||||||
|
from typing import Literal
|
||||||
|
|
||||||
|
from moviepy import CompositeVideoClip, TextClip
|
||||||
|
from moviepy.video.io.VideoFileClip import VideoFileClip
|
||||||
|
|
||||||
|
from backend.blocks.video._utils import (
|
||||||
|
extract_source_name,
|
||||||
|
get_video_codecs,
|
||||||
|
strip_chapters_inplace,
|
||||||
|
)
|
||||||
|
from backend.data.block import (
|
||||||
|
Block,
|
||||||
|
BlockCategory,
|
||||||
|
BlockOutput,
|
||||||
|
BlockSchemaInput,
|
||||||
|
BlockSchemaOutput,
|
||||||
|
)
|
||||||
|
from backend.data.execution import ExecutionContext
|
||||||
|
from backend.data.model import SchemaField
|
||||||
|
from backend.util.exceptions import BlockExecutionError
|
||||||
|
from backend.util.file import MediaFileType, get_exec_file_path, store_media_file
|
||||||
|
|
||||||
|
|
||||||
|
class VideoTextOverlayBlock(Block):
|
||||||
|
"""Add text overlay/caption to video."""
|
||||||
|
|
||||||
|
class Input(BlockSchemaInput):
|
||||||
|
video_in: MediaFileType = SchemaField(
|
||||||
|
description="Input video (URL, data URI, or local path)"
|
||||||
|
)
|
||||||
|
text: str = SchemaField(description="Text to overlay on video")
|
||||||
|
position: Literal[
|
||||||
|
"top",
|
||||||
|
"center",
|
||||||
|
"bottom",
|
||||||
|
"top-left",
|
||||||
|
"top-right",
|
||||||
|
"bottom-left",
|
||||||
|
"bottom-right",
|
||||||
|
] = SchemaField(description="Position of text on screen", default="bottom")
|
||||||
|
start_time: float | None = SchemaField(
|
||||||
|
description="When to show text (seconds). None = entire video",
|
||||||
|
default=None,
|
||||||
|
advanced=True,
|
||||||
|
)
|
||||||
|
end_time: float | None = SchemaField(
|
||||||
|
description="When to hide text (seconds). None = until end",
|
||||||
|
default=None,
|
||||||
|
advanced=True,
|
||||||
|
)
|
||||||
|
font_size: int = SchemaField(
|
||||||
|
description="Font size", default=48, ge=12, le=200, advanced=True
|
||||||
|
)
|
||||||
|
font_color: str = SchemaField(
|
||||||
|
description="Font color (hex or name)", default="white", advanced=True
|
||||||
|
)
|
||||||
|
bg_color: str | None = SchemaField(
|
||||||
|
description="Background color behind text (None for transparent)",
|
||||||
|
default=None,
|
||||||
|
advanced=True,
|
||||||
|
)
|
||||||
|
|
||||||
|
class Output(BlockSchemaOutput):
|
||||||
|
video_out: MediaFileType = SchemaField(
|
||||||
|
description="Video with text overlay (path or data URI)"
|
||||||
|
)
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
super().__init__(
|
||||||
|
id="8ef14de6-cc90-430a-8cfa-3a003be92454",
|
||||||
|
description="Add text overlay/caption to video",
|
||||||
|
categories={BlockCategory.MULTIMEDIA},
|
||||||
|
input_schema=self.Input,
|
||||||
|
output_schema=self.Output,
|
||||||
|
disabled=True, # Disable until we can lockdown imagemagick security policy
|
||||||
|
test_input={"video_in": "/tmp/test.mp4", "text": "Hello World"},
|
||||||
|
test_output=[("video_out", str)],
|
||||||
|
test_mock={
|
||||||
|
"_add_text_overlay": lambda *args: None,
|
||||||
|
"_store_input_video": lambda *args, **kwargs: "test.mp4",
|
||||||
|
"_store_output_video": lambda *args, **kwargs: "overlay_test.mp4",
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
async def _store_input_video(
|
||||||
|
self, execution_context: ExecutionContext, file: MediaFileType
|
||||||
|
) -> MediaFileType:
|
||||||
|
"""Store input video. Extracted for testability."""
|
||||||
|
return await store_media_file(
|
||||||
|
file=file,
|
||||||
|
execution_context=execution_context,
|
||||||
|
return_format="for_local_processing",
|
||||||
|
)
|
||||||
|
|
||||||
|
async def _store_output_video(
|
||||||
|
self, execution_context: ExecutionContext, file: MediaFileType
|
||||||
|
) -> MediaFileType:
|
||||||
|
"""Store output video. Extracted for testability."""
|
||||||
|
return await store_media_file(
|
||||||
|
file=file,
|
||||||
|
execution_context=execution_context,
|
||||||
|
return_format="for_block_output",
|
||||||
|
)
|
||||||
|
|
||||||
|
def _add_text_overlay(
|
||||||
|
self,
|
||||||
|
video_abspath: str,
|
||||||
|
output_abspath: str,
|
||||||
|
text: str,
|
||||||
|
position: str,
|
||||||
|
start_time: float | None,
|
||||||
|
end_time: float | None,
|
||||||
|
font_size: int,
|
||||||
|
font_color: str,
|
||||||
|
bg_color: str | None,
|
||||||
|
) -> None:
|
||||||
|
"""Add text overlay to video. Extracted for testability."""
|
||||||
|
video = None
|
||||||
|
final = None
|
||||||
|
txt_clip = None
|
||||||
|
try:
|
||||||
|
strip_chapters_inplace(video_abspath)
|
||||||
|
video = VideoFileClip(video_abspath)
|
||||||
|
|
||||||
|
txt_clip = TextClip(
|
||||||
|
text=text,
|
||||||
|
font_size=font_size,
|
||||||
|
color=font_color,
|
||||||
|
bg_color=bg_color,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Position mapping
|
||||||
|
pos_map = {
|
||||||
|
"top": ("center", "top"),
|
||||||
|
"center": ("center", "center"),
|
||||||
|
"bottom": ("center", "bottom"),
|
||||||
|
"top-left": ("left", "top"),
|
||||||
|
"top-right": ("right", "top"),
|
||||||
|
"bottom-left": ("left", "bottom"),
|
||||||
|
"bottom-right": ("right", "bottom"),
|
||||||
|
}
|
||||||
|
|
||||||
|
txt_clip = txt_clip.with_position(pos_map[position])
|
||||||
|
|
||||||
|
# Set timing
|
||||||
|
start = start_time or 0
|
||||||
|
end = end_time or video.duration
|
||||||
|
duration = max(0, end - start)
|
||||||
|
txt_clip = txt_clip.with_start(start).with_end(end).with_duration(duration)
|
||||||
|
|
||||||
|
final = CompositeVideoClip([video, txt_clip])
|
||||||
|
video_codec, audio_codec = get_video_codecs(output_abspath)
|
||||||
|
final.write_videofile(
|
||||||
|
output_abspath, codec=video_codec, audio_codec=audio_codec
|
||||||
|
)
|
||||||
|
|
||||||
|
finally:
|
||||||
|
if txt_clip:
|
||||||
|
txt_clip.close()
|
||||||
|
if final:
|
||||||
|
final.close()
|
||||||
|
if video:
|
||||||
|
video.close()
|
||||||
|
|
||||||
|
async def run(
|
||||||
|
self,
|
||||||
|
input_data: Input,
|
||||||
|
*,
|
||||||
|
execution_context: ExecutionContext,
|
||||||
|
node_exec_id: str,
|
||||||
|
**kwargs,
|
||||||
|
) -> BlockOutput:
|
||||||
|
# Validate time range if both are provided
|
||||||
|
if (
|
||||||
|
input_data.start_time is not None
|
||||||
|
and input_data.end_time is not None
|
||||||
|
and input_data.end_time <= input_data.start_time
|
||||||
|
):
|
||||||
|
raise BlockExecutionError(
|
||||||
|
message=f"end_time ({input_data.end_time}) must be greater than start_time ({input_data.start_time})",
|
||||||
|
block_name=self.name,
|
||||||
|
block_id=str(self.id),
|
||||||
|
)
|
||||||
|
|
||||||
|
try:
|
||||||
|
assert execution_context.graph_exec_id is not None
|
||||||
|
|
||||||
|
# Store the input video locally
|
||||||
|
local_video_path = await self._store_input_video(
|
||||||
|
execution_context, input_data.video_in
|
||||||
|
)
|
||||||
|
video_abspath = get_exec_file_path(
|
||||||
|
execution_context.graph_exec_id, local_video_path
|
||||||
|
)
|
||||||
|
|
||||||
|
# Build output path
|
||||||
|
source = extract_source_name(local_video_path)
|
||||||
|
output_filename = MediaFileType(f"{node_exec_id}_overlay_{source}.mp4")
|
||||||
|
output_abspath = get_exec_file_path(
|
||||||
|
execution_context.graph_exec_id, output_filename
|
||||||
|
)
|
||||||
|
|
||||||
|
self._add_text_overlay(
|
||||||
|
video_abspath,
|
||||||
|
output_abspath,
|
||||||
|
input_data.text,
|
||||||
|
input_data.position,
|
||||||
|
input_data.start_time,
|
||||||
|
input_data.end_time,
|
||||||
|
input_data.font_size,
|
||||||
|
input_data.font_color,
|
||||||
|
input_data.bg_color,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Return as workspace path or data URI based on context
|
||||||
|
video_out = await self._store_output_video(
|
||||||
|
execution_context, output_filename
|
||||||
|
)
|
||||||
|
|
||||||
|
yield "video_out", video_out
|
||||||
|
|
||||||
|
except BlockExecutionError:
|
||||||
|
raise
|
||||||
|
except Exception as e:
|
||||||
|
raise BlockExecutionError(
|
||||||
|
message=f"Failed to add text overlay: {e}",
|
||||||
|
block_name=self.name,
|
||||||
|
block_id=str(self.id),
|
||||||
|
) from e
|
||||||
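The overlay window in `_add_text_overlay` is plain arithmetic: `start_time=2.0` and `end_time=5.0` show the text for `max(0, 5.0 - 2.0) = 3.0` seconds, and leaving both as `None` spans the whole video:

video_duration = 30.0
start_time, end_time = 2.0, 5.0
start = start_time or 0                 # None falls back to 0
end = end_time or video_duration        # None falls back to the video's end
print(max(0, end - start))              # 3.0 seconds of visible text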
@@ -165,10 +165,13 @@ class TranscribeYoutubeVideoBlock(Block):
         credentials: WebshareProxyCredentials,
         **kwargs,
     ) -> BlockOutput:
+        try:
             video_id = self.extract_video_id(input_data.youtube_url)
-        yield "video_id", video_id
             transcript = self.get_transcript(video_id, credentials)
             transcript_text = self.format_transcript(transcript=transcript)
+
+            # Only yield after all operations succeed
+            yield "video_id", video_id
             yield "transcript", transcript_text
+        except Exception as e:
+            yield "error", str(e)
@@ -246,7 +246,9 @@ class BlockSchema(BaseModel):
                     f"is not of type {CredentialsMetaInput.__name__}"
                 )

-            credentials_fields[field_name].validate_credentials_field_schema(cls)
+            CredentialsMetaInput.validate_credentials_field_schema(
+                cls.get_field_schema(field_name), field_name
+            )

         elif field_name in credentials_fields:
             raise KeyError(
@@ -317,6 +319,8 @@ class BlockSchema(BaseModel):
             "credentials_provider": [config.get("provider", "google")],
             "credentials_types": [config.get("type", "oauth2")],
             "credentials_scopes": config.get("scopes"),
+            "is_auto_credential": True,
+            "input_field_name": info["field_name"],
         }
         result[kwarg_name] = CredentialsFieldInfo.model_validate(
             auto_schema, by_alias=True
@@ -36,12 +36,14 @@ from backend.blocks.replicate.replicate_block import ReplicateModelBlock
 from backend.blocks.smart_decision_maker import SmartDecisionMakerBlock
 from backend.blocks.talking_head import CreateTalkingAvatarVideoBlock
 from backend.blocks.text_to_speech_block import UnrealTextToSpeechBlock
+from backend.blocks.video.narration import VideoNarrationBlock
 from backend.data.block import Block, BlockCost, BlockCostType
 from backend.integrations.credentials_store import (
     aiml_api_credentials,
     anthropic_credentials,
     apollo_credentials,
     did_credentials,
+    elevenlabs_credentials,
     enrichlayer_credentials,
     groq_credentials,
     ideogram_credentials,
@@ -78,6 +80,7 @@ MODEL_COST: dict[LlmModel, int] = {
     LlmModel.CLAUDE_4_1_OPUS: 21,
     LlmModel.CLAUDE_4_OPUS: 21,
     LlmModel.CLAUDE_4_SONNET: 5,
+    LlmModel.CLAUDE_4_6_OPUS: 14,
     LlmModel.CLAUDE_4_5_HAIKU: 4,
     LlmModel.CLAUDE_4_5_OPUS: 14,
     LlmModel.CLAUDE_4_5_SONNET: 9,
@@ -639,4 +642,16 @@ BLOCK_COSTS: dict[Type[Block], list[BlockCost]] = {
             },
         ),
     ],
+    VideoNarrationBlock: [
+        BlockCost(
+            cost_amount=5,  # ElevenLabs TTS cost
+            cost_filter={
+                "credentials": {
+                    "id": elevenlabs_credentials.id,
+                    "provider": elevenlabs_credentials.provider,
+                    "type": elevenlabs_credentials.type,
+                }
+            },
+        )
+    ],
 }
@@ -134,6 +134,16 @@ async def test_block_credit_reset(server: SpinTestServer):
     month1 = datetime.now(timezone.utc).replace(month=1, day=1)
     user_credit.time_now = lambda: month1

+    # IMPORTANT: Set updatedAt to December of the previous year to ensure it's
+    # in a different month than month1 (January). This fixes a timing bug
+    # where, if the test runs in early February, 35 days ago would be January,
+    # matching the mocked month1 and preventing the refill from triggering.
+    dec_previous_year = month1.replace(year=month1.year - 1, month=12, day=15)
+    await UserBalance.prisma().update(
+        where={"userId": DEFAULT_USER_ID},
+        data={"updatedAt": dec_previous_year},
+    )
+
     # First call in month 1 should trigger refill
     balance = await user_credit.get_credits(DEFAULT_USER_ID)
     assert balance == REFILL_VALUE  # Should get 1000 credits
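The timing bug described in the comment is easy to verify: 35 days before early February still lands in January of the same year, which would collide with the mocked `month1`:

from datetime import datetime, timedelta

# 35 days before Feb 5 is Jan 1 -- the same month as the mocked month1,
# hence the fix of pinning updatedAt to December of the previous year.
print(datetime(2025, 2, 5) - timedelta(days=35))  # 2025-01-01 00:00:00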
@@ -3,7 +3,7 @@ import logging
 import uuid
 from collections import defaultdict
 from datetime import datetime, timezone
-from typing import TYPE_CHECKING, Annotated, Any, Literal, Optional, cast
+from typing import TYPE_CHECKING, Annotated, Any, Literal, Optional, Self, cast

 from prisma.enums import SubmissionStatus
 from prisma.models import (
@@ -20,7 +20,7 @@ from prisma.types import (
     AgentNodeLinkCreateInput,
     StoreListingVersionWhereInput,
 )
-from pydantic import BaseModel, BeforeValidator, Field, create_model
+from pydantic import BaseModel, BeforeValidator, Field
 from pydantic.fields import computed_field

 from backend.blocks.agent import AgentExecutorBlock
@@ -30,7 +30,6 @@ from backend.data.db import prisma as db
 from backend.data.dynamic_fields import is_tool_pin, sanitize_pin_name
 from backend.data.includes import MAX_GRAPH_VERSIONS_FETCH
 from backend.data.model import (
-    CredentialsField,
     CredentialsFieldInfo,
     CredentialsMetaInput,
     is_credentials_field_name,
@@ -45,7 +44,6 @@ from .block import (
     AnyBlockSchema,
     Block,
     BlockInput,
-    BlockSchema,
     BlockType,
     EmptySchema,
     get_block,
@@ -113,10 +111,12 @@ class Link(BaseDbModel):

 class Node(BaseDbModel):
     block_id: str
-    input_default: BlockInput = {}  # dict[input_name, default_value]
-    metadata: dict[str, Any] = {}
-    input_links: list[Link] = []
-    output_links: list[Link] = []
+    input_default: BlockInput = Field(  # dict[input_name, default_value]
+        default_factory=dict
+    )
+    metadata: dict[str, Any] = Field(default_factory=dict)
+    input_links: list[Link] = Field(default_factory=list)
+    output_links: list[Link] = Field(default_factory=list)

     @property
     def credentials_optional(self) -> bool:
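The `Field(default_factory=...)` change removes any reliance on shared mutable defaults: Pydantic copies plain `{}`/`[]` defaults per instance, so the old code generally worked, but a factory makes per-instance construction explicit (and skips the copy). A standalone sketch of the pattern, separate from the actual `Node` model:

from pydantic import BaseModel, Field

class Example(BaseModel):
    # Each instance gets its own freshly built dict/list.
    metadata: dict[str, str] = Field(default_factory=dict)
    links: list[str] = Field(default_factory=list)

a, b = Example(), Example()
a.links.append("x")
print(b.links)  # [] -- no state shared between instances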
@@ -221,18 +221,33 @@ class NodeModel(Node):
         return result


-class BaseGraph(BaseDbModel):
+class GraphBaseMeta(BaseDbModel):
+    """
+    Shared base for `GraphMeta` and `BaseGraph`, with core graph metadata fields.
+    """
+
     version: int = 1
     is_active: bool = True
     name: str
     description: str
     instructions: str | None = None
     recommended_schedule_cron: str | None = None
-    nodes: list[Node] = []
-    links: list[Link] = []
     forked_from_id: str | None = None
     forked_from_version: int | None = None


+class BaseGraph(GraphBaseMeta):
+    """
+    Graph with nodes, links, and computed I/O schema fields.
+
+    Used to represent sub-graphs within a `Graph`. Contains the full graph
+    structure including nodes and links, plus computed fields for schemas
+    and trigger info. Does NOT include user_id or created_at (see GraphModel).
+    """
+
+    nodes: list[Node] = Field(default_factory=list)
+    links: list[Link] = Field(default_factory=list)
+
     @computed_field
     @property
     def input_schema(self) -> dict[str, Any]:
@@ -361,44 +376,78 @@ class GraphTriggerInfo(BaseModel):


 class Graph(BaseGraph):
-    sub_graphs: list[BaseGraph] = []  # Flattened sub-graphs
+    """Creatable graph model used in API create/update endpoints."""
+
+    sub_graphs: list[BaseGraph] = Field(default_factory=list)  # Flattened sub-graphs
+
+
+class GraphMeta(GraphBaseMeta):
+    """
+    Lightweight graph metadata model representing an existing graph from the database,
+    for use in listings and summaries.
+
+    Lacks `GraphModel`'s nodes, links, and expensive computed fields.
+    Use for list endpoints where full graph data is not needed and performance matters.
+    """
+
+    id: str  # type: ignore
+    version: int  # type: ignore
+    user_id: str
+    created_at: datetime
+
+    @classmethod
+    def from_db(cls, graph: "AgentGraph") -> Self:
+        return cls(
+            id=graph.id,
+            version=graph.version,
+            is_active=graph.isActive,
+            name=graph.name or "",
+            description=graph.description or "",
+            instructions=graph.instructions,
+            recommended_schedule_cron=graph.recommendedScheduleCron,
+            forked_from_id=graph.forkedFromId,
+            forked_from_version=graph.forkedFromVersion,
+            user_id=graph.userId,
+            created_at=graph.createdAt,
+        )
+
+
+class GraphModel(Graph, GraphMeta):
+    """
+    Full graph model representing an existing graph from the database.
+
+    This is the primary model for working with persisted graphs. Includes all
+    graph data (nodes, links, sub_graphs) plus user ownership and timestamps.
+    Provides computed fields (input_schema, output_schema, etc.) used during
+    set-up (frontend) and execution (backend).
+
+    Inherits from:
+    - `Graph`: provides structure (nodes, links, sub_graphs) and computed schemas
+    - `GraphMeta`: provides user_id, created_at for database records
+    """
+
+    nodes: list[NodeModel] = Field(default_factory=list)  # type: ignore
+
+    @property
+    def starting_nodes(self) -> list[NodeModel]:
+        outbound_nodes = {link.sink_id for link in self.links}
+        input_nodes = {
+            node.id for node in self.nodes if node.block.block_type == BlockType.INPUT
+        }
+        return [
+            node
+            for node in self.nodes
+            if node.id not in outbound_nodes or node.id in input_nodes
+        ]
+
+    @property
+    def webhook_input_node(self) -> NodeModel | None:  # type: ignore
+        return cast(NodeModel, super().webhook_input_node)

     @computed_field
     @property
     def credentials_input_schema(self) -> dict[str, Any]:
-        schema = self._credentials_input_schema.jsonschema()
-
-        # Determine which credential fields are required based on credentials_optional metadata
-        graph_credentials_inputs = self.aggregate_credentials_inputs()
-        required_fields = []
-
-        # Build a map of node_id -> node for quick lookup
-        all_nodes = {node.id: node for node in self.nodes}
-        for sub_graph in self.sub_graphs:
-            for node in sub_graph.nodes:
-                all_nodes[node.id] = node
-
-        for field_key, (
-            _field_info,
-            node_field_pairs,
-        ) in graph_credentials_inputs.items():
-            # A field is required if ANY node using it has credentials_optional=False
-            is_required = False
-            for node_id, _field_name in node_field_pairs:
-                node = all_nodes.get(node_id)
-                if node and not node.credentials_optional:
-                    is_required = True
-                    break
-
-            if is_required:
-                required_fields.append(field_key)
-
-        schema["required"] = required_fields
-        return schema
-
-    @property
-    def _credentials_input_schema(self) -> type[BlockSchema]:
-        graph_credentials_inputs = self.aggregate_credentials_inputs()
+        graph_credentials_inputs = self.regular_credentials_inputs
         logger.debug(
             f"Combined credentials input fields for graph #{self.id} ({self.name}): "
             f"{graph_credentials_inputs}"
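The hunk above replaces the old layout, where `GraphModel` and `GraphMeta` both hung off a monolithic `Graph`, with a small diamond rooted at `GraphBaseMeta`. A minimal sketch of the resulting shape (stub classes only, field lists omitted; this is our summary, not the real models):

# Sketch of the inheritance shape introduced above (illustrative stubs).
class GraphBaseMeta: ...                 # shared metadata: name, description, version, fork info
class BaseGraph(GraphBaseMeta): ...      # + nodes, links, computed I/O schemas
class Graph(BaseGraph): ...              # + sub_graphs; the creatable API payload
class GraphMeta(GraphBaseMeta): ...      # + id, user_id, created_at; cheap listings
class GraphModel(Graph, GraphMeta): ...  # full persisted graph: structure + ownership

# The MRO puts Graph's structure fields and GraphMeta's DB fields on one model:
assert [c.__name__ for c in GraphModel.__mro__[:-1]] == [
    "GraphModel", "Graph", "BaseGraph", "GraphMeta", "GraphBaseMeta"
]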
@@ -406,8 +455,8 @@ class Graph(BaseGraph):

         # Warn if same-provider credentials inputs can't be combined (= bad UX)
         graph_cred_fields = list(graph_credentials_inputs.values())
-        for i, (field, keys) in enumerate(graph_cred_fields):
-            for other_field, other_keys in list(graph_cred_fields)[i + 1 :]:
+        for i, (field, keys, _) in enumerate(graph_cred_fields):
+            for other_field, other_keys, _ in list(graph_cred_fields)[i + 1 :]:
                 if field.provider != other_field.provider:
                     continue
                 if ProviderName.HTTP in field.provider:
@@ -423,31 +472,78 @@ class Graph(BaseGraph):
                     f"keys: {keys} <> {other_keys}."
                 )

-        fields: dict[str, tuple[type[CredentialsMetaInput], CredentialsMetaInput]] = {
-            agg_field_key: (
-                CredentialsMetaInput[
-                    Literal[tuple(field_info.provider)],  # type: ignore
-                    Literal[tuple(field_info.supported_types)],  # type: ignore
-                ],
-                CredentialsField(
-                    required_scopes=set(field_info.required_scopes or []),
-                    discriminator=field_info.discriminator,
-                    discriminator_mapping=field_info.discriminator_mapping,
-                    discriminator_values=field_info.discriminator_values,
-                ),
-            )
-            for agg_field_key, (field_info, _) in graph_credentials_inputs.items()
-        }
-
-        return create_model(
-            self.name.replace(" ", "") + "CredentialsInputSchema",
-            __base__=BlockSchema,
-            **fields,  # type: ignore
-        )
+        # Build JSON schema directly to avoid expensive create_model + validation overhead
+        properties = {}
+        required_fields = []
+
+        for agg_field_key, (
+            field_info,
+            _,
+            is_required,
+        ) in graph_credentials_inputs.items():
+            providers = list(field_info.provider)
+            cred_types = list(field_info.supported_types)
+
+            field_schema: dict[str, Any] = {
+                "credentials_provider": providers,
+                "credentials_types": cred_types,
+                "type": "object",
+                "properties": {
+                    "id": {"title": "Id", "type": "string"},
+                    "title": {
+                        "anyOf": [{"type": "string"}, {"type": "null"}],
+                        "default": None,
+                        "title": "Title",
+                    },
+                    "provider": {
+                        "title": "Provider",
+                        "type": "string",
+                        **(
+                            {"enum": providers}
+                            if len(providers) > 1
+                            else {"const": providers[0]}
+                        ),
+                    },
+                    "type": {
+                        "title": "Type",
+                        "type": "string",
+                        **(
+                            {"enum": cred_types}
+                            if len(cred_types) > 1
+                            else {"const": cred_types[0]}
+                        ),
+                    },
+                },
+                "required": ["id", "provider", "type"],
+            }
+
+            # Add other (optional) field info items
+            field_schema.update(
+                field_info.model_dump(
+                    by_alias=True,
+                    exclude_defaults=True,
+                    exclude={"provider", "supported_types"},  # already included above
+                )
+            )
+
+            # Ensure field schema is well-formed
+            CredentialsMetaInput.validate_credentials_field_schema(
+                field_schema, agg_field_key
+            )
+
+            properties[agg_field_key] = field_schema
+            if is_required:
+                required_fields.append(agg_field_key)
+
+        return {
+            "type": "object",
+            "properties": properties,
+            "required": required_fields,
+        }

     def aggregate_credentials_inputs(
         self,
-    ) -> dict[str, tuple[CredentialsFieldInfo, set[tuple[str, str]]]]:
+    ) -> dict[str, tuple[CredentialsFieldInfo, set[tuple[str, str]], bool]]:
         """
         Returns:
             dict[aggregated_field_key, tuple(
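For orientation, the schema dict assembled in the hunk above has roughly this shape per aggregated credentials field. This is a hand-written illustration mirroring the construction code, not captured output; the provider and type values are invented:

# Illustrative only: approximate schema produced for one single-provider field.
example_field_schema = {
    "credentials_provider": ["github"],
    "credentials_types": ["api_key"],
    "type": "object",
    "properties": {
        "id": {"title": "Id", "type": "string"},
        "title": {"anyOf": [{"type": "string"}, {"type": "null"}],
                  "default": None, "title": "Title"},
        "provider": {"title": "Provider", "type": "string", "const": "github"},
        "type": {"title": "Type", "type": "string", "const": "api_key"},
    },
    "required": ["id", "provider", "type"],
}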
@@ -455,13 +551,19 @@ class Graph(BaseGraph):
                 (now includes discriminator_values from matching nodes)
             set[(node_id, field_name)]: Node credentials fields that are
                 compatible with this aggregated field spec
+            bool: True if the field is required (any node has credentials_optional=False)
         )]
         """
         # First collect all credential field data with input defaults
-        node_credential_data = []
+        # Track (field_info, (node_id, field_name), is_required) for each credential field
+        node_credential_data: list[tuple[CredentialsFieldInfo, tuple[str, str]]] = []
+        node_required_map: dict[str, bool] = {}  # node_id -> is_required
+
         for graph in [self] + self.sub_graphs:
             for node in graph.nodes:
+                # Track if this node requires credentials (credentials_optional=False means required)
+                node_required_map[node.id] = not node.credentials_optional
+
                 for (
                     field_name,
                     field_info,
@@ -485,37 +587,43 @@ class Graph(BaseGraph):
             )

         # Combine credential field info (this will merge discriminator_values automatically)
-        return CredentialsFieldInfo.combine(*node_credential_data)
-
-
-class GraphModel(Graph):
-    user_id: str
-    nodes: list[NodeModel] = []  # type: ignore
-
-    created_at: datetime
-
-    @property
-    def starting_nodes(self) -> list[NodeModel]:
-        outbound_nodes = {link.sink_id for link in self.links}
-        input_nodes = {
-            node.id for node in self.nodes if node.block.block_type == BlockType.INPUT
-        }
-        return [
-            node
-            for node in self.nodes
-            if node.id not in outbound_nodes or node.id in input_nodes
-        ]
-
-    @property
-    def webhook_input_node(self) -> NodeModel | None:  # type: ignore
-        return cast(NodeModel, super().webhook_input_node)
-
-    def meta(self) -> "GraphMeta":
-        """
-        Returns a GraphMeta object with metadata about the graph.
-        This is used to return metadata about the graph without exposing nodes and links.
-        """
-        return GraphMeta.from_graph(self)
+        combined = CredentialsFieldInfo.combine(*node_credential_data)
+
+        # Add is_required flag to each aggregated field
+        # A field is required if ANY node using it has credentials_optional=False
+        return {
+            key: (
+                field_info,
+                node_field_pairs,
+                any(
+                    node_required_map.get(node_id, True)
+                    for node_id, _ in node_field_pairs
+                ),
+            )
+            for key, (field_info, node_field_pairs) in combined.items()
+        }
+
+    @property
+    def regular_credentials_inputs(
+        self,
+    ) -> dict[str, tuple[CredentialsFieldInfo, set[tuple[str, str]], bool]]:
+        """Credentials that need explicit user mapping (CredentialsMetaInput fields)."""
+        return {
+            k: v
+            for k, v in self.aggregate_credentials_inputs().items()
+            if not v[0].is_auto_credential
+        }
+
+    @property
+    def auto_credentials_inputs(
+        self,
+    ) -> dict[str, tuple[CredentialsFieldInfo, set[tuple[str, str]], bool]]:
+        """Credentials embedded in file fields (_credentials_id), resolved at execution time."""
+        return {
+            k: v
+            for k, v in self.aggregate_credentials_inputs().items()
+            if v[0].is_auto_credential
+        }

     def reassign_ids(self, user_id: str, reassign_graph_id: bool = False):
         """
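With the return type widened to a 3-tuple, every consumer of `aggregate_credentials_inputs()` now unpacks `(field_info, node_field_pairs, is_required)`. A small sketch of the consuming pattern; the dict literal below only mimics the returned shape, and the key and values are invented:

# Sketch: unpacking the new 3-tuple shape.
aggregated = {
    "github_credentials": ("<CredentialsFieldInfo>", {("node-1", "credentials")}, True),
}
for field_key, (field_info, node_field_pairs, is_required) in aggregated.items():
    if is_required:
        print(f"{field_key} must be supplied before the graph can run")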
@@ -567,6 +675,16 @@ class GraphModel(Graph):
             ) and graph_id in graph_id_map:
                 node.input_default["graph_id"] = graph_id_map[graph_id]

+        # Clear auto-credentials references (e.g., _credentials_id in
+        # GoogleDriveFile fields) so the new user must re-authenticate
+        # with their own account
+        for node in graph.nodes:
+            if not node.input_default:
+                continue
+            for key, value in node.input_default.items():
+                if isinstance(value, dict) and "_credentials_id" in value:
+                    del value["_credentials_id"]
+
     def validate_graph(
         self,
         for_run: bool = False,
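The loop added above is the SECRT-1772 fix: when a graph is re-owned (e.g., forked), any `_credentials_id` embedded in file-picker input defaults is dropped so the new owner cannot silently reuse the creator's credentials. A before/after sketch with invented values:

# Sketch of the effect of the credential-clearing loop (values invented).
input_default = {
    "spreadsheet": {"_credentials_id": "creator-cred", "id": "file-123"},
    "plain_input": "untouched",
}
for value in input_default.values():
    if isinstance(value, dict) and "_credentials_id" in value:
        del value["_credentials_id"]

assert input_default == {
    "spreadsheet": {"id": "file-123"},  # key removed, not set to None
    "plain_input": "untouched",
}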
@@ -799,13 +917,14 @@ class GraphModel(Graph):
         if is_static_output_block(link.source_id):
             link.is_static = True  # Each value block output should be static.

-    @staticmethod
-    def from_db(
+    @classmethod
+    def from_db(  # type: ignore[reportIncompatibleMethodOverride]
+        cls,
         graph: AgentGraph,
         for_export: bool = False,
         sub_graphs: list[AgentGraph] | None = None,
-    ) -> "GraphModel":
-        return GraphModel(
+    ) -> Self:
+        return cls(
             id=graph.id,
             user_id=graph.userId if not for_export else "",
             version=graph.version,
@@ -831,17 +950,28 @@ class GraphModel(Graph):
             ],
         )

+    def hide_nodes(self) -> "GraphModelWithoutNodes":
+        """
+        Returns a copy of the `GraphModel` with nodes, links, and sub-graphs hidden
+        (excluded from serialization). They are still present in the model instance
+        so all computed fields (e.g. `credentials_input_schema`) still work.
+        """
+        return GraphModelWithoutNodes.model_validate(self, from_attributes=True)
+

-class GraphMeta(Graph):
-    user_id: str
-
-    # Easy work-around to prevent exposing nodes and links in the API response
-    nodes: list[NodeModel] = Field(default=[], exclude=True)  # type: ignore
-    links: list[Link] = Field(default=[], exclude=True)
-
-    @staticmethod
-    def from_graph(graph: GraphModel) -> "GraphMeta":
-        return GraphMeta(**graph.model_dump())
+class GraphModelWithoutNodes(GraphModel):
+    """
+    GraphModel variant that excludes nodes, links, and sub-graphs from serialization.
+
+    Used in contexts like the store where exposing internal graph structure
+    is not desired. Inherits all computed fields from GraphModel but marks
+    nodes and links as excluded from JSON output.
+    """
+
+    nodes: list[NodeModel] = Field(default_factory=list, exclude=True)
+    links: list[Link] = Field(default_factory=list, exclude=True)
+
+    sub_graphs: list[BaseGraph] = Field(default_factory=list, exclude=True)


 class GraphsPaginated(BaseModel):
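`hide_nodes()` relies on Pydantic's `Field(exclude=True)`: the data stays on the instance (so computed fields keep working) but vanishes from serialized output. A minimal, self-contained sketch of that mechanism with a hypothetical stand-in model:

# Minimal sketch of the Field(exclude=True) mechanism hide_nodes() relies on.
from pydantic import BaseModel, Field


class PublicGraph(BaseModel):  # hypothetical stand-in, not the real model
    name: str
    nodes: list[str] = Field(default_factory=list, exclude=True)


g = PublicGraph(name="demo", nodes=["n1", "n2"])
assert g.nodes == ["n1", "n2"]          # still available in memory
assert "nodes" not in g.model_dump()    # hidden from API responses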
@@ -912,21 +1042,11 @@ async def list_graphs_paginated(
         where=where_clause,
         distinct=["id"],
         order={"version": "desc"},
-        include=AGENT_GRAPH_INCLUDE,
         skip=offset,
         take=page_size,
     )

-    graph_models: list[GraphMeta] = []
-    for graph in graphs:
-        try:
-            graph_meta = GraphModel.from_db(graph).meta()
-            # Trigger serialization to validate that the graph is well formed
-            graph_meta.model_dump()
-            graph_models.append(graph_meta)
-        except Exception as e:
-            logger.error(f"Error processing graph {graph.id}: {e}")
-            continue
+    graph_models = [GraphMeta.from_db(graph) for graph in graphs]

     return GraphsPaginated(
         graphs=graph_models,
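The listing path above no longer hydrates full graphs (the dropped `include=AGENT_GRAPH_INCLUDE` pulled in nodes and links) only to strip them away again; `GraphMeta.from_db` maps each bare row straight to the lightweight model. A generic, runnable sketch of that design choice, with invented stand-in types:

# Sketch of the pattern change: build light DTOs straight from rows instead of
# hydrating a heavy model and then discarding most of it. Types are invented.
from dataclasses import dataclass


@dataclass
class Row:   # stands in for a prisma AgentGraph record
    id: str
    version: int


@dataclass
class Meta:  # stands in for GraphMeta
    id: str
    version: int

    @classmethod
    def from_db(cls, row: Row) -> "Meta":
        return cls(id=row.id, version=row.version)


rows = [Row("g1", 1), Row("g2", 3)]
assert [m.id for m in (Meta.from_db(r) for r in rows)] == ["g1", "g2"]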
@@ -463,3 +463,328 @@ def test_node_credentials_optional_with_other_metadata():
     assert node.credentials_optional is True
     assert node.metadata["position"] == {"x": 100, "y": 200}
     assert node.metadata["customized_name"] == "My Custom Node"
+
+
+# ============================================================================
+# Tests for _reassign_ids credential clearing (Fix 3: SECRT-1772)
+def test_combine_preserves_is_auto_credential_flag():
+    """
+    CredentialsFieldInfo.combine() must propagate is_auto_credential and
+    input_field_name to the combined result. Regression test for reviewer
+    finding that combine() dropped these fields.
+    """
+    from backend.data.model import CredentialsFieldInfo
+
+    auto_field = CredentialsFieldInfo.model_validate(
+        {
+            "credentials_provider": ["google"],
+            "credentials_types": ["oauth2"],
+            "credentials_scopes": ["drive.readonly"],
+            "is_auto_credential": True,
+            "input_field_name": "spreadsheet",
+        },
+        by_alias=True,
+    )
+
+    # combine() takes *args of (field_info, key) tuples
+    combined = CredentialsFieldInfo.combine(
+        (auto_field, ("node-1", "credentials")),
+        (auto_field, ("node-2", "credentials")),
+    )
+
+    assert len(combined) == 1
+    group_key = next(iter(combined))
+    combined_info, combined_keys = combined[group_key]
+
+    assert combined_info.is_auto_credential is True
+    assert combined_info.input_field_name == "spreadsheet"
+    assert combined_keys == {("node-1", "credentials"), ("node-2", "credentials")}
+
+
+def test_combine_preserves_regular_credential_defaults():
+    """Regular credentials should have is_auto_credential=False after combine()."""
+    from backend.data.model import CredentialsFieldInfo
+
+    regular_field = CredentialsFieldInfo.model_validate(
+        {
+            "credentials_provider": ["github"],
+            "credentials_types": ["api_key"],
+            "is_auto_credential": False,
+        },
+        by_alias=True,
+    )
+
+    combined = CredentialsFieldInfo.combine(
+        (regular_field, ("node-1", "credentials")),
+    )
+
+    group_key = next(iter(combined))
+    combined_info, _ = combined[group_key]
+
+    assert combined_info.is_auto_credential is False
+    assert combined_info.input_field_name is None
+
+
+# ============================================================================
+
+
+def test_reassign_ids_clears_credentials_id():
+    """
+    [SECRT-1772] _reassign_ids should clear _credentials_id from
+    GoogleDriveFile-style input_default fields so forked agents
+    don't retain the original creator's credential references.
+    """
+    from backend.data.graph import GraphModel
+
+    node = Node(
+        id="node-1",
+        block_id=StoreValueBlock().id,
+        input_default={
+            "spreadsheet": {
+                "_credentials_id": "original-cred-id",
+                "id": "file-123",
+                "name": "test.xlsx",
+                "mimeType": "application/vnd.google-apps.spreadsheet",
+                "url": "https://docs.google.com/spreadsheets/d/file-123",
+            },
+        },
+    )
+
+    graph = Graph(
+        id="test-graph",
+        name="Test",
+        description="Test",
+        nodes=[node],
+        links=[],
+    )
+
+    GraphModel._reassign_ids(graph, user_id="new-user", graph_id_map={})
+
+    # _credentials_id key should be removed (not set to None) so that
+    # _acquire_auto_credentials correctly errors instead of treating it as chained data
+    assert "_credentials_id" not in graph.nodes[0].input_default["spreadsheet"]
+
+
+def test_reassign_ids_preserves_non_credential_fields():
+    """
+    Regression guard: _reassign_ids should NOT modify non-credential fields
+    like name, mimeType, id, url.
+    """
+    from backend.data.graph import GraphModel
+
+    node = Node(
+        id="node-1",
+        block_id=StoreValueBlock().id,
+        input_default={
+            "spreadsheet": {
+                "_credentials_id": "cred-abc",
+                "id": "file-123",
+                "name": "test.xlsx",
+                "mimeType": "application/vnd.google-apps.spreadsheet",
+                "url": "https://docs.google.com/spreadsheets/d/file-123",
+            },
+        },
+    )
+
+    graph = Graph(
+        id="test-graph",
+        name="Test",
+        description="Test",
+        nodes=[node],
+        links=[],
+    )
+
+    GraphModel._reassign_ids(graph, user_id="new-user", graph_id_map={})
+
+    field = graph.nodes[0].input_default["spreadsheet"]
+    assert field["id"] == "file-123"
+    assert field["name"] == "test.xlsx"
+    assert field["mimeType"] == "application/vnd.google-apps.spreadsheet"
+    assert field["url"] == "https://docs.google.com/spreadsheets/d/file-123"
+
+
+def test_reassign_ids_handles_no_credentials():
+    """
+    Regression guard: _reassign_ids should not error when input_default
+    has no dict fields with _credentials_id.
+    """
+    from backend.data.graph import GraphModel
+
+    node = Node(
+        id="node-1",
+        block_id=StoreValueBlock().id,
+        input_default={
+            "input": "some value",
+            "another_input": 42,
+        },
+    )
+
+    graph = Graph(
+        id="test-graph",
+        name="Test",
+        description="Test",
+        nodes=[node],
+        links=[],
+    )
+
+    GraphModel._reassign_ids(graph, user_id="new-user", graph_id_map={})
+
+    # Should not error, fields unchanged
+    assert graph.nodes[0].input_default["input"] == "some value"
+    assert graph.nodes[0].input_default["another_input"] == 42
+
+
+def test_reassign_ids_handles_multiple_credential_fields():
+    """
+    [SECRT-1772] When a node has multiple dict fields with _credentials_id,
+    ALL of them should be cleared.
+    """
+    from backend.data.graph import GraphModel
+
+    node = Node(
+        id="node-1",
+        block_id=StoreValueBlock().id,
+        input_default={
+            "spreadsheet": {
+                "_credentials_id": "cred-1",
+                "id": "file-1",
+                "name": "file1.xlsx",
+            },
+            "doc_file": {
+                "_credentials_id": "cred-2",
+                "id": "file-2",
+                "name": "file2.docx",
+            },
+            "plain_input": "not a dict",
+        },
+    )
+
+    graph = Graph(
+        id="test-graph",
+        name="Test",
+        description="Test",
+        nodes=[node],
+        links=[],
+    )
+
+    GraphModel._reassign_ids(graph, user_id="new-user", graph_id_map={})
+
+    assert "_credentials_id" not in graph.nodes[0].input_default["spreadsheet"]
+    assert "_credentials_id" not in graph.nodes[0].input_default["doc_file"]
+    assert graph.nodes[0].input_default["plain_input"] == "not a dict"
+
+
+# ============================================================================
+# Tests for discriminate() field propagation
+def test_discriminate_preserves_is_auto_credential_flag():
+    """
+    CredentialsFieldInfo.discriminate() must propagate is_auto_credential and
+    input_field_name to the discriminated result. Regression test for
+    discriminate() dropping these fields (same class of bug as combine()).
+    """
+    from backend.data.model import CredentialsFieldInfo
+
+    auto_field = CredentialsFieldInfo.model_validate(
+        {
+            "credentials_provider": ["google", "openai"],
+            "credentials_types": ["oauth2"],
+            "credentials_scopes": ["drive.readonly"],
+            "is_auto_credential": True,
+            "input_field_name": "spreadsheet",
+            "discriminator": "model",
+            "discriminator_mapping": {"gpt-4": "openai", "gemini": "google"},
+        },
+        by_alias=True,
+    )
+
+    discriminated = auto_field.discriminate("gemini")
+
+    assert discriminated.is_auto_credential is True
+    assert discriminated.input_field_name == "spreadsheet"
+    assert discriminated.provider == frozenset(["google"])
+
+
+def test_discriminate_preserves_regular_credential_defaults():
+    """Regular credentials should have is_auto_credential=False after discriminate()."""
+    from backend.data.model import CredentialsFieldInfo
+
+    regular_field = CredentialsFieldInfo.model_validate(
+        {
+            "credentials_provider": ["google", "openai"],
+            "credentials_types": ["api_key"],
+            "is_auto_credential": False,
+            "discriminator": "model",
+            "discriminator_mapping": {"gpt-4": "openai", "gemini": "google"},
+        },
+        by_alias=True,
+    )
+
+    discriminated = regular_field.discriminate("gpt-4")
+
+    assert discriminated.is_auto_credential is False
+    assert discriminated.input_field_name is None
+    assert discriminated.provider == frozenset(["openai"])
+
+
+# ============================================================================
+# Tests for credentials_input_schema excluding auto_credentials
+def test_credentials_input_schema_excludes_auto_creds():
+    """
+    GraphModel.credentials_input_schema should exclude auto_credentials
+    (is_auto_credential=True) from the schema. Auto_credentials are
+    transparently resolved at execution time via file picker data.
+    """
+    from datetime import datetime, timezone
+    from unittest.mock import PropertyMock, patch
+
+    from backend.data.graph import GraphModel, NodeModel
+    from backend.data.model import CredentialsFieldInfo
+
+    regular_field_info = CredentialsFieldInfo.model_validate(
+        {
+            "credentials_provider": ["github"],
+            "credentials_types": ["api_key"],
+            "is_auto_credential": False,
+        },
+        by_alias=True,
+    )
+
+    graph = GraphModel(
+        id="test-graph",
+        version=1,
+        name="Test",
+        description="Test",
+        user_id="test-user",
+        created_at=datetime.now(timezone.utc),
+        nodes=[
+            NodeModel(
+                id="node-1",
+                block_id=StoreValueBlock().id,
+                input_default={},
+                graph_id="test-graph",
+                graph_version=1,
+            ),
+        ],
+        links=[],
+    )
+
+    # Mock regular_credentials_inputs to return only the non-auto field (3-tuple)
+    regular_only = {
+        "github_credentials": (
+            regular_field_info,
+            {("node-1", "credentials")},
+            True,
+        ),
+    }
+
+    with patch.object(
+        type(graph),
+        "regular_credentials_inputs",
+        new_callable=PropertyMock,
+        return_value=regular_only,
+    ):
+        schema = graph.credentials_input_schema
+        field_names = set(schema.get("properties", {}).keys())
+        # Should include regular credential but NOT auto_credential
+        assert "github_credentials" in field_names
+        assert "google_credentials" not in field_names
@@ -163,7 +163,6 @@ class User(BaseModel):
 if TYPE_CHECKING:
     from prisma.models import User as PrismaUser

-    from backend.data.block import BlockSchema

 T = TypeVar("T")
 logger = logging.getLogger(__name__)
@@ -508,15 +507,13 @@ class CredentialsMetaInput(BaseModel, Generic[CP, CT]):
     def allowed_cred_types(cls) -> tuple[CredentialsType, ...]:
         return get_args(cls.model_fields["type"].annotation)

-    @classmethod
-    def validate_credentials_field_schema(cls, model: type["BlockSchema"]):
+    @staticmethod
+    def validate_credentials_field_schema(
+        field_schema: dict[str, Any], field_name: str
+    ):
         """Validates the schema of a credentials input field"""
-        field_name = next(
-            name for name, type in model.get_credentials_fields().items() if type is cls
-        )
-        field_schema = model.jsonschema()["properties"][field_name]
         try:
-            schema_extra = CredentialsFieldInfo[CP, CT].model_validate(field_schema)
+            field_info = CredentialsFieldInfo[CP, CT].model_validate(field_schema)
         except ValidationError as e:
             if "Field required [type=missing" not in str(e):
                 raise
@@ -526,11 +523,11 @@ class CredentialsMetaInput(BaseModel, Generic[CP, CT]):
                 f"{field_schema}"
             ) from e

-        providers = cls.allowed_providers()
+        providers = field_info.provider
         if (
             providers is not None
             and len(providers) > 1
-            and not schema_extra.discriminator
+            and not field_info.discriminator
         ):
             raise TypeError(
                 f"Multi-provider CredentialsField '{field_name}' "
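`validate_credentials_field_schema` is now a plain static method over an already-built schema dict, so callers like `credentials_input_schema` can validate without first constructing a `BlockSchema` subclass. A hedged call sketch mirroring the usage in graph.py above (the field key and values are invented):

# Sketch: the new call shape, as used in graph.py's credentials_input_schema.
from backend.data.model import CredentialsMetaInput

field_schema = {
    "credentials_provider": ["github"],
    "credentials_types": ["api_key"],
    "type": "object",
    "properties": {},  # id/provider/type sub-schemas as built in the diff above
    "required": ["id", "provider", "type"],
}
# Raises if the schema is malformed, e.g. multi-provider without a discriminator:
CredentialsMetaInput.validate_credentials_field_schema(field_schema, "github_credentials")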
@@ -574,6 +571,8 @@ class CredentialsFieldInfo(BaseModel, Generic[CP, CT]):
     discriminator: Optional[str] = None
     discriminator_mapping: Optional[dict[str, CP]] = None
     discriminator_values: set[Any] = Field(default_factory=set)
+    is_auto_credential: bool = False
+    input_field_name: Optional[str] = None

     @classmethod
     def combine(
@@ -654,6 +653,9 @@ class CredentialsFieldInfo(BaseModel, Generic[CP, CT]):
                 + "_credentials"
             )

+            # Propagate is_auto_credential from the combined field.
+            # All fields in a group should share the same is_auto_credential
+            # value since auto and regular credentials serve different purposes.
             result[group_key] = (
                 CredentialsFieldInfo[CP, CT](
                     credentials_provider=combined.provider,
@@ -662,6 +664,8 @@ class CredentialsFieldInfo(BaseModel, Generic[CP, CT]):
                     discriminator=combined.discriminator,
                     discriminator_mapping=combined.discriminator_mapping,
                     discriminator_values=set(all_discriminator_values),
+                    is_auto_credential=combined.is_auto_credential,
+                    input_field_name=combined.input_field_name,
                 ),
                 combined_keys,
             )
@@ -687,6 +691,8 @@ class CredentialsFieldInfo(BaseModel, Generic[CP, CT]):
             discriminator=self.discriminator,
             discriminator_mapping=self.discriminator_mapping,
             discriminator_values=self.discriminator_values,
+            is_auto_credential=self.is_auto_credential,
+            input_field_name=self.input_field_name,
         )
@@ -211,22 +211,6 @@ class AgentRejectionData(BaseNotificationData):
         return value


-class WaitlistLaunchData(BaseNotificationData):
-    """Notification data for when an agent from a waitlist is launched."""
-
-    agent_name: str
-    waitlist_name: str
-    store_url: str
-    launched_at: datetime
-
-    @field_validator("launched_at")
-    @classmethod
-    def validate_timezone(cls, value: datetime):
-        if value.tzinfo is None:
-            raise ValueError("datetime must have timezone information")
-        return value
-
-
 NotificationData = Annotated[
     Union[
         AgentRunData,
@@ -239,7 +223,6 @@ NotificationData = Annotated[
         DailySummaryData,
         RefundRequestData,
         BaseSummaryData,
-        WaitlistLaunchData,
     ],
     Field(discriminator="type"),
 ]
@@ -290,7 +273,6 @@ def get_notif_data_type(
         NotificationType.REFUND_PROCESSED: RefundRequestData,
         NotificationType.AGENT_APPROVED: AgentApprovalData,
         NotificationType.AGENT_REJECTED: AgentRejectionData,
-        NotificationType.WAITLIST_LAUNCH: WaitlistLaunchData,
     }[notification_type]


@@ -336,7 +318,6 @@ class NotificationTypeOverride:
         NotificationType.REFUND_PROCESSED: QueueType.ADMIN,
         NotificationType.AGENT_APPROVED: QueueType.IMMEDIATE,
         NotificationType.AGENT_REJECTED: QueueType.IMMEDIATE,
-        NotificationType.WAITLIST_LAUNCH: QueueType.IMMEDIATE,
     }
     return BATCHING_RULES.get(self.notification_type, QueueType.IMMEDIATE)

@@ -356,7 +337,6 @@ class NotificationTypeOverride:
         NotificationType.REFUND_PROCESSED: "refund_processed.html",
         NotificationType.AGENT_APPROVED: "agent_approved.html",
         NotificationType.AGENT_REJECTED: "agent_rejected.html",
-        NotificationType.WAITLIST_LAUNCH: "waitlist_launch.html",
     }[self.notification_type]

     @property
@@ -374,7 +354,6 @@ class NotificationTypeOverride:
         NotificationType.REFUND_PROCESSED: "Refund for ${{data.amount / 100}} to {{data.user_name}} has been processed",
         NotificationType.AGENT_APPROVED: "🎉 Your agent '{{data.agent_name}}' has been approved!",
         NotificationType.AGENT_REJECTED: "Your agent '{{data.agent_name}}' needs some updates",
-        NotificationType.WAITLIST_LAUNCH: "🚀 {{data.agent_name}} is now available!",
     }[self.notification_type]
@@ -172,6 +172,81 @@ def execute_graph(
 T = TypeVar("T")


+async def _acquire_auto_credentials(
+    input_model: type[BlockSchema],
+    input_data: dict[str, Any],
+    creds_manager: "IntegrationCredentialsManager",
+    user_id: str,
+) -> tuple[dict[str, Any], list[AsyncRedisLock]]:
+    """
+    Resolve auto_credentials from GoogleDriveFileField-style inputs.
+
+    Returns:
+        (extra_exec_kwargs, locks): kwargs to inject into block execution, and
+        credential locks to release after execution completes.
+    """
+    extra_exec_kwargs: dict[str, Any] = {}
+    locks: list[AsyncRedisLock] = []
+
+    # NOTE: If a block ever has multiple auto-credential fields, a ValueError
+    # on a later field will strand locks acquired for earlier fields. They'll
+    # auto-expire via Redis TTL, but add a try/except to release partial locks
+    # if that becomes a real scenario.
+    for kwarg_name, info in input_model.get_auto_credentials_fields().items():
+        field_name = info["field_name"]
+        field_data = input_data.get(field_name)
+
+        if field_data and isinstance(field_data, dict):
+            # Check if _credentials_id key exists in the field data
+            if "_credentials_id" in field_data:
+                cred_id = field_data["_credentials_id"]
+                if cred_id:
+                    # Credential ID provided - acquire credentials
+                    provider = info.get("config", {}).get(
+                        "provider", "external service"
+                    )
+                    file_name = field_data.get("name", "selected file")
+                    try:
+                        credentials, lock = await creds_manager.acquire(
+                            user_id, cred_id
+                        )
+                        locks.append(lock)
+                        extra_exec_kwargs[kwarg_name] = credentials
+                    except ValueError:
+                        raise ValueError(
+                            f"{provider.capitalize()} credentials for "
+                            f"'{file_name}' in field '{field_name}' are not "
+                            f"available in your account. "
+                            f"This can happen if the agent was created by another "
+                            f"user or the credentials were deleted. "
+                            f"Please open the agent in the builder and re-select "
+                            f"the file to authenticate with your own account."
+                        )
+                # else: _credentials_id is explicitly None, skip (chained data)
+            else:
+                # _credentials_id key missing entirely - this is an error
+                provider = info.get("config", {}).get("provider", "external service")
+                file_name = field_data.get("name", "selected file")
+                raise ValueError(
+                    f"Authentication missing for '{file_name}' in field "
+                    f"'{field_name}'. Please re-select the file to authenticate "
+                    f"with {provider.capitalize()}."
+                )
+        elif field_data is None and field_name not in input_data:
+            # Field not in input_data at all = connected from upstream block, skip
+            pass
+        else:
+            # field_data is None/empty but key IS in input_data = user didn't select
+            provider = info.get("config", {}).get("provider", "external service")
+            raise ValueError(
+                f"No file selected for '{field_name}'. "
+                f"Please select a file to provide "
+                f"{provider.capitalize()} authentication."
+            )
+
+    return extra_exec_kwargs, locks
+
+
 async def execute_node(
     node: Node,
     data: NodeExecutionEntry,
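The helper hands its locks back to the caller, which is expected to release them once the node finishes; the NOTE in the body flags that a mid-loop ValueError can strand earlier locks until their Redis TTL expires. A hedged sketch of the caller-side contract (a hypothetical wrapper; the real release calls live in execute_node's cleanup path):

# Sketch of the caller-side contract: acquire, execute, always release.
async def run_with_auto_credentials(input_model, input_data, creds_manager, user_id):
    extra_kwargs, locks = await _acquire_auto_credentials(
        input_model=input_model,
        input_data=input_data,
        creds_manager=creds_manager,
        user_id=user_id,
    )
    try:
        ...  # execute the block with extra_kwargs merged into its inputs
    finally:
        for lock in locks:
            await lock.release()  # assumption: AsyncRedisLock exposes release()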
@@ -271,41 +346,14 @@ async def execute_node(
         extra_exec_kwargs[field_name] = credentials

     # Handle auto-generated credentials (e.g., from GoogleDriveFileInput)
-    for kwarg_name, info in input_model.get_auto_credentials_fields().items():
-        field_name = info["field_name"]
-        field_data = input_data.get(field_name)
-        if field_data and isinstance(field_data, dict):
-            # Check if _credentials_id key exists in the field data
-            if "_credentials_id" in field_data:
-                cred_id = field_data["_credentials_id"]
-                if cred_id:
-                    # Credential ID provided - acquire credentials
-                    provider = info.get("config", {}).get(
-                        "provider", "external service"
-                    )
-                    file_name = field_data.get("name", "selected file")
-                    try:
-                        credentials, lock = await creds_manager.acquire(
-                            user_id, cred_id
-                        )
-                        creds_locks.append(lock)
-                        extra_exec_kwargs[kwarg_name] = credentials
-                    except ValueError:
-                        # Credential was deleted or doesn't exist
-                        raise ValueError(
-                            f"Authentication expired for '{file_name}' in field '{field_name}'. "
-                            f"The saved {provider.capitalize()} credentials no longer exist. "
-                            f"Please re-select the file to re-authenticate."
-                        )
-                # else: _credentials_id is explicitly None, skip credentials (for chained data)
-            else:
-                # _credentials_id key missing entirely - this is an error
-                provider = info.get("config", {}).get("provider", "external service")
-                file_name = field_data.get("name", "selected file")
-                raise ValueError(
-                    f"Authentication missing for '{file_name}' in field '{field_name}'. "
-                    f"Please re-select the file to authenticate with {provider.capitalize()}."
-                )
+    auto_extra_kwargs, auto_locks = await _acquire_auto_credentials(
+        input_model=input_model,
+        input_data=input_data,
+        creds_manager=creds_manager,
+        user_id=user_id,
+    )
+    extra_exec_kwargs.update(auto_extra_kwargs)
+    creds_locks.extend(auto_locks)

     output_size = 0
@@ -0,0 +1,320 @@
|
|||||||
|
"""
|
||||||
|
Tests for auto_credentials handling in execute_node().
|
||||||
|
|
||||||
|
These test the _acquire_auto_credentials() helper function extracted from
|
||||||
|
execute_node() (manager.py lines 273-308).
|
||||||
|
"""
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
from pytest_mock import MockerFixture
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def google_drive_file_data():
|
||||||
|
return {
|
||||||
|
"valid": {
|
||||||
|
"_credentials_id": "cred-id-123",
|
||||||
|
"id": "file-123",
|
||||||
|
"name": "test.xlsx",
|
||||||
|
"mimeType": "application/vnd.google-apps.spreadsheet",
|
||||||
|
},
|
||||||
|
"chained": {
|
||||||
|
"_credentials_id": None,
|
||||||
|
"id": "file-456",
|
||||||
|
"name": "chained.xlsx",
|
||||||
|
"mimeType": "application/vnd.google-apps.spreadsheet",
|
||||||
|
},
|
||||||
|
"missing_key": {
|
||||||
|
"id": "file-789",
|
||||||
|
"name": "bad.xlsx",
|
||||||
|
"mimeType": "application/vnd.google-apps.spreadsheet",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def mock_input_model(mocker: MockerFixture):
|
||||||
|
"""Create a mock input model with get_auto_credentials_fields() returning one field."""
|
||||||
|
input_model = mocker.MagicMock()
|
||||||
|
input_model.get_auto_credentials_fields.return_value = {
|
||||||
|
"credentials": {
|
||||||
|
"field_name": "spreadsheet",
|
||||||
|
"config": {
|
||||||
|
"provider": "google",
|
||||||
|
"type": "oauth2",
|
||||||
|
"scopes": ["https://www.googleapis.com/auth/drive.readonly"],
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return input_model
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def mock_creds_manager(mocker: MockerFixture):
|
||||||
|
manager = mocker.AsyncMock()
|
||||||
|
mock_lock = mocker.AsyncMock()
|
||||||
|
mock_creds = mocker.MagicMock()
|
||||||
|
mock_creds.id = "cred-id-123"
|
||||||
|
mock_creds.provider = "google"
|
||||||
|
manager.acquire.return_value = (mock_creds, mock_lock)
|
||||||
|
return manager, mock_creds, mock_lock
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
async def test_auto_credentials_happy_path(
|
||||||
|
mocker: MockerFixture,
|
||||||
|
google_drive_file_data,
|
||||||
|
mock_input_model,
|
||||||
|
mock_creds_manager,
|
||||||
|
):
|
||||||
|
"""When field_data has a valid _credentials_id, credentials should be acquired."""
|
||||||
|
from backend.executor.manager import _acquire_auto_credentials
|
||||||
|
|
||||||
|
manager, mock_creds, mock_lock = mock_creds_manager
|
||||||
|
input_data = {"spreadsheet": google_drive_file_data["valid"]}
|
||||||
|
|
||||||
|
extra_kwargs, locks = await _acquire_auto_credentials(
|
||||||
|
input_model=mock_input_model,
|
||||||
|
input_data=input_data,
|
||||||
|
creds_manager=manager,
|
||||||
|
user_id="user-1",
|
||||||
|
)
|
||||||
|
|
||||||
|
manager.acquire.assert_called_once_with("user-1", "cred-id-123")
|
||||||
|
assert extra_kwargs["credentials"] == mock_creds
|
||||||
|
assert mock_lock in locks
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
async def test_auto_credentials_field_none_static_raises(
|
||||||
|
mocker: MockerFixture,
|
||||||
|
mock_input_model,
|
||||||
|
mock_creds_manager,
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
[THE BUG FIX TEST — OPEN-2895]
|
||||||
|
When field_data is None and the key IS in input_data (user didn't select a file),
|
||||||
|
should raise ValueError instead of silently skipping.
|
||||||
|
"""
|
||||||
|
from backend.executor.manager import _acquire_auto_credentials
|
||||||
|
|
||||||
|
manager, _, _ = mock_creds_manager
|
||||||
|
# Key is present but value is None = user didn't select a file
|
||||||
|
input_data = {"spreadsheet": None}
|
||||||
|
|
||||||
|
with pytest.raises(ValueError, match="No file selected"):
|
||||||
|
await _acquire_auto_credentials(
|
||||||
|
input_model=mock_input_model,
|
||||||
|
input_data=input_data,
|
||||||
|
creds_manager=manager,
|
||||||
|
user_id="user-1",
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
async def test_auto_credentials_field_absent_skips(
|
||||||
|
mocker: MockerFixture,
|
||||||
|
mock_input_model,
|
||||||
|
mock_creds_manager,
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
When the field key is NOT in input_data at all (upstream connection),
|
||||||
|
should skip without error.
|
||||||
|
"""
|
||||||
|
from backend.executor.manager import _acquire_auto_credentials
|
||||||
|
|
||||||
|
manager, _, _ = mock_creds_manager
|
||||||
|
# Key not present = connected from upstream block
|
||||||
|
input_data = {}
|
||||||
|
|
||||||
|
extra_kwargs, locks = await _acquire_auto_credentials(
|
||||||
|
input_model=mock_input_model,
|
||||||
|
input_data=input_data,
|
||||||
|
creds_manager=manager,
|
||||||
|
user_id="user-1",
|
||||||
|
)
|
||||||
|
|
||||||
|
manager.acquire.assert_not_called()
|
||||||
|
assert "credentials" not in extra_kwargs
|
||||||
|
assert locks == []
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
async def test_auto_credentials_chained_cred_id_none(
|
||||||
|
mocker: MockerFixture,
|
||||||
|
google_drive_file_data,
|
||||||
|
mock_input_model,
|
||||||
|
mock_creds_manager,
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
When _credentials_id is explicitly None (chained data from upstream),
|
||||||
|
should skip credential acquisition.
|
||||||
|
"""
|
||||||
|
from backend.executor.manager import _acquire_auto_credentials
|
||||||
|
|
||||||
|
manager, _, _ = mock_creds_manager
|
||||||
|
input_data = {"spreadsheet": google_drive_file_data["chained"]}
|
||||||
|
|
||||||
|
extra_kwargs, locks = await _acquire_auto_credentials(
|
||||||
|
input_model=mock_input_model,
|
||||||
|
input_data=input_data,
|
||||||
|
creds_manager=manager,
|
||||||
|
user_id="user-1",
|
||||||
|
)
|
||||||
|
|
||||||
|
manager.acquire.assert_not_called()
|
||||||
|
assert "credentials" not in extra_kwargs
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
async def test_auto_credentials_missing_cred_id_key_raises(
|
||||||
|
mocker: MockerFixture,
|
||||||
|
google_drive_file_data,
|
||||||
|
mock_input_model,
|
||||||
|
mock_creds_manager,
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
When _credentials_id key is missing entirely from field_data dict,
|
||||||
|
should raise ValueError.
|
||||||
|
"""
|
||||||
|
from backend.executor.manager import _acquire_auto_credentials
|
||||||
|
|
||||||
|
manager, _, _ = mock_creds_manager
|
||||||
|
input_data = {"spreadsheet": google_drive_file_data["missing_key"]}
|
||||||
|
|
||||||
|
with pytest.raises(ValueError, match="Authentication missing"):
|
||||||
|
await _acquire_auto_credentials(
|
||||||
|
input_model=mock_input_model,
|
||||||
|
input_data=input_data,
|
||||||
|
creds_manager=manager,
|
||||||
|
user_id="user-1",
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
async def test_auto_credentials_ownership_mismatch_error(
|
||||||
|
mocker: MockerFixture,
|
||||||
|
google_drive_file_data,
|
||||||
|
mock_input_model,
|
||||||
|
mock_creds_manager,
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
[SECRT-1772] When acquire() raises ValueError (credential belongs to another user),
|
||||||
|
+    the error message should mention 'not available' (not 'expired').
+    """
+    from backend.executor.manager import _acquire_auto_credentials
+
+    manager, _, _ = mock_creds_manager
+    manager.acquire.side_effect = ValueError(
+        "Credentials #cred-id-123 for user #user-2 not found"
+    )
+    input_data = {"spreadsheet": google_drive_file_data["valid"]}
+
+    with pytest.raises(ValueError, match="not available in your account"):
+        await _acquire_auto_credentials(
+            input_model=mock_input_model,
+            input_data=input_data,
+            creds_manager=manager,
+            user_id="user-2",
+        )
+
+
+@pytest.mark.asyncio
+async def test_auto_credentials_deleted_credential_error(
+    mocker: MockerFixture,
+    google_drive_file_data,
+    mock_input_model,
+    mock_creds_manager,
+):
+    """
+    [SECRT-1772] When acquire() raises ValueError (credential was deleted),
+    the error message should mention 'not available' (not 'expired').
+    """
+    from backend.executor.manager import _acquire_auto_credentials
+
+    manager, _, _ = mock_creds_manager
+    manager.acquire.side_effect = ValueError(
+        "Credentials #cred-id-123 for user #user-1 not found"
+    )
+    input_data = {"spreadsheet": google_drive_file_data["valid"]}
+
+    with pytest.raises(ValueError, match="not available in your account"):
+        await _acquire_auto_credentials(
+            input_model=mock_input_model,
+            input_data=input_data,
+            creds_manager=manager,
+            user_id="user-1",
+        )
+
+
+@pytest.mark.asyncio
+async def test_auto_credentials_lock_appended(
+    mocker: MockerFixture,
+    google_drive_file_data,
+    mock_input_model,
+    mock_creds_manager,
+):
+    """Lock from acquire() should be included in returned locks list."""
+    from backend.executor.manager import _acquire_auto_credentials
+
+    manager, _, mock_lock = mock_creds_manager
+    input_data = {"spreadsheet": google_drive_file_data["valid"]}
+
+    extra_kwargs, locks = await _acquire_auto_credentials(
+        input_model=mock_input_model,
+        input_data=input_data,
+        creds_manager=manager,
+        user_id="user-1",
+    )
+
+    assert len(locks) == 1
+    assert locks[0] is mock_lock
+
+
+@pytest.mark.asyncio
+async def test_auto_credentials_multiple_fields(
+    mocker: MockerFixture,
+    mock_creds_manager,
+):
+    """When there are multiple auto_credentials fields, only valid ones should acquire."""
+    from backend.executor.manager import _acquire_auto_credentials
+
+    manager, mock_creds, mock_lock = mock_creds_manager
+
+    input_model = mocker.MagicMock()
+    input_model.get_auto_credentials_fields.return_value = {
+        "credentials": {
+            "field_name": "spreadsheet",
+            "config": {"provider": "google", "type": "oauth2"},
+        },
+        "credentials2": {
+            "field_name": "doc_file",
+            "config": {"provider": "google", "type": "oauth2"},
+        },
+    }
+
+    input_data = {
+        "spreadsheet": {
+            "_credentials_id": "cred-id-123",
+            "id": "file-1",
+            "name": "file1.xlsx",
+        },
+        "doc_file": {
+            "_credentials_id": None,
+            "id": "file-2",
+            "name": "chained.doc",
+        },
+    }
+
+    extra_kwargs, locks = await _acquire_auto_credentials(
+        input_model=input_model,
+        input_data=input_data,
+        creds_manager=manager,
+        user_id="user-1",
+    )
+
+    # Only the first field should have acquired credentials
+    manager.acquire.assert_called_once_with("user-1", "cred-id-123")
+    assert "credentials" in extra_kwargs
+    assert "credentials2" not in extra_kwargs
+    assert len(locks) == 1
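Reviewer note: the tests above pin down the return contract of _acquire_auto_credentials: a pair of (extra_kwargs, locks). A minimal sketch of how a caller would consume that pair; every name here other than _acquire_auto_credentials is a hypothetical stand-in for illustration, not code from this PR:

# Hypothetical caller sketch (names other than _acquire_auto_credentials are invented).
async def run_block_with_auto_credentials(
    block, input_model, input_data, creds_manager, user_id
):
    extra_kwargs, locks = await _acquire_auto_credentials(
        input_model=input_model,
        input_data=input_data,
        creds_manager=creds_manager,
        user_id=user_id,
    )
    try:
        # Resolved credentials are injected alongside the regular inputs.
        return await block.run(**input_data, **extra_kwargs)
    finally:
        # Locks returned by acquire() must be released even on failure.
        for lock in locks:
            lock.release()  # may be async in the real lock implementation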
@@ -259,7 +259,8 @@ async def _validate_node_input_credentials(
         # Find any fields of type CredentialsMetaInput
         credentials_fields = block.input_schema.get_credentials_fields()
-        if not credentials_fields:
+        auto_credentials_fields = block.input_schema.get_auto_credentials_fields()
+        if not credentials_fields and not auto_credentials_fields:
             continue
 
         # Track if any credential field is missing for this node
@@ -339,6 +340,47 @@ async def _validate_node_input_credentials(
                     ] = "Invalid credentials: type/provider mismatch"
                     continue
 
+        # Validate auto-credentials (GoogleDriveFileField-based)
+        # These have _credentials_id embedded in the file field data
+        if auto_credentials_fields:
+            for _kwarg_name, info in auto_credentials_fields.items():
+                field_name = info["field_name"]
+                # Check input_default and nodes_input_masks for the field value
+                field_value = node.input_default.get(field_name)
+                if nodes_input_masks and node.id in nodes_input_masks:
+                    field_value = nodes_input_masks[node.id].get(
+                        field_name, field_value
+                    )
+
+                if field_value and isinstance(field_value, dict):
+                    if "_credentials_id" not in field_value:
+                        # Key removed (e.g., on fork) — needs re-auth
+                        has_missing_credentials = True
+                        credential_errors[node.id][field_name] = (
+                            "Authentication missing for the selected file. "
+                            "Please re-select the file to authenticate with "
+                            "your own account."
+                        )
+                        continue
+                    cred_id = field_value.get("_credentials_id")
+                    if cred_id and isinstance(cred_id, str):
+                        try:
+                            creds_store = get_integration_credentials_store()
+                            creds = await creds_store.get_creds_by_id(user_id, cred_id)
+                        except Exception as e:
+                            has_missing_credentials = True
+                            credential_errors[node.id][
+                                field_name
+                            ] = f"Credentials not available: {e}"
+                            continue
+                        if not creds:
+                            has_missing_credentials = True
+                            credential_errors[node.id][field_name] = (
+                                "The saved credentials are not available "
+                                "for your account. Please re-select the file to "
+                                "authenticate with your own account."
+                            )
+
         # If node has optional credentials and any are missing, mark for skipping
         # But only if there are no other errors for this node
         if (
@@ -370,10 +412,11 @@ def make_node_credentials_input_map(
     """
     result: dict[str, dict[str, JsonValue]] = {}
 
-    # Get aggregated credentials fields for the graph
-    graph_cred_inputs = graph.aggregate_credentials_inputs()
+    # Only map regular credentials (not auto_credentials, which are resolved
+    # at execution time from _credentials_id in file field data)
+    graph_cred_inputs = graph.regular_credentials_inputs
 
-    for graph_input_name, (_, compatible_node_fields) in graph_cred_inputs.items():
+    for graph_input_name, (_, compatible_node_fields, _) in graph_cred_inputs.items():
         # Best-effort map: skip missing items
         if graph_input_name not in graph_credentials_input:
             continue
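For reviewers: the _credentials_id that the new validation branch inspects lives inside the file field value itself, not in a separate credentials input. A minimal illustration of the three payload shapes the validator distinguishes (values mirror the fixtures used in the tests below; field contents are examples):

# Owner-selected file: credential id present and resolvable for this user.
selected = {"_credentials_id": "valid-cred-id", "id": "file-123", "name": "test.xlsx"}

# Chained from an upstream node: key present but None, so validation skips it.
chained = {"_credentials_id": None, "id": "file-456", "name": "chained.doc"}

# Forked graph: key stripped entirely, flagged as "Authentication missing".
forked = {"id": "file-789", "name": "test.xlsx"}

assert selected["_credentials_id"]
assert chained["_credentials_id"] is None
assert "_credentials_id" not in forked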
@@ -907,3 +907,335 @@ async def test_stop_graph_execution_cascades_to_child_with_reviews(
 
     # Verify both parent and child status updates
     assert mock_execution_db.update_graph_execution_stats.call_count >= 1
+
+
+# ============================================================================
+# Tests for auto_credentials validation in _validate_node_input_credentials
+# (Fix 3: SECRT-1772 + Fix 4: Path 4)
+# ============================================================================
+
+
+@pytest.mark.asyncio
+async def test_validate_node_input_credentials_auto_creds_valid(
+    mocker: MockerFixture,
+):
+    """
+    [SECRT-1772] When a node has auto_credentials with a valid _credentials_id
+    that exists in the store, validation should pass without errors.
+    """
+    from backend.executor.utils import _validate_node_input_credentials
+
+    mock_node = mocker.MagicMock()
+    mock_node.id = "node-with-auto-creds"
+    mock_node.credentials_optional = False
+    mock_node.input_default = {
+        "spreadsheet": {
+            "_credentials_id": "valid-cred-id",
+            "id": "file-123",
+            "name": "test.xlsx",
+        }
+    }
+
+    mock_block = mocker.MagicMock()
+    # No regular credentials fields
+    mock_block.input_schema.get_credentials_fields.return_value = {}
+    # Has auto_credentials fields
+    mock_block.input_schema.get_auto_credentials_fields.return_value = {
+        "credentials": {
+            "field_name": "spreadsheet",
+            "config": {"provider": "google", "type": "oauth2"},
+        }
+    }
+    mock_node.block = mock_block
+
+    mock_graph = mocker.MagicMock()
+    mock_graph.nodes = [mock_node]
+
+    # Mock the credentials store to return valid credentials
+    mock_store = mocker.MagicMock()
+    mock_creds = mocker.MagicMock()
+    mock_creds.id = "valid-cred-id"
+    mock_store.get_creds_by_id = mocker.AsyncMock(return_value=mock_creds)
+    mocker.patch(
+        "backend.executor.utils.get_integration_credentials_store",
+        return_value=mock_store,
+    )
+
+    errors, nodes_to_skip = await _validate_node_input_credentials(
+        graph=mock_graph,
+        user_id="test-user",
+        nodes_input_masks=None,
+    )
+
+    assert mock_node.id not in errors
+    assert mock_node.id not in nodes_to_skip
+
+
+@pytest.mark.asyncio
+async def test_validate_node_input_credentials_auto_creds_missing(
+    mocker: MockerFixture,
+):
+    """
+    [SECRT-1772] When a node has auto_credentials with a _credentials_id
+    that doesn't exist for the current user, validation should report an error.
+    """
+    from backend.executor.utils import _validate_node_input_credentials
+
+    mock_node = mocker.MagicMock()
+    mock_node.id = "node-with-bad-auto-creds"
+    mock_node.credentials_optional = False
+    mock_node.input_default = {
+        "spreadsheet": {
+            "_credentials_id": "other-users-cred-id",
+            "id": "file-123",
+            "name": "test.xlsx",
+        }
+    }
+
+    mock_block = mocker.MagicMock()
+    mock_block.input_schema.get_credentials_fields.return_value = {}
+    mock_block.input_schema.get_auto_credentials_fields.return_value = {
+        "credentials": {
+            "field_name": "spreadsheet",
+            "config": {"provider": "google", "type": "oauth2"},
+        }
+    }
+    mock_node.block = mock_block
+
+    mock_graph = mocker.MagicMock()
+    mock_graph.nodes = [mock_node]
+
+    # Mock the credentials store to return None (cred not found for this user)
+    mock_store = mocker.MagicMock()
+    mock_store.get_creds_by_id = mocker.AsyncMock(return_value=None)
+    mocker.patch(
+        "backend.executor.utils.get_integration_credentials_store",
+        return_value=mock_store,
+    )
+
+    errors, nodes_to_skip = await _validate_node_input_credentials(
+        graph=mock_graph,
+        user_id="different-user",
+        nodes_input_masks=None,
+    )
+
+    assert mock_node.id in errors
+    assert "spreadsheet" in errors[mock_node.id]
+    assert "not available" in errors[mock_node.id]["spreadsheet"].lower()
+
+
+@pytest.mark.asyncio
+async def test_validate_node_input_credentials_both_regular_and_auto(
+    mocker: MockerFixture,
+):
+    """
+    [SECRT-1772] A node that has BOTH regular credentials AND auto_credentials
+    should have both validated.
+    """
+    from backend.executor.utils import _validate_node_input_credentials
+
+    mock_node = mocker.MagicMock()
+    mock_node.id = "node-with-both-creds"
+    mock_node.credentials_optional = False
+    mock_node.input_default = {
+        "credentials": {
+            "id": "regular-cred-id",
+            "provider": "github",
+            "type": "api_key",
+        },
+        "spreadsheet": {
+            "_credentials_id": "auto-cred-id",
+            "id": "file-123",
+            "name": "test.xlsx",
+        },
+    }
+
+    mock_credentials_field_type = mocker.MagicMock()
+    mock_credentials_meta = mocker.MagicMock()
+    mock_credentials_meta.id = "regular-cred-id"
+    mock_credentials_meta.provider = "github"
+    mock_credentials_meta.type = "api_key"
+    mock_credentials_field_type.model_validate.return_value = mock_credentials_meta
+
+    mock_block = mocker.MagicMock()
+    # Regular credentials field
+    mock_block.input_schema.get_credentials_fields.return_value = {
+        "credentials": mock_credentials_field_type,
+    }
+    # Auto-credentials field
+    mock_block.input_schema.get_auto_credentials_fields.return_value = {
+        "auto_credentials": {
+            "field_name": "spreadsheet",
+            "config": {"provider": "google", "type": "oauth2"},
+        }
+    }
+    mock_node.block = mock_block
+
+    mock_graph = mocker.MagicMock()
+    mock_graph.nodes = [mock_node]
+
+    # Mock the credentials store to return valid credentials for both
+    mock_store = mocker.MagicMock()
+    mock_regular_creds = mocker.MagicMock()
+    mock_regular_creds.id = "regular-cred-id"
+    mock_regular_creds.provider = "github"
+    mock_regular_creds.type = "api_key"
+
+    mock_auto_creds = mocker.MagicMock()
+    mock_auto_creds.id = "auto-cred-id"
+
+    def get_creds_side_effect(user_id, cred_id):
+        if cred_id == "regular-cred-id":
+            return mock_regular_creds
+        elif cred_id == "auto-cred-id":
+            return mock_auto_creds
+        return None
+
+    mock_store.get_creds_by_id = mocker.AsyncMock(side_effect=get_creds_side_effect)
+    mocker.patch(
+        "backend.executor.utils.get_integration_credentials_store",
+        return_value=mock_store,
+    )
+
+    errors, nodes_to_skip = await _validate_node_input_credentials(
+        graph=mock_graph,
+        user_id="test-user",
+        nodes_input_masks=None,
+    )
+
+    # Both should validate successfully - no errors
+    assert mock_node.id not in errors
+    assert mock_node.id not in nodes_to_skip
+
+
+@pytest.mark.asyncio
+async def test_validate_node_input_credentials_auto_creds_skipped_when_none(
+    mocker: MockerFixture,
+):
+    """
+    When a node has auto_credentials but the field value has _credentials_id=None
+    (e.g., from upstream connection), validation should skip it without error.
+    """
+    from backend.executor.utils import _validate_node_input_credentials
+
+    mock_node = mocker.MagicMock()
+    mock_node.id = "node-with-chained-auto-creds"
+    mock_node.credentials_optional = False
+    mock_node.input_default = {
+        "spreadsheet": {
+            "_credentials_id": None,
+            "id": "file-123",
+            "name": "test.xlsx",
+        }
+    }
+
+    mock_block = mocker.MagicMock()
+    mock_block.input_schema.get_credentials_fields.return_value = {}
+    mock_block.input_schema.get_auto_credentials_fields.return_value = {
+        "credentials": {
+            "field_name": "spreadsheet",
+            "config": {"provider": "google", "type": "oauth2"},
+        }
+    }
+    mock_node.block = mock_block
+
+    mock_graph = mocker.MagicMock()
+    mock_graph.nodes = [mock_node]
+
+    errors, nodes_to_skip = await _validate_node_input_credentials(
+        graph=mock_graph,
+        user_id="test-user",
+        nodes_input_masks=None,
+    )
+
+    # No error - chained data with None cred_id is valid
+    assert mock_node.id not in errors
+
+
+# ============================================================================
+# Tests for CredentialsFieldInfo auto_credential tag (Fix 4: Path 4)
+# ============================================================================
+
+
+def test_credentials_field_info_auto_credential_tag():
+    """
+    [Path 4] CredentialsFieldInfo should support is_auto_credential and
+    input_field_name fields for distinguishing auto from regular credentials.
+    """
+    from backend.data.model import CredentialsFieldInfo
+
+    # Regular credential should have is_auto_credential=False by default
+    regular = CredentialsFieldInfo.model_validate(
+        {
+            "credentials_provider": ["github"],
+            "credentials_types": ["api_key"],
+        },
+        by_alias=True,
+    )
+    assert regular.is_auto_credential is False
+    assert regular.input_field_name is None
+
+    # Auto credential should have is_auto_credential=True
+    auto = CredentialsFieldInfo.model_validate(
+        {
+            "credentials_provider": ["google"],
+            "credentials_types": ["oauth2"],
+            "is_auto_credential": True,
+            "input_field_name": "spreadsheet",
+        },
+        by_alias=True,
+    )
+    assert auto.is_auto_credential is True
+    assert auto.input_field_name == "spreadsheet"
+
+
+def test_make_node_credentials_input_map_excludes_auto_creds(
+    mocker: MockerFixture,
+):
+    """
+    [Path 4] make_node_credentials_input_map should only include regular credentials,
+    not auto_credentials (which are resolved at execution time).
+    """
+    from backend.data.model import CredentialsFieldInfo, CredentialsMetaInput
+    from backend.executor.utils import make_node_credentials_input_map
+    from backend.integrations.providers import ProviderName
+
+    # Create a mock graph with aggregate_credentials_inputs that returns
+    # both regular and auto credentials
+    mock_graph = mocker.MagicMock()
+
+    regular_field_info = CredentialsFieldInfo.model_validate(
+        {
+            "credentials_provider": ["github"],
+            "credentials_types": ["api_key"],
+            "is_auto_credential": False,
+        },
+        by_alias=True,
+    )
+
+    # Mock regular_credentials_inputs property (auto_credentials are excluded)
+    mock_graph.regular_credentials_inputs = {
+        "github_creds": (regular_field_info, {("node-1", "credentials")}, True),
+    }
+
+    graph_credentials_input = {
+        "github_creds": CredentialsMetaInput(
+            id="cred-123",
+            provider=ProviderName("github"),
+            type="api_key",
+        ),
+    }
+
+    result = make_node_credentials_input_map(mock_graph, graph_credentials_input)
+
+    # Regular credentials should be mapped
+    assert "node-1" in result
+    assert "credentials" in result["node-1"]
+
+    # Auto credentials should NOT appear in the result
+    # (they would have been mapped to the kwarg_name "credentials" not "spreadsheet")
+    for node_id, fields in result.items():
+        for field_name, value in fields.items():
+            # Verify no auto-credential phantom entries
+            if isinstance(value, dict):
+                assert "_credentials_id" not in value
@@ -224,6 +224,14 @@ openweathermap_credentials = APIKeyCredentials(
     expires_at=None,
 )
 
+elevenlabs_credentials = APIKeyCredentials(
+    id="f4a8b6c2-3d1e-4f5a-9b8c-7d6e5f4a3b2c",
+    provider="elevenlabs",
+    api_key=SecretStr(settings.secrets.elevenlabs_api_key),
+    title="Use Credits for ElevenLabs",
+    expires_at=None,
+)
+
 DEFAULT_CREDENTIALS = [
     ollama_credentials,
     revid_credentials,
@@ -252,6 +260,7 @@ DEFAULT_CREDENTIALS = [
     v0_credentials,
     webshare_proxy_credentials,
     openweathermap_credentials,
+    elevenlabs_credentials,
 ]
 
 SYSTEM_CREDENTIAL_IDS = {cred.id for cred in DEFAULT_CREDENTIALS}
@@ -366,6 +375,8 @@ class IntegrationCredentialsStore:
             all_credentials.append(webshare_proxy_credentials)
         if settings.secrets.openweathermap_api_key:
             all_credentials.append(openweathermap_credentials)
+        if settings.secrets.elevenlabs_api_key:
+            all_credentials.append(elevenlabs_credentials)
         return all_credentials
 
     async def get_creds_by_id(
@@ -18,6 +18,7 @@ class ProviderName(str, Enum):
    DISCORD = "discord"
    D_ID = "d_id"
    E2B = "e2b"
+   ELEVENLABS = "elevenlabs"
    FAL = "fal"
    GITHUB = "github"
    GOOGLE = "google"
@@ -1,59 +0,0 @@
-{# Waitlist Launch Notification Email Template #}
-{#
-  Template variables:
-    data.agent_name: the name of the launched agent
-    data.waitlist_name: the name of the waitlist the user joined
-    data.store_url: URL to view the agent in the store
-    data.launched_at: when the agent was launched
-
-  Subject: {{ data.agent_name }} is now available!
-#}
-
-{% block content %}
-<h1 style="color: #7c3aed; font-size: 32px; font-weight: 700; margin: 0 0 24px 0; text-align: center;">
-  The wait is over!
-</h1>
-
-<p style="color: #586069; font-size: 18px; text-align: center; margin: 0 0 24px 0;">
-  <strong>'{{ data.agent_name }}'</strong> is now live in the AutoGPT Store!
-</p>
-
-<div style="height: 32px; background: transparent;"></div>
-
-<div style="background: #f3e8ff; border: 1px solid #d8b4fe; border-radius: 8px; padding: 20px; margin: 0;">
-  <h3 style="color: #6b21a8; font-size: 16px; font-weight: 600; margin: 0 0 12px 0;">
-    You're one of the first to know!
-  </h3>
-  <p style="color: #6b21a8; margin: 0; font-size: 16px; line-height: 1.5;">
-    You signed up for the <strong>{{ data.waitlist_name }}</strong> waitlist, and we're excited to let you know that this agent is now ready for you to use.
-  </p>
-</div>
-
-<div style="height: 32px; background: transparent;"></div>
-
-<div style="text-align: center; margin: 24px 0;">
-  <a href="{{ data.store_url }}" style="display: inline-block; background: linear-gradient(135deg, #7c3aed 0%, #5b21b6 100%); color: white; text-decoration: none; padding: 14px 28px; border-radius: 6px; font-weight: 600; font-size: 16px;">
-    Get {{ data.agent_name }} Now
-  </a>
-</div>
-
-<div style="height: 32px; background: transparent;"></div>
-
-<div style="background: #d1ecf1; border: 1px solid #bee5eb; border-radius: 8px; padding: 20px; margin: 0;">
-  <h3 style="color: #0c5460; font-size: 16px; font-weight: 600; margin: 0 0 12px 0;">
-    What can you do now?
-  </h3>
-  <ul style="color: #0c5460; margin: 0; padding-left: 18px; font-size: 16px; line-height: 1.6;">
-    <li>Visit the store to learn more about what this agent can do</li>
-    <li>Install and start using the agent right away</li>
-    <li>Share it with others who might find it useful</li>
-  </ul>
-</div>
-
-<div style="height: 32px; background: transparent;"></div>
-
-<p style="color: #6a737d; font-size: 14px; text-align: center; margin: 24px 0;">
-  Thank you for helping us prioritize what to build! Your interest made this happen.
-</p>
-
-{% endblock %}
@@ -8,6 +8,8 @@ from pathlib import Path
 from typing import TYPE_CHECKING, Literal
 from urllib.parse import urlparse
 
+from pydantic import BaseModel
+
 from backend.util.cloud_storage import get_cloud_storage_handler
 from backend.util.request import Requests
 from backend.util.settings import Config
@@ -17,6 +19,35 @@ from backend.util.virus_scanner import scan_content_safe
 if TYPE_CHECKING:
     from backend.data.execution import ExecutionContext
 
+
+class WorkspaceUri(BaseModel):
+    """Parsed workspace:// URI."""
+
+    file_ref: str  # File ID or path (e.g. "abc123" or "/path/to/file.txt")
+    mime_type: str | None = None  # MIME type from fragment (e.g. "video/mp4")
+    is_path: bool = False  # True if file_ref is a path (starts with "/")
+
+
+def parse_workspace_uri(uri: str) -> WorkspaceUri:
+    """Parse a workspace:// URI into its components.
+
+    Examples:
+        "workspace://abc123" → WorkspaceUri(file_ref="abc123", mime_type=None, is_path=False)
+        "workspace://abc123#video/mp4" → WorkspaceUri(file_ref="abc123", mime_type="video/mp4", is_path=False)
+        "workspace:///path/to/file.txt" → WorkspaceUri(file_ref="/path/to/file.txt", mime_type=None, is_path=True)
+    """
+    raw = uri.removeprefix("workspace://")
+    mime_type: str | None = None
+    if "#" in raw:
+        raw, fragment = raw.split("#", 1)
+        mime_type = fragment or None
+    return WorkspaceUri(
+        file_ref=raw,
+        mime_type=mime_type,
+        is_path=raw.startswith("/"),
+    )
+
+
 # Return format options for store_media_file
 # - "for_local_processing": Returns local file path - use with ffmpeg, MoviePy, PIL, etc.
 # - "for_external_api": Returns data URI (base64) - use when sending content to external APIs
@@ -183,22 +214,20 @@ async def store_media_file(
                 "This file type is only available in CoPilot sessions."
             )
 
-        # Parse workspace reference
-        # workspace://abc123 - by file ID
-        # workspace:///path/to/file.txt - by virtual path
-        file_ref = file[12:]  # Remove "workspace://"
+        # Parse workspace reference (strips #mimeType fragment from file ID)
+        ws = parse_workspace_uri(file)
 
-        if file_ref.startswith("/"):
-            # Path reference
-            workspace_content = await workspace_manager.read_file(file_ref)
-            file_info = await workspace_manager.get_file_info_by_path(file_ref)
+        if ws.is_path:
+            # Path reference: workspace:///path/to/file.txt
+            workspace_content = await workspace_manager.read_file(ws.file_ref)
+            file_info = await workspace_manager.get_file_info_by_path(ws.file_ref)
             filename = sanitize_filename(
                 file_info.name if file_info else f"{uuid.uuid4()}.bin"
            )
         else:
-            # ID reference
-            workspace_content = await workspace_manager.read_file_by_id(file_ref)
-            file_info = await workspace_manager.get_file_info(file_ref)
+            # ID reference: workspace://abc123 or workspace://abc123#video/mp4
+            workspace_content = await workspace_manager.read_file_by_id(ws.file_ref)
+            file_info = await workspace_manager.get_file_info(ws.file_ref)
            filename = sanitize_filename(
                file_info.name if file_info else f"{uuid.uuid4()}.bin"
            )
@@ -334,7 +363,21 @@ async def store_media_file(
 
         # Don't re-save if input was already from workspace
         if is_from_workspace:
-            # Return original workspace reference
+            # Return original workspace reference, ensuring MIME type fragment
+            ws = parse_workspace_uri(file)
+            if not ws.mime_type:
+                # Add MIME type fragment if missing (older refs without it)
+                try:
+                    if ws.is_path:
+                        info = await workspace_manager.get_file_info_by_path(
+                            ws.file_ref
+                        )
+                    else:
+                        info = await workspace_manager.get_file_info(ws.file_ref)
+                    if info:
+                        return MediaFileType(f"{file}#{info.mimeType}")
+                except Exception:
+                    pass
             return MediaFileType(file)
 
         # Save new content to workspace
@@ -346,7 +389,7 @@ async def store_media_file(
             filename=filename,
             overwrite=True,
         )
-        return MediaFileType(f"workspace://{file_record.id}")
+        return MediaFileType(f"workspace://{file_record.id}#{file_record.mimeType}")
 
     else:
         raise ValueError(f"Invalid return_format: {return_format}")
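A quick round-trip of the new parse_workspace_uri helper, restating the docstring examples above as executable assertions (no assumptions beyond the added code itself):

ws = parse_workspace_uri("workspace://abc123#video/mp4")
assert (ws.file_ref, ws.mime_type, ws.is_path) == ("abc123", "video/mp4", False)

ws = parse_workspace_uri("workspace:///path/to/file.txt")
assert (ws.file_ref, ws.mime_type, ws.is_path) == ("/path/to/file.txt", None, True)

# An empty fragment normalizes to mime_type=None via `fragment or None`.
ws = parse_workspace_uri("workspace://abc123#")
assert ws.mime_type is None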
@@ -656,6 +656,7 @@ class Secrets(UpdateTrackingModel["Secrets"], BaseSettings):
     e2b_api_key: str = Field(default="", description="E2B API key")
     nvidia_api_key: str = Field(default="", description="Nvidia API key")
     mem0_api_key: str = Field(default="", description="Mem0 API key")
+    elevenlabs_api_key: str = Field(default="", description="ElevenLabs API key")
 
     linear_client_id: str = Field(default="", description="Linear client ID")
     linear_client_secret: str = Field(default="", description="Linear client secret")
@@ -22,6 +22,7 @@ from backend.data.workspace import (
     soft_delete_workspace_file,
 )
 from backend.util.settings import Config
+from backend.util.virus_scanner import scan_content_safe
 from backend.util.workspace_storage import compute_file_checksum, get_workspace_storage
 
 logger = logging.getLogger(__name__)
@@ -187,6 +188,9 @@ class WorkspaceManager:
                 f"{Config().max_file_size_mb}MB limit"
             )
 
+        # Virus scan content before persisting (defense in depth)
+        await scan_content_safe(content, filename=filename)
+
         # Determine path with session scoping
         if path is None:
             path = f"/{filename}"
@@ -1,53 +0,0 @@
--- CreateEnum
-CREATE TYPE "WaitlistExternalStatus" AS ENUM ('DONE', 'NOT_STARTED', 'CANCELED', 'WORK_IN_PROGRESS');
-
--- AlterEnum
-ALTER TYPE "NotificationType" ADD VALUE 'WAITLIST_LAUNCH';
-
--- CreateTable
-CREATE TABLE "WaitlistEntry" (
-    "id" TEXT NOT NULL,
-    "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
-    "updatedAt" TIMESTAMP(3) NOT NULL,
-    "storeListingId" TEXT,
-    "owningUserId" TEXT NOT NULL,
-    "slug" TEXT NOT NULL,
-    "search" tsvector DEFAULT ''::tsvector,
-    "name" TEXT NOT NULL,
-    "subHeading" TEXT NOT NULL,
-    "videoUrl" TEXT,
-    "agentOutputDemoUrl" TEXT,
-    "imageUrls" TEXT[],
-    "description" TEXT NOT NULL,
-    "categories" TEXT[],
-    "status" "WaitlistExternalStatus" NOT NULL DEFAULT 'NOT_STARTED',
-    "votes" INTEGER NOT NULL DEFAULT 0,
-    "unaffiliatedEmailUsers" TEXT[] DEFAULT ARRAY[]::TEXT[],
-    "isDeleted" BOOLEAN NOT NULL DEFAULT false,
-
-    CONSTRAINT "WaitlistEntry_pkey" PRIMARY KEY ("id")
-);
-
--- CreateTable
-CREATE TABLE "_joinedWaitlists" (
-    "A" TEXT NOT NULL,
-    "B" TEXT NOT NULL
-);
-
--- CreateIndex
-CREATE UNIQUE INDEX "_joinedWaitlists_AB_unique" ON "_joinedWaitlists"("A", "B");
-
--- CreateIndex
-CREATE INDEX "_joinedWaitlists_B_index" ON "_joinedWaitlists"("B");
-
--- AddForeignKey
-ALTER TABLE "WaitlistEntry" ADD CONSTRAINT "WaitlistEntry_storeListingId_fkey" FOREIGN KEY ("storeListingId") REFERENCES "StoreListing"("id") ON DELETE SET NULL ON UPDATE CASCADE;
-
--- AddForeignKey
-ALTER TABLE "WaitlistEntry" ADD CONSTRAINT "WaitlistEntry_owningUserId_fkey" FOREIGN KEY ("owningUserId") REFERENCES "User"("id") ON DELETE RESTRICT ON UPDATE CASCADE;
-
--- AddForeignKey
-ALTER TABLE "_joinedWaitlists" ADD CONSTRAINT "_joinedWaitlists_A_fkey" FOREIGN KEY ("A") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-
--- AddForeignKey
-ALTER TABLE "_joinedWaitlists" ADD CONSTRAINT "_joinedWaitlists_B_fkey" FOREIGN KEY ("B") REFERENCES "WaitlistEntry"("id") ON DELETE CASCADE ON UPDATE CASCADE;
47
autogpt_platform/backend/poetry.lock
generated
@@ -1169,6 +1169,29 @@ attrs = ">=21.3.0"
 e2b = ">=1.5.4,<2.0.0"
 httpx = ">=0.20.0,<1.0.0"
 
+[[package]]
+name = "elevenlabs"
+version = "1.59.0"
+description = ""
+optional = false
+python-versions = "<4.0,>=3.8"
+groups = ["main"]
+files = [
+    {file = "elevenlabs-1.59.0-py3-none-any.whl", hash = "sha256:468145db81a0bc867708b4a8619699f75583e9481b395ec1339d0b443da771ed"},
+    {file = "elevenlabs-1.59.0.tar.gz", hash = "sha256:16e735bd594e86d415dd445d249c8cc28b09996cfd627fbc10102c0a84698859"},
+]
+
+[package.dependencies]
+httpx = ">=0.21.2"
+pydantic = ">=1.9.2"
+pydantic-core = ">=2.18.2,<3.0.0"
+requests = ">=2.20"
+typing_extensions = ">=4.0.0"
+websockets = ">=11.0"
+
+[package.extras]
+pyaudio = ["pyaudio (>=0.2.14)"]
+
 [[package]]
 name = "email-validator"
 version = "2.2.0"
@@ -7361,6 +7384,28 @@ files = [
 defusedxml = ">=0.7.1,<0.8.0"
 requests = "*"
 
+[[package]]
+name = "yt-dlp"
+version = "2025.12.8"
+description = "A feature-rich command-line audio/video downloader"
+optional = false
+python-versions = ">=3.10"
+groups = ["main"]
+files = [
+    {file = "yt_dlp-2025.12.8-py3-none-any.whl", hash = "sha256:36e2584342e409cfbfa0b5e61448a1c5189e345cf4564294456ee509e7d3e065"},
+    {file = "yt_dlp-2025.12.8.tar.gz", hash = "sha256:b773c81bb6b71cb2c111cfb859f453c7a71cf2ef44eff234ff155877184c3e4f"},
+]
+
+[package.extras]
+build = ["build", "hatchling (>=1.27.0)", "pip", "setuptools (>=71.0.2)", "wheel"]
+curl-cffi = ["curl-cffi (>=0.5.10,<0.6.dev0 || >=0.10.dev0,<0.14) ; implementation_name == \"cpython\""]
+default = ["brotli ; implementation_name == \"cpython\"", "brotlicffi ; implementation_name != \"cpython\"", "certifi", "mutagen", "pycryptodomex", "requests (>=2.32.2,<3)", "urllib3 (>=2.0.2,<3)", "websockets (>=13.0)", "yt-dlp-ejs (==0.3.2)"]
+dev = ["autopep8 (>=2.0,<3.0)", "pre-commit", "pytest (>=8.1,<9.0)", "pytest-rerunfailures (>=14.0,<15.0)", "ruff (>=0.14.0,<0.15.0)"]
+pyinstaller = ["pyinstaller (>=6.17.0)"]
+secretstorage = ["cffi", "secretstorage"]
+static-analysis = ["autopep8 (>=2.0,<3.0)", "ruff (>=0.14.0,<0.15.0)"]
+test = ["pytest (>=8.1,<9.0)", "pytest-rerunfailures (>=14.0,<15.0)"]
+
 [[package]]
 name = "zerobouncesdk"
 version = "1.1.2"
@@ -7512,4 +7557,4 @@ cffi = ["cffi (>=1.11)"]
 [metadata]
 lock-version = "2.1"
 python-versions = ">=3.10,<3.14"
-content-hash = "ee5742dc1a9df50dfc06d4b26a1682cbb2b25cab6b79ce5625ec272f93e4f4bf"
+content-hash = "8239323f9ae6713224dffd1fe8ba8b449fe88b6c3c7a90940294a74f43a0387a"
@@ -20,6 +20,7 @@ click = "^8.2.0"
 cryptography = "^45.0"
 discord-py = "^2.5.2"
 e2b-code-interpreter = "^1.5.2"
+elevenlabs = "^1.50.0"
 fastapi = "^0.116.1"
 feedparser = "^6.0.11"
 flake8 = "^7.3.0"
@@ -71,6 +72,7 @@ tweepy = "^4.16.0"
 uvicorn = { extras = ["standard"], version = "^0.35.0" }
 websockets = "^15.0"
 youtube-transcript-api = "^1.2.1"
+yt-dlp = "2025.12.08"
 zerobouncesdk = "^1.1.2"
 # NOTE: please insert new dependencies in their alphabetical location
 pytest-snapshot = "^0.9.0"
@@ -70,10 +70,6 @@ model User {
   OAuthAuthorizationCodes OAuthAuthorizationCode[]
   OAuthAccessTokens       OAuthAccessToken[]
   OAuthRefreshTokens      OAuthRefreshToken[]
-
-  // Waitlist relations
-  waitlistEntries WaitlistEntry[]
-  joinedWaitlists WaitlistEntry[] @relation("joinedWaitlists")
 }
 
 enum OnboardingStep {
@@ -348,7 +344,6 @@ enum NotificationType {
   REFUND_PROCESSED
   AGENT_APPROVED
   AGENT_REJECTED
-  WAITLIST_LAUNCH
 }
 
 model NotificationEvent {
@@ -954,7 +949,6 @@ model StoreListing {
 
   // Relations
   Versions        StoreListingVersion[] @relation("ListingVersions")
-  waitlistEntries WaitlistEntry[]
 
   // Unique index on agentId to ensure only one listing per agent, regardless of number of versions the agent has.
   @@unique([agentGraphId])
@@ -1086,47 +1080,6 @@ model StoreListingReview {
   @@index([reviewByUserId])
 }
 
-enum WaitlistExternalStatus {
-  DONE
-  NOT_STARTED
-  CANCELED
-  WORK_IN_PROGRESS
-}
-
-model WaitlistEntry {
-  id        String   @id @default(uuid())
-  createdAt DateTime @default(now())
-  updatedAt DateTime @updatedAt
-
-  storeListingId String?
-  StoreListing   StoreListing? @relation(fields: [storeListingId], references: [id], onDelete: SetNull)
-
-  owningUserId String
-  OwningUser   User   @relation(fields: [owningUserId], references: [id])
-
-  slug   String
-  search Unsupported("tsvector")? @default(dbgenerated("''::tsvector"))
-
-  // Content fields
-  name               String
-  subHeading         String
-  videoUrl           String?
-  agentOutputDemoUrl String?
-  imageUrls          String[]
-  description        String
-  categories         String[]
-
-  //Waitlist specific fields
-  status      WaitlistExternalStatus @default(NOT_STARTED)
-  votes       Int                    @default(0) // Hide from frontend api
-  joinedUsers User[]                 @relation("joinedWaitlists")
-  // NOTE: DO NOT DOUBLE SEND TO THESE USERS, IF THEY HAVE SIGNED UP SINCE THEY MAY HAVE ALREADY RECEIVED AN EMAIL
-  // DOUBLE CHECK WHEN SENDING THAT THEY ARE NOT IN THE JOINED USERS LIST ALSO
-  unaffiliatedEmailUsers String[] @default([])
-
-  isDeleted Boolean @default(false)
-}
-
 enum SubmissionStatus {
   DRAFT // Being prepared, not yet submitted
   PENDING // Submitted, awaiting review
@@ -3,7 +3,6 @@
   "credentials_input_schema": {
     "properties": {},
     "required": [],
-    "title": "TestGraphCredentialsInputSchema",
     "type": "object"
   },
   "description": "A test graph",
@@ -1,34 +1,14 @@
 [
   {
-    "credentials_input_schema": {
-      "properties": {},
-      "required": [],
-      "title": "TestGraphCredentialsInputSchema",
-      "type": "object"
-    },
+    "created_at": "2025-09-04T13:37:00",
     "description": "A test graph",
     "forked_from_id": null,
     "forked_from_version": null,
-    "has_external_trigger": false,
-    "has_human_in_the_loop": false,
-    "has_sensitive_action": false,
     "id": "graph-123",
-    "input_schema": {
-      "properties": {},
-      "required": [],
-      "type": "object"
-    },
     "instructions": null,
     "is_active": true,
     "name": "Test Graph",
-    "output_schema": {
-      "properties": {},
-      "required": [],
-      "type": "object"
-    },
     "recommended_schedule_cron": null,
-    "sub_graphs": [],
-    "trigger_setup_info": null,
     "user_id": "3e53486c-cf57-477e-ba2a-cb02dc828e1a",
     "version": 1
   }
@@ -1,5 +1,5 @@
 import { CredentialsMetaInput } from "@/app/api/__generated__/models/credentialsMetaInput";
-import { GraphMeta } from "@/app/api/__generated__/models/graphMeta";
+import { GraphModel } from "@/app/api/__generated__/models/graphModel";
 import { CredentialsInput } from "@/components/contextual/CredentialsInput/CredentialsInput";
 import { useState } from "react";
 import { getSchemaDefaultCredentials } from "../../helpers";
@@ -9,7 +9,7 @@ type Credential = CredentialsMetaInput | undefined;
 type Credentials = Record<string, Credential>;
 
 type Props = {
-  agent: GraphMeta | null;
+  agent: GraphModel | null;
   siblingInputs?: Record<string, any>;
   onCredentialsChange: (
     credentials: Record<string, CredentialsMetaInput>,
@@ -1,9 +1,9 @@
 import { CredentialsMetaInput } from "@/app/api/__generated__/models/credentialsMetaInput";
-import { GraphMeta } from "@/app/api/__generated__/models/graphMeta";
+import { GraphModel } from "@/app/api/__generated__/models/graphModel";
 import { BlockIOCredentialsSubSchema } from "@/lib/autogpt-server-api/types";
 
 export function getCredentialFields(
-  agent: GraphMeta | null,
+  agent: GraphModel | null,
 ): AgentCredentialsFields {
   if (!agent) return {};
 
@@ -3,10 +3,10 @@ import type {
   CredentialsMetaInput,
 } from "@/lib/autogpt-server-api/types";
 import type { InputValues } from "./types";
-import { GraphMeta } from "@/app/api/__generated__/models/graphMeta";
+import { GraphModel } from "@/app/api/__generated__/models/graphModel";
 
 export function computeInitialAgentInputs(
-  agent: GraphMeta | null,
+  agent: GraphModel | null,
   existingInputs?: InputValues | null,
 ): InputValues {
   const properties = agent?.input_schema?.properties || {};
@@ -29,7 +29,7 @@ export function computeInitialAgentInputs(
 }
 
 type IsRunDisabledParams = {
-  agent: GraphMeta | null;
+  agent: GraphModel | null;
   isRunning: boolean;
   agentInputs: InputValues | null | undefined;
 };
@@ -1,5 +1,5 @@
 import { Sidebar } from "@/components/__legacy__/Sidebar";
-import { Users, DollarSign, UserSearch, FileText, Clock } from "lucide-react";
+import { Users, DollarSign, UserSearch, FileText } from "lucide-react";
 
 import { IconSliders } from "@/components/__legacy__/ui/icons";
 
@@ -11,11 +11,6 @@ const sidebarLinkGroups = [
         href: "/admin/marketplace",
         icon: <Users className="h-6 w-6" />,
       },
-      {
-        text: "Waitlist Management",
-        href: "/admin/waitlist",
-        icon: <Clock className="h-6 w-6" />,
-      },
       {
         text: "User Spending",
         href: "/admin/spending",
@@ -1,217 +0,0 @@
-"use client";
-
-import { useState } from "react";
-import { useQueryClient } from "@tanstack/react-query";
-import { Button } from "@/components/atoms/Button/Button";
-import { Input } from "@/components/atoms/Input/Input";
-import { Dialog } from "@/components/molecules/Dialog/Dialog";
-import {
-  usePostV2CreateWaitlist,
-  getGetV2ListAllWaitlistsQueryKey,
-} from "@/app/api/__generated__/endpoints/admin/admin";
-import { useToast } from "@/components/molecules/Toast/use-toast";
-import { Plus } from "@phosphor-icons/react";
-
-export function CreateWaitlistButton() {
-  const [open, setOpen] = useState(false);
-  const { toast } = useToast();
-  const queryClient = useQueryClient();
-
-  const createWaitlistMutation = usePostV2CreateWaitlist({
-    mutation: {
-      onSuccess: (response) => {
-        if (response.status === 200) {
-          toast({
-            title: "Success",
-            description: "Waitlist created successfully",
-          });
-          setOpen(false);
-          setFormData({
-            name: "",
-            slug: "",
-            subHeading: "",
-            description: "",
-            categories: "",
-            imageUrls: "",
-            videoUrl: "",
-            agentOutputDemoUrl: "",
-          });
-          queryClient.invalidateQueries({
-            queryKey: getGetV2ListAllWaitlistsQueryKey(),
-          });
-        } else {
-          toast({
-            variant: "destructive",
-            title: "Error",
-            description: "Failed to create waitlist",
-          });
-        }
-      },
-      onError: (error) => {
-        console.error("Error creating waitlist:", error);
-        toast({
-          variant: "destructive",
-          title: "Error",
-          description: "Failed to create waitlist",
-        });
-      },
-    },
-  });
-
-  const [formData, setFormData] = useState({
-    name: "",
-    slug: "",
-    subHeading: "",
-    description: "",
-    categories: "",
-    imageUrls: "",
-    videoUrl: "",
-    agentOutputDemoUrl: "",
-  });
-
-  function handleInputChange(id: string, value: string) {
-    setFormData((prev) => ({
-      ...prev,
-      [id]: value,
-    }));
-  }
-
-  function generateSlug(name: string) {
-    return name
-      .toLowerCase()
-      .replace(/[^a-z0-9]+/g, "-")
-      .replace(/^-|-$/g, "");
-  }
-
-  function handleSubmit(e: React.FormEvent) {
-    e.preventDefault();
-
-    createWaitlistMutation.mutate({
-      data: {
-        name: formData.name,
-        slug: formData.slug || generateSlug(formData.name),
-        subHeading: formData.subHeading,
-        description: formData.description,
-        categories: formData.categories
-          ? formData.categories.split(",").map((c) => c.trim())
-          : [],
-        imageUrls: formData.imageUrls
-          ? formData.imageUrls.split(",").map((u) => u.trim())
-          : [],
-        videoUrl: formData.videoUrl || null,
-        agentOutputDemoUrl: formData.agentOutputDemoUrl || null,
-      },
-    });
-  }
-
-  return (
-    <>
-      <Button onClick={() => setOpen(true)}>
-        <Plus size={16} className="mr-2" />
-        Create Waitlist
-      </Button>
-
-      <Dialog
-        title="Create New Waitlist"
-        controlled={{
-          isOpen: open,
-          set: async (isOpen) => setOpen(isOpen),
-        }}
-        onClose={() => setOpen(false)}
-        styling={{ maxWidth: "600px" }}
-      >
-        <Dialog.Content>
-          <p className="mb-4 text-sm text-zinc-500">
-            Create a new waitlist for an upcoming agent. Users can sign up to be
-            notified when it launches.
-          </p>
-          <form onSubmit={handleSubmit} className="flex flex-col gap-2">
-            <Input
-              id="name"
-              label="Name"
-              value={formData.name}
-              onChange={(e) => handleInputChange("name", e.target.value)}
-              placeholder="SEO Analysis Agent"
-              required
-            />
-
-            <Input
-              id="slug"
-              label="Slug"
-              value={formData.slug}
-              onChange={(e) => handleInputChange("slug", e.target.value)}
-              placeholder="seo-analysis-agent (auto-generated if empty)"
-            />
-
-            <Input
-              id="subHeading"
-              label="Subheading"
-              value={formData.subHeading}
-              onChange={(e) => handleInputChange("subHeading", e.target.value)}
-              placeholder="Analyze your website's SEO in minutes"
-              required
-            />
-
-            <Input
-              id="description"
-              label="Description"
-              type="textarea"
-              value={formData.description}
-              onChange={(e) => handleInputChange("description", e.target.value)}
-              placeholder="Detailed description of what this agent does..."
-              rows={4}
-              required
-            />
-
-            <Input
-              id="categories"
-              label="Categories (comma-separated)"
-              value={formData.categories}
-              onChange={(e) => handleInputChange("categories", e.target.value)}
-              placeholder="SEO, Marketing, Analysis"
-            />
-
-            <Input
-              id="imageUrls"
-              label="Image URLs (comma-separated)"
-              value={formData.imageUrls}
-              onChange={(e) => handleInputChange("imageUrls", e.target.value)}
-              placeholder="https://example.com/image1.jpg, https://example.com/image2.jpg"
-            />
-
-            <Input
-              id="videoUrl"
-              label="Video URL (optional)"
-              value={formData.videoUrl}
-              onChange={(e) => handleInputChange("videoUrl", e.target.value)}
-              placeholder="https://youtube.com/watch?v=..."
-            />
-
-            <Input
-              id="agentOutputDemoUrl"
-              label="Output Demo URL (optional)"
-              value={formData.agentOutputDemoUrl}
-              onChange={(e) =>
-                handleInputChange("agentOutputDemoUrl", e.target.value)
-              }
-              placeholder="https://example.com/demo-output.mp4"
-            />
-
-            <Dialog.Footer>
-              <Button
-                type="button"
-                variant="secondary"
-                onClick={() => setOpen(false)}
-              >
-                Cancel
-              </Button>
-              <Button type="submit" loading={createWaitlistMutation.isPending}>
-                Create Waitlist
-              </Button>
-            </Dialog.Footer>
-          </form>
-        </Dialog.Content>
-      </Dialog>
-    </>
-  );
-}
@@ -1,221 +0,0 @@
-"use client";
-
-import { useState } from "react";
-import { Button } from "@/components/atoms/Button/Button";
-import { Input } from "@/components/atoms/Input/Input";
-import { Select } from "@/components/atoms/Select/Select";
-import { Dialog } from "@/components/molecules/Dialog/Dialog";
-import { useToast } from "@/components/molecules/Toast/use-toast";
-import { usePutV2UpdateWaitlist } from "@/app/api/__generated__/endpoints/admin/admin";
-import type { WaitlistAdminResponse } from "@/app/api/__generated__/models/waitlistAdminResponse";
-import type { WaitlistUpdateRequest } from "@/app/api/__generated__/models/waitlistUpdateRequest";
-import { WaitlistExternalStatus } from "@/app/api/__generated__/models/waitlistExternalStatus";
-
-type EditWaitlistDialogProps = {
-  waitlist: WaitlistAdminResponse;
-  onClose: () => void;
-  onSave: () => void;
-};
-
-const STATUS_OPTIONS = [
-  { value: WaitlistExternalStatus.NOT_STARTED, label: "Not Started" },
-  { value: WaitlistExternalStatus.WORK_IN_PROGRESS, label: "Work In Progress" },
-  { value: WaitlistExternalStatus.DONE, label: "Done" },
-  { value: WaitlistExternalStatus.CANCELED, label: "Canceled" },
-];
-
-export function EditWaitlistDialog({
-  waitlist,
-  onClose,
-  onSave,
-}: EditWaitlistDialogProps) {
-  const { toast } = useToast();
-  const updateWaitlistMutation = usePutV2UpdateWaitlist();
-
-  const [formData, setFormData] = useState({
-    name: waitlist.name,
-    slug: waitlist.slug,
-    subHeading: waitlist.subHeading,
-    description: waitlist.description,
-    categories: waitlist.categories.join(", "),
-    imageUrls: waitlist.imageUrls.join(", "),
-    videoUrl: waitlist.videoUrl || "",
-    agentOutputDemoUrl: waitlist.agentOutputDemoUrl || "",
-    status: waitlist.status,
-    storeListingId: waitlist.storeListingId || "",
-  });
-
-  function handleInputChange(id: string, value: string) {
-    setFormData((prev) => ({
-      ...prev,
-      [id]: value,
-    }));
-  }
-
-  function handleStatusChange(value: string) {
-    setFormData((prev) => ({
-      ...prev,
-      status: value as WaitlistExternalStatus,
-    }));
-  }
-
-  async function handleSubmit(e: React.FormEvent) {
-    e.preventDefault();
-
-    const updateData: WaitlistUpdateRequest = {
-      name: formData.name,
-      slug: formData.slug,
-      subHeading: formData.subHeading,
-      description: formData.description,
-      categories: formData.categories
-        ? formData.categories.split(",").map((c) => c.trim())
-        : [],
-      imageUrls: formData.imageUrls
-        ? formData.imageUrls.split(",").map((u) => u.trim())
-        : [],
-      videoUrl: formData.videoUrl || null,
-      agentOutputDemoUrl: formData.agentOutputDemoUrl || null,
-      status: formData.status,
-      storeListingId: formData.storeListingId || null,
-    };
-
-    updateWaitlistMutation.mutate(
-      { waitlistId: waitlist.id, data: updateData },
-      {
-        onSuccess: (response) => {
-          if (response.status === 200) {
-            toast({
-              title: "Success",
-              description: "Waitlist updated successfully",
-            });
-            onSave();
-          } else {
-            toast({
-              variant: "destructive",
-              title: "Error",
-              description: "Failed to update waitlist",
-            });
-          }
-        },
-        onError: () => {
-          toast({
-            variant: "destructive",
-            title: "Error",
-            description: "Failed to update waitlist",
-          });
-        },
-      },
-    );
-  }
-
-  return (
-    <Dialog
-      title="Edit Waitlist"
-      controlled={{
-        isOpen: true,
-        set: async (open) => {
-          if (!open) onClose();
-        },
-      }}
-      onClose={onClose}
-      styling={{ maxWidth: "600px" }}
-    >
-      <Dialog.Content>
-        <p className="mb-4 text-sm text-zinc-500">
-          Update the waitlist details. Changes will be reflected immediately.
-        </p>
-        <form onSubmit={handleSubmit} className="flex flex-col gap-2">
-          <Input
-            id="name"
-            label="Name"
-            value={formData.name}
-            onChange={(e) => handleInputChange("name", e.target.value)}
-            required
-          />
-
-          <Input
-            id="slug"
-            label="Slug"
-            value={formData.slug}
-            onChange={(e) => handleInputChange("slug", e.target.value)}
-          />
-
-          <Input
-            id="subHeading"
-            label="Subheading"
-            value={formData.subHeading}
-            onChange={(e) => handleInputChange("subHeading", e.target.value)}
-            required
-          />
-
-          <Input
-            id="description"
-            label="Description"
-            type="textarea"
-            value={formData.description}
-            onChange={(e) => handleInputChange("description", e.target.value)}
-            rows={4}
-            required
-          />
-
-          <Select
-            id="status"
-            label="Status"
-            value={formData.status}
-            onValueChange={handleStatusChange}
-            options={STATUS_OPTIONS}
-          />
-
-          <Input
-            id="categories"
-            label="Categories (comma-separated)"
-            value={formData.categories}
-            onChange={(e) => handleInputChange("categories", e.target.value)}
-          />
-
-          <Input
-            id="imageUrls"
-            label="Image URLs (comma-separated)"
-            value={formData.imageUrls}
-            onChange={(e) => handleInputChange("imageUrls", e.target.value)}
-          />
-
-          <Input
-            id="videoUrl"
-            label="Video URL"
-            value={formData.videoUrl}
-            onChange={(e) => handleInputChange("videoUrl", e.target.value)}
-          />
-
-          <Input
-            id="agentOutputDemoUrl"
-            label="Output Demo URL"
-            value={formData.agentOutputDemoUrl}
-            onChange={(e) =>
-              handleInputChange("agentOutputDemoUrl", e.target.value)
-            }
-          />
-
-          <Input
-            id="storeListingId"
-            label="Store Listing ID (for linking)"
-            value={formData.storeListingId}
-            onChange={(e) =>
-              handleInputChange("storeListingId", e.target.value)
-            }
-            placeholder="Leave empty if not linked"
-          />
-
-          <Dialog.Footer>
-            <Button type="button" variant="secondary" onClick={onClose}>
-              Cancel
-            </Button>
-            <Button type="submit" loading={updateWaitlistMutation.isPending}>
-              Save Changes
-            </Button>
-          </Dialog.Footer>
-        </form>
-      </Dialog.Content>
-    </Dialog>
-  );
-}
@@ -1,156 +0,0 @@
-"use client";
-
-import { Button } from "@/components/atoms/Button/Button";
-import { Dialog } from "@/components/molecules/Dialog/Dialog";
-import { User, Envelope, DownloadSimple } from "@phosphor-icons/react";
-import { useGetV2GetWaitlistSignups } from "@/app/api/__generated__/endpoints/admin/admin";
-
-type WaitlistSignupsDialogProps = {
-  waitlistId: string;
-  onClose: () => void;
-};
-
-export function WaitlistSignupsDialog({
-  waitlistId,
-  onClose,
-}: WaitlistSignupsDialogProps) {
-  const {
-    data: signupsResponse,
-    isLoading,
-    isError,
-  } = useGetV2GetWaitlistSignups(waitlistId);
-
-  const signups = signupsResponse?.status === 200 ? signupsResponse.data : null;
-
-  function exportToCSV() {
-    if (!signups) return;
-
-    const headers = ["Type", "Email", "User ID", "Username"];
-    const rows = signups.signups.map((signup) => [
-      signup.type,
-      signup.email || "",
-      signup.userId || "",
-      signup.username || "",
-    ]);
-
-    const escapeCell = (cell: string) => `"${cell.replace(/"/g, '""')}"`;
-
-    const csvContent = [
-      headers.join(","),
-      ...rows.map((row) => row.map(escapeCell).join(",")),
-    ].join("\n");
-
-    const blob = new Blob([csvContent], { type: "text/csv" });
-    const url = window.URL.createObjectURL(blob);
-    const a = document.createElement("a");
-    a.href = url;
-    a.download = `waitlist-${waitlistId}-signups.csv`;
-    a.click();
-    window.URL.revokeObjectURL(url);
-  }
-
-  function renderContent() {
-    if (isLoading) {
-      return <div className="py-10 text-center">Loading signups...</div>;
-    }
-
-    if (isError) {
-      return (
-        <div className="py-10 text-center text-red-500">
-          Failed to load signups. Please try again.
-        </div>
-      );
-    }
-
-    if (!signups || signups.signups.length === 0) {
-      return (
-        <div className="py-10 text-center text-gray-500">
-          No signups yet for this waitlist.
-        </div>
-      );
-    }
-
-    return (
-      <>
-        <div className="flex justify-end">
-          <Button variant="secondary" size="small" onClick={exportToCSV}>
-            <DownloadSimple className="mr-2 h-4 w-4" size={16} />
-            Export CSV
-          </Button>
-        </div>
-        <div className="max-h-[400px] overflow-y-auto rounded-md border">
-          <table className="w-full">
-            <thead className="bg-gray-50 dark:bg-gray-800">
-              <tr>
-                <th className="px-4 py-3 text-left text-sm font-medium">
-                  Type
-                </th>
-                <th className="px-4 py-3 text-left text-sm font-medium">
-                  Email / Username
-                </th>
-                <th className="px-4 py-3 text-left text-sm font-medium">
-                  User ID
-                </th>
-              </tr>
-            </thead>
-            <tbody className="divide-y">
-              {signups.signups.map((signup, index) => (
-                <tr key={index}>
-                  <td className="px-4 py-3">
-                    {signup.type === "user" ? (
-                      <span className="flex items-center gap-1 text-blue-600">
-                        <User className="h-4 w-4" size={16} /> User
-                      </span>
-                    ) : (
-                      <span className="flex items-center gap-1 text-gray-600">
-                        <Envelope className="h-4 w-4" size={16} /> Email
-                      </span>
-                    )}
-                  </td>
-                  <td className="px-4 py-3">
-                    {signup.type === "user"
-                      ? signup.username || signup.email
-                      : signup.email}
-                  </td>
-                  <td className="px-4 py-3 font-mono text-sm">
-                    {signup.userId || "-"}
-                  </td>
-                </tr>
-              ))}
-            </tbody>
-          </table>
-        </div>
-      </>
-    );
-  }
-
-  return (
-    <Dialog
-      title="Waitlist Signups"
-      controlled={{
-        isOpen: true,
-        set: async (open) => {
-          if (!open) onClose();
-        },
-      }}
-      onClose={onClose}
-      styling={{ maxWidth: "700px" }}
-    >
-      <Dialog.Content>
-        <p className="mb-4 text-sm text-zinc-500">
-          {signups
-            ? `${signups.totalCount} total signups`
-            : "Loading signups..."}
-        </p>
-
-        {renderContent()}
-
-        <Dialog.Footer>
-          <Button variant="secondary" onClick={onClose}>
-            Close
-          </Button>
-        </Dialog.Footer>
-      </Dialog.Content>
-    </Dialog>
-  );
-}
@@ -1,206 +0,0 @@
-"use client";
-
-import { useState } from "react";
-import { useQueryClient } from "@tanstack/react-query";
-import {
-  Table,
-  TableBody,
-  TableCell,
-  TableHead,
-  TableHeader,
-  TableRow,
-} from "@/components/__legacy__/ui/table";
-import { Button } from "@/components/atoms/Button/Button";
-import {
-  useGetV2ListAllWaitlists,
-  useDeleteV2DeleteWaitlist,
-  getGetV2ListAllWaitlistsQueryKey,
-} from "@/app/api/__generated__/endpoints/admin/admin";
-import type { WaitlistAdminResponse } from "@/app/api/__generated__/models/waitlistAdminResponse";
-import { EditWaitlistDialog } from "./EditWaitlistDialog";
-import { WaitlistSignupsDialog } from "./WaitlistSignupsDialog";
-import { Trash, PencilSimple, Users, Link } from "@phosphor-icons/react";
-import { useToast } from "@/components/molecules/Toast/use-toast";
-
-export function WaitlistTable() {
-  const [editingWaitlist, setEditingWaitlist] =
-    useState<WaitlistAdminResponse | null>(null);
-  const [viewingSignups, setViewingSignups] = useState<string | null>(null);
-  const { toast } = useToast();
-  const queryClient = useQueryClient();
-
-  const { data: response, isLoading, error } = useGetV2ListAllWaitlists();
-
-  const deleteWaitlistMutation = useDeleteV2DeleteWaitlist({
-    mutation: {
-      onSuccess: () => {
-        toast({
-          title: "Success",
-          description: "Waitlist deleted successfully",
-        });
-        queryClient.invalidateQueries({
-          queryKey: getGetV2ListAllWaitlistsQueryKey(),
-        });
-      },
-      onError: (error) => {
-        console.error("Error deleting waitlist:", error);
-        toast({
-          variant: "destructive",
-          title: "Error",
-          description: "Failed to delete waitlist",
-        });
-      },
-    },
-  });
-
-  function handleDelete(waitlistId: string) {
-    if (!confirm("Are you sure you want to delete this waitlist?")) return;
-    deleteWaitlistMutation.mutate({ waitlistId });
-  }
-
-  function handleWaitlistSaved() {
-    setEditingWaitlist(null);
-    queryClient.invalidateQueries({
-      queryKey: getGetV2ListAllWaitlistsQueryKey(),
-    });
-  }
-
-  function formatStatus(status: string) {
-    const statusColors: Record<string, string> = {
-      NOT_STARTED: "bg-gray-100 text-gray-800",
-      WORK_IN_PROGRESS: "bg-blue-100 text-blue-800",
-      DONE: "bg-green-100 text-green-800",
-      CANCELED: "bg-red-100 text-red-800",
-    };
-
-    return (
-      <span
-        className={`rounded-full px-2 py-1 text-xs font-medium ${statusColors[status] || "bg-gray-100 text-gray-700"}`}
-      >
-        {status.replace(/_/g, " ")}
-      </span>
-    );
-  }
-
-  function formatDate(dateStr: string) {
-    if (!dateStr) return "-";
-    return new Intl.DateTimeFormat("en-US", {
-      month: "short",
-      day: "numeric",
-      year: "numeric",
-    }).format(new Date(dateStr));
-  }
-
-  if (isLoading) {
-    return <div className="py-10 text-center">Loading waitlists...</div>;
-  }
-
-  if (error) {
-    return (
-      <div className="py-10 text-center text-red-500">
-        Error loading waitlists. Please try again.
-      </div>
-    );
-  }
-
-  const waitlists = response?.status === 200 ? response.data.waitlists : [];
-
-  if (waitlists.length === 0) {
-    return (
-      <div className="py-10 text-center text-gray-500">
-        No waitlists found. Create one to get started!
-      </div>
-    );
-  }
-
-  return (
-    <>
-      <div className="rounded-md border bg-white">
-        <Table>
-          <TableHeader className="bg-gray-50">
-            <TableRow>
-              <TableHead className="font-medium">Name</TableHead>
-              <TableHead className="font-medium">Status</TableHead>
-              <TableHead className="font-medium">Signups</TableHead>
-              <TableHead className="font-medium">Votes</TableHead>
-              <TableHead className="font-medium">Created</TableHead>
-              <TableHead className="font-medium">Linked Agent</TableHead>
-              <TableHead className="font-medium">Actions</TableHead>
-            </TableRow>
-          </TableHeader>
-          <TableBody>
-            {waitlists.map((waitlist) => (
-              <TableRow key={waitlist.id}>
-                <TableCell>
-                  <div>
-                    <div className="font-medium">{waitlist.name}</div>
-                    <div className="text-sm text-gray-500">
-                      {waitlist.subHeading}
-                    </div>
-                  </div>
-                </TableCell>
-                <TableCell>{formatStatus(waitlist.status)}</TableCell>
-                <TableCell>{waitlist.signupCount}</TableCell>
-                <TableCell>{waitlist.votes}</TableCell>
-                <TableCell>{formatDate(waitlist.createdAt)}</TableCell>
-                <TableCell>
-                  {waitlist.storeListingId ? (
-                    <span className="text-green-600">
-                      <Link size={16} className="inline" /> Linked
-                    </span>
-                  ) : (
-                    <span className="text-gray-400">Not linked</span>
-                  )}
-                </TableCell>
-                <TableCell>
-                  <div className="flex gap-2">
-                    <Button
-                      variant="ghost"
-                      size="small"
-                      onClick={() => setViewingSignups(waitlist.id)}
-                      title="View signups"
-                    >
-                      <Users size={16} />
-                    </Button>
-                    <Button
-                      variant="ghost"
-                      size="small"
-                      onClick={() => setEditingWaitlist(waitlist)}
-                      title="Edit"
-                    >
-                      <PencilSimple size={16} />
-                    </Button>
-                    <Button
-                      variant="ghost"
-                      size="small"
-                      onClick={() => handleDelete(waitlist.id)}
-                      title="Delete"
-                      disabled={deleteWaitlistMutation.isPending}
-                    >
-                      <Trash size={16} className="text-red-500" />
-                    </Button>
-                  </div>
-                </TableCell>
-              </TableRow>
-            ))}
-          </TableBody>
-        </Table>
-      </div>
-
-      {editingWaitlist && (
-        <EditWaitlistDialog
-          waitlist={editingWaitlist}
-          onClose={() => setEditingWaitlist(null)}
-          onSave={handleWaitlistSaved}
-        />
-      )}
-
-      {viewingSignups && (
-        <WaitlistSignupsDialog
-          waitlistId={viewingSignups}
-          onClose={() => setViewingSignups(null)}
-        />
-      )}
-    </>
-  );
-}
@@ -1,52 +0,0 @@
-import { withRoleAccess } from "@/lib/withRoleAccess";
-import { Suspense } from "react";
-import { WaitlistTable } from "./components/WaitlistTable";
-import { CreateWaitlistButton } from "./components/CreateWaitlistButton";
-import { Warning } from "@phosphor-icons/react/dist/ssr";
-
-function WaitlistDashboard() {
-  return (
-    <div className="mx-auto p-6">
-      <div className="flex flex-col gap-4">
-        <div className="flex items-center justify-between">
-          <div>
-            <h1 className="text-3xl font-bold">Waitlist Management</h1>
-            <p className="text-gray-500">
-              Manage upcoming agent waitlists and track signups
-            </p>
-          </div>
-          <CreateWaitlistButton />
-        </div>
-
-        <div className="flex items-start gap-3 rounded-lg border border-amber-300 bg-amber-50 p-4 dark:border-amber-700 dark:bg-amber-950">
-          <Warning
-            className="mt-0.5 h-5 w-5 flex-shrink-0 text-amber-600 dark:text-amber-400"
-            weight="fill"
-          />
-          <div className="text-sm text-amber-800 dark:text-amber-200">
-            <p className="font-medium">TODO: Email-only signup notifications</p>
-            <p className="mt-1 text-amber-700 dark:text-amber-300">
-              Notifications for email-only signups (users who weren't
-              logged in) have not been implemented yet. Currently only
-              registered users will receive launch emails.
-            </p>
-          </div>
-        </div>
-
-        <Suspense
-          fallback={
-            <div className="py-10 text-center">Loading waitlists...</div>
-          }
-        >
-          <WaitlistTable />
-        </Suspense>
-      </div>
-    </div>
-  );
-}
-
-export default async function WaitlistDashboardPage() {
-  const withAdminAccess = await withRoleAccess(["admin"]);
-  const ProtectedWaitlistDashboard = await withAdminAccess(WaitlistDashboard);
-  return <ProtectedWaitlistDashboard />;
-}
@@ -30,6 +30,8 @@ import {
 } from "@/components/atoms/Tooltip/BaseTooltip";
 import { GraphMeta } from "@/lib/autogpt-server-api";
 import jaro from "jaro-winkler";
+import { getV1GetSpecificGraph } from "@/app/api/__generated__/endpoints/graphs/graphs";
+import { okData } from "@/app/api/helpers";
 
 type _Block = Omit<Block, "inputSchema" | "outputSchema"> & {
   uiKey?: string;
@@ -107,6 +109,8 @@ export function BlocksControl({
     .filter((b) => b.uiType !== BlockUIType.AGENT)
     .sort((a, b) => a.name.localeCompare(b.name));
 
+  // Agent blocks are created from GraphMeta which doesn't include schemas.
+  // Schemas will be fetched on-demand when the block is actually added.
   const agentBlockList = flows
     .map((flow): _Block => {
       return {
@@ -116,8 +120,9 @@ export function BlocksControl({
           `Ver.${flow.version}` +
           (flow.description ? ` | ${flow.description}` : ""),
         categories: [{ category: "AGENT", description: "" }],
-        inputSchema: flow.input_schema,
-        outputSchema: flow.output_schema,
+        // Empty schemas - will be populated when block is added
+        inputSchema: { type: "object", properties: {} },
+        outputSchema: { type: "object", properties: {} },
         staticOutput: false,
         uiType: BlockUIType.AGENT,
         costs: [],
@@ -125,8 +130,7 @@ export function BlocksControl({
         hardcodedValues: {
          graph_id: flow.id,
          graph_version: flow.version,
-          input_schema: flow.input_schema,
-          output_schema: flow.output_schema,
+          // Schemas will be fetched on-demand when block is added
        },
      };
    })
@@ -182,6 +186,37 @@ export function BlocksControl({
     setSelectedCategory(null);
   }, []);
 
+  // Handler to add a block, fetching graph data on-demand for agent blocks
+  const handleAddBlock = useCallback(
+    async (block: _Block & { notAvailable: string | null }) => {
+      if (block.notAvailable) return;
+
+      // For agent blocks, fetch the full graph to get schemas
+      if (block.uiType === BlockUIType.AGENT && block.hardcodedValues) {
+        const graphID = block.hardcodedValues.graph_id as string;
+        const graphVersion = block.hardcodedValues.graph_version as number;
+        const graphData = okData(
+          await getV1GetSpecificGraph(graphID, { version: graphVersion }),
+        );
+
+        if (graphData) {
+          addBlock(block.id, block.name, {
+            ...block.hardcodedValues,
+            input_schema: graphData.input_schema,
+            output_schema: graphData.output_schema,
+          });
+        } else {
+          // Fallback: add without schemas (will be incomplete)
+          console.error("Failed to fetch graph data for agent block");
+          addBlock(block.id, block.name, block.hardcodedValues || {});
+        }
+      } else {
+        addBlock(block.id, block.name, block.hardcodedValues || {});
+      }
+    },
+    [addBlock],
+  );
+
   // Extract unique categories from blocks
   const categories = useMemo(() => {
     return Array.from(
@@ -303,10 +338,7 @@ export function BlocksControl({
             }),
           );
         }}
-        onClick={() =>
-          !block.notAvailable &&
-          addBlock(block.id, block.name, block?.hardcodedValues || {})
-        }
+        onClick={() => handleAddBlock(block)}
        title={block.notAvailable ?? undefined}
       >
         <div
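The BlocksControl hunks above unwrap the generated client's response with an `okData` helper imported from "@/app/api/helpers". That helper is not part of this diff; the following is a hypothetical minimal sketch of the unwrapping pattern it presumably implements (the types are illustrative stand-ins, not the real generated models):

// Hypothetical sketch only; the real okData lives in "@/app/api/helpers"
// and is not shown in this diff.
type ApiResponse<T> = { status: number; data: T };

function okDataSketch<T>(response: ApiResponse<T>): T | null {
  // Hand back the payload only when the endpoint reports success;
  // callers above fall back to schema-less blocks when this is null.
  return response.status === 200 ? response.data : null;
}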
@@ -1,6 +1,6 @@
 import { beautifyString } from "@/lib/utils";
 import { Clipboard, Maximize2 } from "lucide-react";
-import React, { useState } from "react";
+import React, { useMemo, useState } from "react";
 import { Button } from "../../../../../components/__legacy__/ui/button";
 import { ContentRenderer } from "../../../../../components/__legacy__/ui/render";
 import {
@@ -11,6 +11,12 @@ import {
   TableHeader,
   TableRow,
 } from "../../../../../components/__legacy__/ui/table";
+import type { OutputMetadata } from "@/components/contextual/OutputRenderers";
+import {
+  globalRegistry,
+  OutputItem,
+} from "@/components/contextual/OutputRenderers";
+import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag";
 import { useToast } from "../../../../../components/molecules/Toast/use-toast";
 import ExpandableOutputDialog from "./ExpandableOutputDialog";
 
@@ -26,6 +32,9 @@ export default function DataTable({
   data,
 }: DataTableProps) {
   const { toast } = useToast();
+  const enableEnhancedOutputHandling = useGetFlag(
+    Flag.ENABLE_ENHANCED_OUTPUT_HANDLING,
+  );
   const [expandedDialog, setExpandedDialog] = useState<{
     isOpen: boolean;
     execId: string;
@@ -33,6 +42,15 @@ export default function DataTable({
     data: any[];
   } | null>(null);
 
+  // Prepare renderers for each item when enhanced mode is enabled
+  const getItemRenderer = useMemo(() => {
+    if (!enableEnhancedOutputHandling) return null;
+    return (item: unknown) => {
+      const metadata: OutputMetadata = {};
+      return globalRegistry.getRenderer(item, metadata);
+    };
+  }, [enableEnhancedOutputHandling]);
+
   const copyData = (pin: string, data: string) => {
     navigator.clipboard.writeText(data).then(() => {
       toast({
@@ -102,7 +120,22 @@ export default function DataTable({
               <Clipboard size={18} />
             </Button>
           </div>
-          {value.map((item, index) => (
+          {value.map((item, index) => {
+            const renderer = getItemRenderer?.(item);
+            if (enableEnhancedOutputHandling && renderer) {
+              const metadata: OutputMetadata = {};
+              return (
+                <React.Fragment key={index}>
+                  <OutputItem
+                    value={item}
+                    metadata={metadata}
+                    renderer={renderer}
+                  />
+                  {index < value.length - 1 && ", "}
+                </React.Fragment>
+              );
+            }
+            return (
               <React.Fragment key={index}>
                 <ContentRenderer
                   value={item}
@@ -110,7 +143,8 @@ export default function DataTable({
                 />
                 {index < value.length - 1 && ", "}
               </React.Fragment>
-          ))}
+            );
+          })}
         </div>
       </TableCell>
     </TableRow>
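DataTable above (and NodeOutputs further down) gates the new rendering path behind Flag.ENABLE_ENHANCED_OUTPUT_HANDLING and asks the imported globalRegistry for a per-item renderer, falling back to the legacy ContentRenderer when none matches. The registry itself is defined outside this diff; one plausible minimal shape for such a registry, with purely illustrative names, could be:

interface OutputRendererSketch {
  canRender(value: unknown, metadata: Record<string, unknown>): boolean;
  render(value: unknown, metadata: Record<string, unknown>): unknown;
}

class RendererRegistrySketch {
  private renderers: OutputRendererSketch[] = [];

  register(renderer: OutputRendererSketch): void {
    this.renderers.push(renderer);
  }

  // First matching renderer wins; returning null lets callers fall back
  // to the legacy path, which is how both call sites in this diff treat it.
  getRenderer(
    value: unknown,
    metadata: Record<string, unknown>,
  ): OutputRendererSketch | null {
    return this.renderers.find((r) => r.canRender(value, metadata)) ?? null;
  }
}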
@@ -29,13 +29,17 @@ import "@xyflow/react/dist/style.css";
 import { ConnectedEdge, CustomNode } from "../CustomNode/CustomNode";
 import "./flow.css";
 import {
+  BlockIORootSchema,
   BlockUIType,
   formatEdgeID,
   GraphExecutionID,
   GraphID,
   GraphMeta,
   LibraryAgent,
+  SpecialBlockID,
 } from "@/lib/autogpt-server-api";
+import { getV1GetSpecificGraph } from "@/app/api/__generated__/endpoints/graphs/graphs";
+import { okData } from "@/app/api/helpers";
 import { IncompatibilityInfo } from "../../../hooks/useSubAgentUpdate/types";
 import { Key, storage } from "@/services/storage/local-storage";
 import { findNewlyAddedBlockCoordinates, getTypeColor } from "@/lib/utils";
@@ -687,8 +691,94 @@ const FlowEditor: React.FC<{
     [getNode, updateNode, nodes],
   );
 
+  /* Shared helper to create and add a node */
+  const createAndAddNode = useCallback(
+    async (
+      blockID: string,
+      blockName: string,
+      hardcodedValues: Record<string, any>,
+      position: { x: number; y: number },
+    ): Promise<CustomNode | null> => {
+      const nodeSchema = availableBlocks.find((node) => node.id === blockID);
+      if (!nodeSchema) {
+        console.error(`Schema not found for block ID: ${blockID}`);
+        return null;
+      }
+
+      // For agent blocks, fetch the full graph to get schemas
+      let inputSchema: BlockIORootSchema = nodeSchema.inputSchema;
+      let outputSchema: BlockIORootSchema = nodeSchema.outputSchema;
+      let finalHardcodedValues = hardcodedValues;
+
+      if (blockID === SpecialBlockID.AGENT) {
+        const graphID = hardcodedValues.graph_id as string;
+        const graphVersion = hardcodedValues.graph_version as number;
+        const graphData = okData(
+          await getV1GetSpecificGraph(graphID, { version: graphVersion }),
+        );
+
+        if (graphData) {
+          inputSchema = graphData.input_schema as BlockIORootSchema;
+          outputSchema = graphData.output_schema as BlockIORootSchema;
+          finalHardcodedValues = {
+            ...hardcodedValues,
+            input_schema: graphData.input_schema,
+            output_schema: graphData.output_schema,
+          };
+        } else {
+          console.error("Failed to fetch graph data for agent block");
+        }
+      }
+
+      const newNode: CustomNode = {
+        id: nodeId.toString(),
+        type: "custom",
+        position,
+        data: {
+          blockType: blockName,
+          blockCosts: nodeSchema.costs || [],
+          title: `${blockName} ${nodeId}`,
+          description: nodeSchema.description,
+          categories: nodeSchema.categories,
+          inputSchema: inputSchema,
+          outputSchema: outputSchema,
+          hardcodedValues: finalHardcodedValues,
+          connections: [],
+          isOutputOpen: false,
+          block_id: blockID,
+          isOutputStatic: nodeSchema.staticOutput,
+          uiType: nodeSchema.uiType,
+        },
+      };
+
+      addNodes(newNode);
+      setNodeId((prevId) => prevId + 1);
+      clearNodesStatusAndOutput();
+
+      history.push({
+        type: "ADD_NODE",
+        payload: { node: { ...newNode, ...newNode.data } },
+        undo: () => deleteElements({ nodes: [{ id: newNode.id }] }),
+        redo: () => addNodes(newNode),
+      });
+
+      return newNode;
+    },
+    [
+      availableBlocks,
+      nodeId,
+      addNodes,
+      deleteElements,
+      clearNodesStatusAndOutput,
+    ],
+  );
+
   const addNode = useCallback(
-    (blockId: string, nodeType: string, hardcodedValues: any = {}) => {
+    async (
+      blockId: string,
+      nodeType: string,
+      hardcodedValues: Record<string, any> = {},
+    ) => {
       const nodeSchema = availableBlocks.find((node) => node.id === blockId);
       if (!nodeSchema) {
         console.error(`Schema not found for block ID: ${blockId}`);
@@ -707,73 +797,42 @@ const FlowEditor: React.FC<{
       // Alternative: We could also use D3 force, Intersection for this (React flow Pro examples)
 
       const { x, y } = getViewport();
-      const viewportCoordinates =
+      const position =
         nodeDimensions && Object.keys(nodeDimensions).length > 0
-          ? // we will get all the dimension of nodes, then store
-            findNewlyAddedBlockCoordinates(
+          ? findNewlyAddedBlockCoordinates(
               nodeDimensions,
               nodeSchema.uiType == BlockUIType.NOTE ? 300 : 500,
               60,
               1.0,
             )
-          : // we will get all the dimension of nodes, then store
-            {
+          : {
               x: window.innerWidth / 2 - x,
               y: window.innerHeight / 2 - y,
             };
 
-      const newNode: CustomNode = {
-        id: nodeId.toString(),
-        type: "custom",
-        position: viewportCoordinates, // Set the position to the calculated viewport center
-        data: {
-          blockType: nodeType,
-          blockCosts: nodeSchema.costs,
-          title: `${nodeType} ${nodeId}`,
-          description: nodeSchema.description,
-          categories: nodeSchema.categories,
-          inputSchema: nodeSchema.inputSchema,
-          outputSchema: nodeSchema.outputSchema,
-          hardcodedValues: hardcodedValues,
-          connections: [],
-          isOutputOpen: false,
-          block_id: blockId,
-          isOutputStatic: nodeSchema.staticOutput,
-          uiType: nodeSchema.uiType,
-        },
-      };
-
-      addNodes(newNode);
-      setNodeId((prevId) => prevId + 1);
-      clearNodesStatusAndOutput(); // Clear status and output when a new node is added
+      const newNode = await createAndAddNode(
+        blockId,
+        nodeType,
+        hardcodedValues,
+        position,
+      );
+      if (!newNode) return;
 
       setViewport(
         {
-          // Rough estimate of the dimension of the node is: 500x400px.
-          // Though we skip shifting the X, considering the block menu side-bar.
-          x: -viewportCoordinates.x * 0.8 + (window.innerWidth - 0.0) / 2,
-          y: -viewportCoordinates.y * 0.8 + (window.innerHeight - 400) / 2,
+          x: -position.x * 0.8 + (window.innerWidth - 0.0) / 2,
+          y: -position.y * 0.8 + (window.innerHeight - 400) / 2,
           zoom: 0.8,
         },
         { duration: 500 },
       );
-
-      history.push({
-        type: "ADD_NODE",
-        payload: { node: { ...newNode, ...newNode.data } },
-        undo: () => deleteElements({ nodes: [{ id: newNode.id }] }),
-        redo: () => addNodes(newNode),
-      });
     },
     [
-      nodeId,
       getViewport,
       setViewport,
       availableBlocks,
-      addNodes,
       nodeDimensions,
-      deleteElements,
-      clearNodesStatusAndOutput,
+      createAndAddNode,
     ],
   );
@@ -920,7 +979,7 @@ const FlowEditor: React.FC<{
   }, []);
 
   const onDrop = useCallback(
-    (event: React.DragEvent) => {
+    async (event: React.DragEvent) => {
       event.preventDefault();
 
       const blockData = event.dataTransfer.getData("application/reactflow");
@@ -935,62 +994,17 @@ const FlowEditor: React.FC<{
          y: event.clientY,
        });
 
-        // Find the block schema
-        const nodeSchema = availableBlocks.find((node) => node.id === blockId);
-        if (!nodeSchema) {
-          console.error(`Schema not found for block ID: ${blockId}`);
-          return;
-        }
-
-        // Create the new node at the drop position
-        const newNode: CustomNode = {
-          id: nodeId.toString(),
-          type: "custom",
-          position,
-          data: {
-            blockType: blockName,
-            blockCosts: nodeSchema.costs || [],
-            title: `${blockName} ${nodeId}`,
-            description: nodeSchema.description,
-            categories: nodeSchema.categories,
-            inputSchema: nodeSchema.inputSchema,
-            outputSchema: nodeSchema.outputSchema,
-            hardcodedValues: hardcodedValues,
-            connections: [],
-            isOutputOpen: false,
-            block_id: blockId,
-            uiType: nodeSchema.uiType,
-          },
-        };
-
-        history.push({
-          type: "ADD_NODE",
-          payload: { node: { ...newNode, ...newNode.data } },
-          undo: () => {
-            deleteElements({ nodes: [{ id: newNode.id } as any], edges: [] });
-          },
-          redo: () => {
-            addNodes([newNode]);
-          },
-        });
-        addNodes([newNode]);
-        clearNodesStatusAndOutput();
-
-        setNodeId((prevId) => prevId + 1);
+        await createAndAddNode(
+          blockId,
+          blockName,
+          hardcodedValues || {},
+          position,
+        );
      } catch (error) {
        console.error("Failed to drop block:", error);
      }
    },
-    [
-      nodeId,
-      availableBlocks,
-      nodes,
-      edges,
-      addNodes,
-      screenToFlowPosition,
-      deleteElements,
-      clearNodesStatusAndOutput,
-    ],
+    [screenToFlowPosition, createAndAddNode],
  );
 
  const buildContextValue: BuilderContextType = useMemo(
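The Flow editor refactor above folds the click-to-add and drag-and-drop paths into one async createAndAddNode helper, which hydrates agent blocks in two places at once: the node-level input/output schemas (used to render pins in the editor) and the hardcodedValues (which travel with the saved graph). A compressed sketch of that dual hydration, using hypothetical reduced types rather than the real CustomNode model:

type SchemaSketch = { type: "object"; properties: Record<string, unknown> };
type AgentGraphSketch = {
  input_schema: SchemaSketch;
  output_schema: SchemaSketch;
};

function hydrateAgentNodeSketch(
  hardcodedValues: Record<string, unknown>,
  graph: AgentGraphSketch,
) {
  return {
    // The editor renders pins from the node-level schemas...
    inputSchema: graph.input_schema,
    outputSchema: graph.output_schema,
    // ...while the saved graph carries the same schemas inside
    // hardcodedValues, mirroring what createAndAddNode does above.
    hardcodedValues: {
      ...hardcodedValues,
      input_schema: graph.input_schema,
      output_schema: graph.output_schema,
    },
  };
}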
@@ -1,8 +1,14 @@
-import React, { useContext, useState } from "react";
+import React, { useContext, useMemo, useState } from "react";
 import { Button } from "@/components/__legacy__/ui/button";
 import { Maximize2 } from "lucide-react";
 import * as Separator from "@radix-ui/react-separator";
 import { ContentRenderer } from "@/components/__legacy__/ui/render";
+import type { OutputMetadata } from "@/components/contextual/OutputRenderers";
+import {
+  globalRegistry,
+  OutputItem,
+} from "@/components/contextual/OutputRenderers";
+import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag";
 
 import { beautifyString } from "@/lib/utils";
 
@@ -21,6 +27,9 @@ export default function NodeOutputs({
   data,
 }: NodeOutputsProps) {
   const builderContext = useContext(BuilderContext);
+  const enableEnhancedOutputHandling = useGetFlag(
+    Flag.ENABLE_ENHANCED_OUTPUT_HANDLING,
+  );
 
   const [expandedDialog, setExpandedDialog] = useState<{
     isOpen: boolean;
@@ -37,6 +46,15 @@ export default function NodeOutputs({
 
   const { getNodeTitle } = builderContext;
 
+  // Prepare renderers for each item when enhanced mode is enabled
+  const getItemRenderer = useMemo(() => {
+    if (!enableEnhancedOutputHandling) return null;
+    return (item: unknown) => {
+      const metadata: OutputMetadata = {};
+      return globalRegistry.getRenderer(item, metadata);
+    };
+  }, [enableEnhancedOutputHandling]);
+
   const getBeautifiedPinName = (pin: string) => {
     if (!pin.startsWith("tools_^_")) {
       return beautifyString(pin);
@@ -87,7 +105,22 @@ export default function NodeOutputs({
           <div className="mt-2">
             <strong className="mr-2">Data:</strong>
             <div className="mt-1">
-              {dataArray.slice(0, 10).map((item, index) => (
+              {dataArray.slice(0, 10).map((item, index) => {
+                const renderer = getItemRenderer?.(item);
+                if (enableEnhancedOutputHandling && renderer) {
+                  const metadata: OutputMetadata = {};
+                  return (
+                    <React.Fragment key={index}>
+                      <OutputItem
+                        value={item}
+                        metadata={metadata}
+                        renderer={renderer}
+                      />
+                      {index < Math.min(dataArray.length, 10) - 1 && ", "}
+                    </React.Fragment>
+                  );
+                }
+                return (
                   <React.Fragment key={index}>
                     <ContentRenderer
                       value={item}
@@ -95,7 +128,8 @@ export default function NodeOutputs({
                     />
                     {index < Math.min(dataArray.length, 10) - 1 && ", "}
                   </React.Fragment>
-              ))}
+                );
+              })}
               {dataArray.length > 10 && (
                 <span style={{ color: "#888" }}>
                   <br />
@@ -4,13 +4,13 @@ import { AgentRunDraftView } from "@/app/(platform)/library/agents/[id]/componen
 import { Dialog } from "@/components/molecules/Dialog/Dialog";
 import type {
   CredentialsMetaInput,
-  GraphMeta,
+  Graph,
 } from "@/lib/autogpt-server-api/types";
 
 interface RunInputDialogProps {
   isOpen: boolean;
   doClose: () => void;
-  graph: GraphMeta;
+  graph: Graph;
   doRun?: (
     inputs: Record<string, any>,
     credentialsInputs: Record<string, CredentialsMetaInput>,
@@ -9,13 +9,13 @@ import { CustomNodeData } from "@/app/(platform)/build/components/legacy-builder
 import {
   BlockUIType,
   CredentialsMetaInput,
-  GraphMeta,
+  Graph,
 } from "@/lib/autogpt-server-api/types";
 import RunnerOutputUI, { OutputNodeInfo } from "./RunnerOutputUI";
 import { RunnerInputDialog } from "./RunnerInputUI";
 
 interface RunnerUIWrapperProps {
-  graph: GraphMeta;
+  graph: Graph;
   nodes: Node<CustomNodeData>[];
   graphExecutionError?: string | null;
   saveAndRun: (
@@ -1,5 +1,5 @@
 import { GraphInputSchema } from "@/lib/autogpt-server-api";
-import { GraphMetaLike, IncompatibilityInfo } from "./types";
+import { GraphLike, IncompatibilityInfo } from "./types";
 
 // Helper type for schema properties - the generated types are too loose
 type SchemaProperties = Record<string, GraphInputSchema["properties"][string]>;
@@ -36,7 +36,7 @@ export function getSchemaRequired(schema: unknown): SchemaRequired {
  */
 export function createUpdatedAgentNodeInputs(
   currentInputs: Record<string, unknown>,
-  latestSubGraphVersion: GraphMetaLike,
+  latestSubGraphVersion: GraphLike,
 ): Record<string, unknown> {
   return {
     ...currentInputs,
@@ -1,7 +1,11 @@
-import type { GraphMeta as LegacyGraphMeta } from "@/lib/autogpt-server-api";
+import type {
+  Graph as LegacyGraph,
+  GraphMeta as LegacyGraphMeta,
+} from "@/lib/autogpt-server-api";
+import type { GraphModel as GeneratedGraph } from "@/app/api/__generated__/models/graphModel";
 import type { GraphMeta as GeneratedGraphMeta } from "@/app/api/__generated__/models/graphMeta";
 
-export type SubAgentUpdateInfo<T extends GraphMetaLike = GraphMetaLike> = {
+export type SubAgentUpdateInfo<T extends GraphLike = GraphLike> = {
   hasUpdate: boolean;
   currentVersion: number;
   latestVersion: number;
@@ -10,7 +14,10 @@ export type SubAgentUpdateInfo<T extends GraphMetaLike = GraphMetaLike> = {
   incompatibilities: IncompatibilityInfo | null;
 };
 
-// Union type for GraphMeta that works with both legacy and new builder
+// Union type for Graph (with schemas) that works with both legacy and new builder
+export type GraphLike = LegacyGraph | GeneratedGraph;
+
+// Union type for GraphMeta (without schemas) for version detection
 export type GraphMetaLike = LegacyGraphMeta | GeneratedGraphMeta;
 
 export type IncompatibilityInfo = {
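The types.ts hunks split one union into two: GraphMetaLike (metadata only, enough for version detection) and GraphLike (full graphs carrying schemas, needed for compatibility checks). A compressed illustration of why the split matters, with hypothetical reduced shapes rather than the real models:

type GraphMetaSketch = { id: string; version: number };
type GraphSketch = GraphMetaSketch & {
  input_schema: Record<string, unknown>;
  output_schema: Record<string, unknown>;
};

// Version detection only needs the lightweight metadata...
function hasNewerVersionSketch(
  current: number,
  available: GraphMetaSketch[],
): boolean {
  return available.some((g) => g.version > current);
}

// ...while schema compatibility checks need the full graph.
function connectedInputStillExistsSketch(
  graph: GraphSketch,
  handle: string,
): boolean {
  return handle in graph.input_schema;
}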
@@ -1,5 +1,11 @@
 import { useMemo } from "react";
-import { GraphInputSchema, GraphOutputSchema } from "@/lib/autogpt-server-api";
+import type {
+  GraphInputSchema,
+  GraphOutputSchema,
+} from "@/lib/autogpt-server-api";
+import type { GraphModel } from "@/app/api/__generated__/models/graphModel";
+import { useGetV1GetSpecificGraph } from "@/app/api/__generated__/endpoints/graphs/graphs";
+import { okData } from "@/app/api/helpers";
 import { getEffectiveType } from "@/lib/utils";
 import { EdgeLike, getSchemaProperties, getSchemaRequired } from "./helpers";
 import {
@@ -11,26 +17,38 @@
 /**
  * Checks if a newer version of a sub-agent is available and determines compatibility
  */
-export function useSubAgentUpdate<T extends GraphMetaLike>(
+export function useSubAgentUpdate(
   nodeID: string,
   graphID: string | undefined,
   graphVersion: number | undefined,
   currentInputSchema: GraphInputSchema | undefined,
   currentOutputSchema: GraphOutputSchema | undefined,
   connections: EdgeLike[],
-  availableGraphs: T[],
-): SubAgentUpdateInfo<T> {
+  availableGraphs: GraphMetaLike[],
+): SubAgentUpdateInfo<GraphModel> {
   // Find the latest version of the same graph
-  const latestGraph = useMemo(() => {
+  const latestGraphInfo = useMemo(() => {
     if (!graphID) return null;
     return availableGraphs.find((graph) => graph.id === graphID) || null;
   }, [graphID, availableGraphs]);
 
-  // Check if there's an update available
+  // Check if there's a newer version available
   const hasUpdate = useMemo(() => {
-    if (!latestGraph || graphVersion === undefined) return false;
-    return latestGraph.version! > graphVersion;
-  }, [latestGraph, graphVersion]);
+    if (!latestGraphInfo || graphVersion === undefined) return false;
+    return latestGraphInfo.version! > graphVersion;
+  }, [latestGraphInfo, graphVersion]);
 
+  // Fetch full graph IF an update is detected
+  const { data: latestGraph } = useGetV1GetSpecificGraph(
+    graphID ?? "",
+    { version: latestGraphInfo?.version },
+    {
+      query: {
+        enabled: hasUpdate && !!graphID && !!latestGraphInfo?.version,
+        select: okData,
+      },
+    },
+  );
+
   // Get connected input and output handles for this specific node
   const connectedHandles = useMemo(() => {
@@ -152,8 +170,8 @@ export function useSubAgentUpdate<T extends GraphMetaLike>(
   return {
     hasUpdate,
     currentVersion: graphVersion || 0,
-    latestVersion: latestGraph?.version || 0,
-    latestGraph,
+    latestVersion: latestGraphInfo?.version || 0,
+    latestGraph: latestGraph || null,
     isCompatible: compatibilityResult.isCompatible,
     incompatibilities: compatibilityResult.incompatibilities,
   };
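useSubAgentUpdate above now defers the heavy graph fetch until a version bump is actually detected, via TanStack Query's enabled and select options. A standalone sketch of that conditional-fetch pattern (the endpoint URL and response envelope here are hypothetical; the hook itself uses the generated useGetV1GetSpecificGraph client):

import { useQuery } from "@tanstack/react-query";

function useLatestGraphSketch(
  graphID: string | undefined,
  latestVersion: number | undefined,
  hasUpdate: boolean,
) {
  return useQuery({
    queryKey: ["graph", graphID, latestVersion],
    queryFn: async () => {
      // Hypothetical endpoint shape, for illustration only.
      const res = await fetch(`/api/graphs/${graphID}?version=${latestVersion}`);
      return { status: res.status, data: await res.json() };
    },
    // Skip the request entirely until an update is actually detected.
    enabled: hasUpdate && !!graphID && !!latestVersion,
    // Unwrap the envelope before it reaches consumers (the diff uses okData).
    select: (resp) => (resp.status === 200 ? resp.data : null),
  });
}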
@@ -18,7 +18,7 @@ interface GraphStore {
     outputSchema: Record<string, any> | null,
   ) => void;
 
-  // Available graphs; used for sub-graph updates
+  // Available graphs; used for sub-graph updated version detection
   availableSubGraphs: GraphMeta[];
   setAvailableSubGraphs: (graphs: GraphMeta[]) => void;
 
@@ -10,8 +10,8 @@ import React, {
 import {
   CredentialsMetaInput,
   CredentialsType,
+  Graph,
   GraphExecutionID,
-  GraphMeta,
   LibraryAgentPreset,
   LibraryAgentPresetID,
   LibraryAgentPresetUpdatable,
@@ -69,7 +69,7 @@ export function AgentRunDraftView({
   className,
   recommendedScheduleCron,
 }: {
-  graph: GraphMeta;
+  graph: Graph;
   agentActions?: ButtonAction[];
   recommendedScheduleCron?: string | null;
   doRun?: (
@@ -2,8 +2,8 @@
 import React, { useCallback, useMemo } from "react";
 
 import {
+  Graph,
   GraphExecutionID,
-  GraphMeta,
   Schedule,
   ScheduleID,
 } from "@/lib/autogpt-server-api";
@@ -35,7 +35,7 @@ export function AgentScheduleDetailsView({
   onForcedRun,
   doDeleteSchedule,
 }: {
-  graph: GraphMeta;
+  graph: Graph;
   schedule: Schedule;
   agentActions: ButtonAction[];
   onForcedRun: (runID: GraphExecutionID) => void;
@@ -8,7 +8,6 @@ import { useMainMarketplacePage } from "./useMainMarketplacePage";
 import { FeaturedCreators } from "../FeaturedCreators/FeaturedCreators";
 import { MainMarketplacePageLoading } from "../MainMarketplacePageLoading";
 import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard";
-import { WaitlistSection } from "../WaitlistSection/WaitlistSection";
 
 export const MainMarkeplacePage = () => {
   const { featuredAgents, topAgents, featuredCreators, isLoading, hasError } =
@@ -47,10 +46,6 @@ export const MainMarkeplacePage = () => {
       {/* 100px margin because our featured sections button are placed 40px below the container */}
       <Separator className="mb-6 mt-24" />
 
-      {/* Waitlist Section - "Help Shape What's Next" */}
-      <WaitlistSection />
-      <Separator className="mb-6 mt-12" />
-
       {topAgents && (
         <AgentsSection sectionTitle="Top Agents" agents={topAgents.agents} />
       )}
@@ -1,105 +0,0 @@
-"use client";
-
-import Image from "next/image";
-import { Button } from "@/components/atoms/Button/Button";
-import { Check } from "@phosphor-icons/react";
-
-interface WaitlistCardProps {
-  name: string;
-  subHeading: string;
-  description: string;
-  imageUrl: string | null;
-  isMember?: boolean;
-  onCardClick: () => void;
-  onJoinClick: (e: React.MouseEvent) => void;
-}
-
-export function WaitlistCard({
-  name,
-  subHeading,
-  description,
-  imageUrl,
-  isMember = false,
-  onCardClick,
-  onJoinClick,
-}: WaitlistCardProps) {
-  function handleJoinClick(e: React.MouseEvent) {
-    e.stopPropagation();
-    onJoinClick(e);
-  }
-
-  return (
-    <div
-      className="flex h-[24rem] w-full max-w-md cursor-pointer flex-col items-start rounded-3xl bg-white transition-all duration-300 hover:shadow-lg dark:bg-zinc-900 dark:hover:shadow-gray-700"
-      onClick={onCardClick}
-      data-testid="waitlist-card"
-      role="button"
-      tabIndex={0}
-      aria-label={`${name} waitlist card`}
-      onKeyDown={(e) => {
-        if (e.key === "Enter" || e.key === " ") {
-          onCardClick();
-        }
-      }}
-    >
-      {/* Image Section */}
-      <div className="relative aspect-[2/1.2] w-full overflow-hidden rounded-large md:aspect-[2.17/1]">
-        {imageUrl ? (
-          <Image
-            src={imageUrl}
-            alt={`${name} preview image`}
-            fill
-            className="object-cover"
-          />
-        ) : (
-          <div className="flex h-full w-full items-center justify-center bg-gradient-to-br from-neutral-200 to-neutral-300 dark:from-neutral-700 dark:to-neutral-800">
-            <span className="text-4xl font-bold text-neutral-400 dark:text-neutral-500">
-              {name.charAt(0)}
-            </span>
-          </div>
-        )}
-      </div>
-
-      <div className="mt-3 flex w-full flex-1 flex-col px-4">
-        {/* Name and Subheading */}
-        <div className="flex w-full flex-col">
-          <h3 className="line-clamp-1 font-poppins text-xl font-semibold text-[#272727] dark:text-neutral-100">
-            {name}
-          </h3>
-          <p className="mt-1 line-clamp-1 text-sm text-neutral-500 dark:text-neutral-400">
-            {subHeading}
-          </p>
-        </div>
-
-        {/* Description */}
-        <div className="mt-2 flex w-full flex-col">
-          <p className="line-clamp-5 text-sm font-normal leading-relaxed text-neutral-600 dark:text-neutral-400">
-            {description}
-          </p>
-        </div>
-
-        <div className="flex-grow" />
-
-        {/* Join Waitlist Button */}
-        <div className="mt-4 w-full pb-4">
-          {isMember ? (
-            <Button
-              disabled
-              className="w-full rounded-full bg-green-600 text-white hover:bg-green-600 dark:bg-green-700 dark:hover:bg-green-700"
-            >
-              <Check className="mr-2" size={16} weight="bold" />
-              On the waitlist
-            </Button>
-          ) : (
-            <Button
-              onClick={handleJoinClick}
-              className="w-full rounded-full bg-zinc-800 text-white hover:bg-zinc-700 dark:bg-zinc-700 dark:hover:bg-zinc-600"
-            >
-              Join waitlist
-            </Button>
-          )}
-        </div>
-      </div>
-    </div>
-  );
-}
@@ -1,356 +0,0 @@
-"use client";
-
-import { useState } from "react";
-import Image from "next/image";
-import { Button } from "@/components/atoms/Button/Button";
-import { Dialog } from "@/components/molecules/Dialog/Dialog";
-import { Input } from "@/components/atoms/Input/Input";
-import {
-  Carousel,
-  CarouselContent,
-  CarouselItem,
-  CarouselNext,
-  CarouselPrevious,
-} from "@/components/__legacy__/ui/carousel";
-import type { StoreWaitlistEntry } from "@/app/api/__generated__/models/storeWaitlistEntry";
-import { Check, Play } from "@phosphor-icons/react";
-import { useSupabaseStore } from "@/lib/supabase/hooks/useSupabaseStore";
-import { useToast } from "@/components/molecules/Toast/use-toast";
-import { usePostV2AddSelfToTheAgentWaitlist } from "@/app/api/__generated__/endpoints/store/store";
-
-interface MediaItem {
-  type: "image" | "video";
-  url: string;
-  label?: string;
-}
-
-// Extract YouTube video ID from various URL formats
-function getYouTubeVideoId(url: string): string | null {
-  const regExp =
-    /^.*((youtu.be\/)|(v\/)|(\/u\/\w\/)|(embed\/)|(watch\?))\??v?=?([^#&?]*).*/;
-  const match = url.match(regExp);
-  return match && match[7].length === 11 ? match[7] : null;
-}
-
-// Validate video URL for security
-function isValidVideoUrl(url: string): boolean {
-  if (url.startsWith("data:video")) {
-    return true;
-  }
-  const videoExtensions = /\.(mp4|webm|ogg)$/i;
-  const youtubeRegex = /^(https?:\/\/)?(www\.)?(youtube\.com|youtu\.?be)\/.+$/;
-  const validUrl = /^(https?:\/\/)/i;
-  const cleanedUrl = url.split("?")[0];
-  return (
-    (validUrl.test(url) && videoExtensions.test(cleanedUrl)) ||
-    youtubeRegex.test(url)
-  );
-}
-
-// Video player with YouTube embed support
-function VideoPlayer({
-  url,
-  autoPlay = false,
-  className = "",
-}: {
-  url: string;
-  autoPlay?: boolean;
-  className?: string;
-}) {
-  const youtubeId = getYouTubeVideoId(url);
-
-  if (youtubeId) {
-    return (
-      <iframe
-        src={`https://www.youtube.com/embed/${youtubeId}${autoPlay ? "?autoplay=1" : ""}`}
-        title="YouTube video player"
-        className={className}
-        allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture"
-        sandbox="allow-same-origin allow-scripts allow-presentation"
-        allowFullScreen
-      />
-    );
-  }
-
-  if (!isValidVideoUrl(url)) {
-    return (
-      <div
-        className={`flex items-center justify-center bg-zinc-800 ${className}`}
-      >
-        <span className="text-sm text-zinc-400">Invalid video URL</span>
-      </div>
-    );
-  }
-
-  return <video src={url} controls autoPlay={autoPlay} className={className} />;
-}
-
-function MediaCarousel({ waitlist }: { waitlist: StoreWaitlistEntry }) {
-  const [activeVideo, setActiveVideo] = useState<string | null>(null);
-
-  // Build media items array: videos first, then images
-  const mediaItems: MediaItem[] = [
-    ...(waitlist.videoUrl
-      ? [{ type: "video" as const, url: waitlist.videoUrl, label: "Video" }]
-      : []),
-    ...(waitlist.agentOutputDemoUrl
-      ? [
-          {
-            type: "video" as const,
-            url: waitlist.agentOutputDemoUrl,
-            label: "Demo",
-          },
-        ]
-      : []),
-    ...waitlist.imageUrls.map((url) => ({ type: "image" as const, url })),
-  ];
-
-  if (mediaItems.length === 0) return null;
-
-  // Single item - no carousel needed
-  if (mediaItems.length === 1) {
-    const item = mediaItems[0];
-    return (
-      <div className="relative aspect-[350/196] w-full overflow-hidden rounded-large">
-        {item.type === "image" ? (
-          <Image
-            src={item.url}
-            alt={`${waitlist.name} preview`}
-            fill
-            className="object-cover"
-          />
-        ) : (
-          <VideoPlayer url={item.url} className="h-full w-full object-cover" />
-        )}
-      </div>
-    );
-  }
-
-  // Multiple items - use carousel
-  return (
-    <Carousel className="w-full">
-      <CarouselContent>
-        {mediaItems.map((item, index) => (
-          <CarouselItem key={index}>
-            <div className="relative aspect-[350/196] w-full overflow-hidden rounded-large">
-              {item.type === "image" ? (
-                <Image
-                  src={item.url}
-                  alt={`${waitlist.name} preview ${index + 1}`}
-                  fill
-                  className="object-cover"
-                />
-              ) : activeVideo === item.url ? (
-                <VideoPlayer
-                  url={item.url}
-                  autoPlay
-                  className="h-full w-full object-cover"
-                />
-              ) : (
-                <button
-                  onClick={() => setActiveVideo(item.url)}
-                  className="group relative h-full w-full bg-zinc-900"
-                >
-                  <div className="absolute inset-0 flex items-center justify-center">
-                    <div className="flex h-16 w-16 items-center justify-center rounded-full bg-white/90 transition-transform group-hover:scale-110">
-                      <Play size={32} weight="fill" className="text-zinc-800" />
-                    </div>
-                  </div>
-                  <span className="absolute bottom-3 left-3 text-sm text-white">
-                    {item.label}
-                  </span>
-                </button>
-              )}
-            </div>
-          </CarouselItem>
-        ))}
-      </CarouselContent>
-      <CarouselPrevious className="left-2 top-1/2 -translate-y-1/2" />
-      <CarouselNext className="right-2 top-1/2 -translate-y-1/2" />
-    </Carousel>
-  );
-}
-
-interface WaitlistDetailModalProps {
-  waitlist: StoreWaitlistEntry;
-  isMember?: boolean;
-  onClose: () => void;
-  onJoinSuccess?: (waitlistId: string) => void;
-}
-
-export function WaitlistDetailModal({
-  waitlist,
-  isMember = false,
-  onClose,
-  onJoinSuccess,
-}: WaitlistDetailModalProps) {
-  const { user } = useSupabaseStore();
-  const [email, setEmail] = useState("");
-  const [success, setSuccess] = useState(false);
-  const { toast } = useToast();
-  const joinWaitlistMutation = usePostV2AddSelfToTheAgentWaitlist();
-
-  function handleJoin() {
-    joinWaitlistMutation.mutate(
-      {
-        waitlistId: waitlist.waitlistId,
-        data: { email: user ? undefined : email },
-      },
-      {
-        onSuccess: (response) => {
-          if (response.status === 200) {
-            setSuccess(true);
-            toast({
-              title: "You're on the waitlist!",
-              description: `We'll notify you when ${waitlist.name} goes live.`,
-            });
-            onJoinSuccess?.(waitlist.waitlistId);
-          } else {
-            toast({
-              variant: "destructive",
-              title: "Error",
-              description: "Failed to join waitlist. Please try again.",
-            });
-          }
-        },
-        onError: () => {
-          toast({
-            variant: "destructive",
-            title: "Error",
-            description: "Failed to join waitlist. Please try again.",
-          });
-        },
-      },
-    );
-  }
-
-  // Success state
-  if (success) {
-    return (
-      <Dialog
-        title=""
-        controlled={{
-          isOpen: true,
-          set: async (open) => {
-            if (!open) onClose();
-          },
-        }}
-        onClose={onClose}
-        styling={{ maxWidth: "500px" }}
-      >
-        <Dialog.Content>
-          <div className="flex flex-col items-center justify-center py-4 text-center">
-            {/* Party emoji */}
-            <span className="mb-2 text-5xl">🎉</span>
-
-            {/* Title */}
-            <h2 className="mb-2 font-poppins text-[22px] font-medium leading-7 text-zinc-900 dark:text-zinc-100">
-              You're on the waitlist
-            </h2>
-
-            {/* Subtitle */}
-            <p className="text-base leading-[26px] text-zinc-600 dark:text-zinc-400">
-              Thanks for helping us prioritize which agents to build next.
-              We'll notify you when this agent goes live in the
-              marketplace.
-            </p>
-          </div>
-
-          {/* Close button */}
-          <Dialog.Footer className="flex justify-center pb-2 pt-4">
-            <Button
-              variant="secondary"
-              onClick={onClose}
-              className="rounded-full border border-zinc-700 bg-white px-4 py-3 text-zinc-900 hover:bg-zinc-100 dark:border-zinc-500 dark:bg-zinc-800 dark:text-zinc-100 dark:hover:bg-zinc-700"
-            >
-              Close
-            </Button>
-          </Dialog.Footer>
-        </Dialog.Content>
-      </Dialog>
-    );
-  }
-
-  // Main modal - handles both member and non-member states
-  return (
-    <Dialog
-      title="Join the waitlist"
-      controlled={{
-        isOpen: true,
-        set: async (open) => {
-          if (!open) onClose();
-        },
-      }}
-      onClose={onClose}
-      styling={{ maxWidth: "500px" }}
-    >
-      <Dialog.Content>
-        {/* Subtitle */}
-        <p className="mb-6 text-center text-base text-zinc-600 dark:text-zinc-400">
-          Help us decide what to build next — and get notified when this agent
-          is ready
-        </p>
-
-        {/* Media Carousel */}
-        <MediaCarousel waitlist={waitlist} />
-
-        {/* Agent Name */}
-        <h3 className="mt-4 font-poppins text-[22px] font-medium leading-7 text-zinc-800 dark:text-zinc-100">
-          {waitlist.name}
-        </h3>
-
-        {/* Agent Description */}
-        <p className="mt-2 line-clamp-5 text-sm leading-[22px] text-zinc-500 dark:text-zinc-400">
-          {waitlist.description}
-        </p>
-
-        {/* Email input for non-logged-in users who haven't joined */}
-        {!isMember && !user && (
-          <div className="mt-4 pr-1">
-            <Input
-              id="email"
-              label="Email address"
-              type="email"
-              placeholder="you@example.com"
-              value={email}
-              onChange={(e) => setEmail(e.target.value)}
-              required
-            />
-          </div>
-        )}
-
-        {/* Footer buttons */}
-        <Dialog.Footer className="sticky bottom-0 mt-6 flex justify-center gap-3 bg-white pb-2 pt-4 dark:bg-zinc-900">
-          {isMember ? (
-            <Button
-              disabled
-              className="rounded-full bg-green-600 px-4 py-3 text-white hover:bg-green-600 dark:bg-green-700 dark:hover:bg-green-700"
-            >
-              <Check size={16} className="mr-2" />
-              You're on the waitlist
-            </Button>
-          ) : (
-            <>
-              <Button
-                onClick={handleJoin}
-                loading={joinWaitlistMutation.isPending}
-                disabled={!user && !email}
-                className="rounded-full bg-zinc-800 px-4 py-3 text-white hover:bg-zinc-700 dark:bg-zinc-700 dark:hover:bg-zinc-600"
-              >
-                Join waitlist
-              </Button>
-              <Button
-                type="button"
-                variant="secondary"
-                onClick={onClose}
-                className="rounded-full bg-zinc-200 px-4 py-3 text-zinc-900 hover:bg-zinc-300 dark:bg-zinc-700 dark:text-zinc-100 dark:hover:bg-zinc-600"
-              >
-                Not now
-              </Button>
-            </>
-          )}
-        </Dialog.Footer>
-      </Dialog.Content>
-    </Dialog>
-  );
-}
@@ -1,102 +0,0 @@
-"use client";
-
-import { useState } from "react";
-import {
-  Carousel,
-  CarouselContent,
-  CarouselItem,
-} from "@/components/__legacy__/ui/carousel";
-import { WaitlistCard } from "../WaitlistCard/WaitlistCard";
-import { WaitlistDetailModal } from "../WaitlistDetailModal/WaitlistDetailModal";
-import type { StoreWaitlistEntry } from "@/app/api/__generated__/models/storeWaitlistEntry";
-import { useWaitlistSection } from "./useWaitlistSection";
-
-export function WaitlistSection() {
-  const { waitlists, joinedWaitlistIds, isLoading, hasError, markAsJoined } =
-    useWaitlistSection();
-  const [selectedWaitlist, setSelectedWaitlist] =
-    useState<StoreWaitlistEntry | null>(null);
-
-  function handleOpenModal(waitlist: StoreWaitlistEntry) {
-    setSelectedWaitlist(waitlist);
-  }
-
-  function handleJoinSuccess(waitlistId: string) {
-    markAsJoined(waitlistId);
-  }
-
-  // Don't render if loading, error, or no waitlists
-  if (isLoading || hasError || !waitlists || waitlists.length === 0) {
-    return null;
-  }
-
-  return (
-    <div className="flex flex-col items-center justify-center">
-      <div className="w-full max-w-[1360px]">
-        {/* Section Header */}
-        <div className="mb-6">
-          <h2 className="font-poppins text-2xl font-semibold text-[#282828] dark:text-neutral-200">
-            Help Shape What's Next
-          </h2>
-          <p className="mt-2 text-base text-neutral-600 dark:text-neutral-400">
-            These agents are in development. Your interest helps us prioritize
-            what gets built — and we'll notify you when they're ready.
-          </p>
-        </div>
-
-        {/* Mobile Carousel View */}
-        <Carousel
-          className="md:hidden"
-          opts={{
-            loop: true,
-          }}
-        >
-          <CarouselContent>
-            {waitlists.map((waitlist) => (
-              <CarouselItem
-                key={waitlist.waitlistId}
-                className="min-w-64 max-w-71"
-              >
-                <WaitlistCard
-                  name={waitlist.name}
-                  subHeading={waitlist.subHeading}
-                  description={waitlist.description}
-                  imageUrl={waitlist.imageUrls[0] || null}
-                  isMember={joinedWaitlistIds.has(waitlist.waitlistId)}
-                  onCardClick={() => handleOpenModal(waitlist)}
-                  onJoinClick={() => handleOpenModal(waitlist)}
-                />
-              </CarouselItem>
-            ))}
-          </CarouselContent>
-        </Carousel>
-
-        {/* Desktop Grid View */}
-        <div className="hidden grid-cols-1 place-items-center gap-6 md:grid md:grid-cols-2 lg:grid-cols-3">
-          {waitlists.map((waitlist) => (
-            <WaitlistCard
-              key={waitlist.waitlistId}
-              name={waitlist.name}
-              subHeading={waitlist.subHeading}
-              description={waitlist.description}
-              imageUrl={waitlist.imageUrls[0] || null}
-              isMember={joinedWaitlistIds.has(waitlist.waitlistId)}
-              onCardClick={() => handleOpenModal(waitlist)}
-              onJoinClick={() => handleOpenModal(waitlist)}
-            />
-          ))}
-        </div>
-      </div>
-
-      {/* Single Modal for both viewing and joining */}
-      {selectedWaitlist && (
-        <WaitlistDetailModal
-          waitlist={selectedWaitlist}
-          isMember={joinedWaitlistIds.has(selectedWaitlist.waitlistId)}
-          onClose={() => setSelectedWaitlist(null)}
-          onJoinSuccess={handleJoinSuccess}
-        />
-      )}
-    </div>
-  );
-}
@@ -1,58 +0,0 @@
-"use client";
-
-import { useMemo } from "react";
-import { useSupabaseStore } from "@/lib/supabase/hooks/useSupabaseStore";
-import {
-  useGetV2GetTheAgentWaitlist,
-  useGetV2GetWaitlistIdsTheCurrentUserHasJoined,
-  getGetV2GetWaitlistIdsTheCurrentUserHasJoinedQueryKey,
-} from "@/app/api/__generated__/endpoints/store/store";
-import type { StoreWaitlistEntry } from "@/app/api/__generated__/models/storeWaitlistEntry";
-import { useQueryClient } from "@tanstack/react-query";
-
-export function useWaitlistSection() {
-  const { user } = useSupabaseStore();
-  const queryClient = useQueryClient();
-
-  // Fetch waitlists
-  const {
-    data: waitlistsResponse,
-    isLoading: waitlistsLoading,
-    isError: waitlistsError,
-  } = useGetV2GetTheAgentWaitlist();
-
-  // Fetch memberships if logged in
-  const { data: membershipsResponse, isLoading: membershipsLoading } =
-    useGetV2GetWaitlistIdsTheCurrentUserHasJoined({
-      query: {
-        enabled: !!user,
-      },
-    });
-
-  const waitlists: StoreWaitlistEntry[] = useMemo(() => {
-    if (waitlistsResponse?.status === 200) {
-      return waitlistsResponse.data.listings;
-    }
-    return [];
-  }, [waitlistsResponse]);
-
-  const joinedWaitlistIds: Set<string> = useMemo(() => {
-    if (membershipsResponse?.status === 200) {
-      return new Set(membershipsResponse.data);
-    }
-    return new Set();
-  }, [membershipsResponse]);
-
-  const isLoading = waitlistsLoading || (!!user && membershipsLoading);
-  const hasError = waitlistsError;
-
-  // Function to add a waitlist ID to joined set (called after successful join)
-  function markAsJoined(_waitlistId: string) {
-    // Invalidate the memberships query to refetch
-    queryClient.invalidateQueries({
-      queryKey: getGetV2GetWaitlistIdsTheCurrentUserHasJoinedQueryKey(),
-    });
-  }
-
-  return { waitlists, joinedWaitlistIds, isLoading, hasError, markAsJoined };
-}
File diff suppressed because it is too large
@@ -22,7 +22,7 @@ const isValidVideoUrl = (url: string): boolean => {
   if (url.startsWith("data:video")) {
     return true;
   }
-  const videoExtensions = /\.(mp4|webm|ogg)$/i;
+  const videoExtensions = /\.(mp4|webm|ogg|mov|avi|mkv|m4v)$/i;
   const youtubeRegex = /^(https?:\/\/)?(www\.)?(youtube\.com|youtu\.?be)\/.+$/;
   const cleanedUrl = url.split("?")[0];
   return (

@@ -44,11 +44,29 @@ const isValidAudioUrl = (url: string): boolean => {
   if (url.startsWith("data:audio")) {
     return true;
   }
-  const audioExtensions = /\.(mp3|wav)$/i;
+  const audioExtensions = /\.(mp3|wav|ogg|m4a|aac|flac)$/i;
   const cleanedUrl = url.split("?")[0];
   return isValidMediaUri(url) && audioExtensions.test(cleanedUrl);
 };

+const getVideoMimeType = (url: string): string => {
+  if (url.startsWith("data:video/")) {
+    const match = url.match(/^data:(video\/[^;]+)/);
+    return match?.[1] || "video/mp4";
+  }
+  const extension = url.split("?")[0].split(".").pop()?.toLowerCase();
+  const mimeMap: Record<string, string> = {
+    mp4: "video/mp4",
+    webm: "video/webm",
+    ogg: "video/ogg",
+    mov: "video/quicktime",
+    avi: "video/x-msvideo",
+    mkv: "video/x-matroska",
+    m4v: "video/mp4",
+  };
+  return mimeMap[extension || ""] || "video/mp4";
+};
+
 const VideoRenderer: React.FC<{ videoUrl: string }> = ({ videoUrl }) => {
   const videoId = getYouTubeVideoId(videoUrl);
   return (
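For reference, a rough sketch of what the new getVideoMimeType helper returns for a few inputs — the URLs below are made up, and the behavior follows the mimeMap and data-URI branch in the hunk above:

// Illustrative calls against the helper added above — not part of the diff.
getVideoMimeType("https://cdn.example.com/clip.webm?token=abc"); // "video/webm" (query string stripped first)
getVideoMimeType("https://cdn.example.com/trailer.MOV");         // "video/quicktime" (extension lowercased)
getVideoMimeType("data:video/ogg;base64,T2dnUw==");              // "video/ogg" (parsed from the data URI)
getVideoMimeType("https://cdn.example.com/unknown.xyz");         // "video/mp4" (fallback default)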
@@ -63,7 +81,7 @@ const VideoRenderer: React.FC<{ videoUrl: string }> = ({ videoUrl }) => {
       ></iframe>
     ) : (
       <video controls width="100%" height="315">
-        <source src={videoUrl} type="video/mp4" />
+        <source src={videoUrl} type={getVideoMimeType(videoUrl)} />
         Your browser does not support the video tag.
       </video>
     )}
@@ -102,18 +102,6 @@ export function ChatMessage({
     }
   }

-  function handleClarificationAnswers(answers: Record<string, string>) {
-    if (onSendMessage) {
-      const contextMessage = Object.entries(answers)
-        .map(([keyword, answer]) => `${keyword}: ${answer}`)
-        .join("\n");
-
-      onSendMessage(
-        `I have the answers to your questions:\n\n${contextMessage}\n\nPlease proceed with creating the agent.`,
-      );
-    }
-  }
-
   const handleCopy = useCallback(
     async function handleCopy() {
       if (message.type !== "message") return;

@@ -162,6 +150,22 @@ export function ChatMessage({
         .slice(index + 1)
         .some((m) => m.type === "message" && m.role === "user");

+      const handleClarificationAnswers = (answers: Record<string, string>) => {
+        if (onSendMessage) {
+          // Iterate over questions (preserves original order) instead of answers
+          const contextMessage = message.questions
+            .map((q) => {
+              const answer = answers[q.keyword] || "";
+              return `> ${q.question}\n\n${answer}`;
+            })
+            .join("\n\n");
+
+          onSendMessage(
+            `**Here are my answers:**\n\n${contextMessage}\n\nPlease proceed with creating the agent.`,
+          );
+        }
+      };
+
       return (
         <ClarificationQuestionsWidget
           questions={message.questions}
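For concreteness, a sketch of the message the reworked handler above would send for a hypothetical pair of clarification questions (the question text and keywords are invented):

// questions: [{ keyword: "audience", question: "Who is the report for?" },
//             { keyword: "cadence",  question: "How often should it run?" }]
// answers:   { cadence: "Weekly", audience: "The sales team" }
//
// onSendMessage receives (questions quoted in their original order):
//
// **Here are my answers:**
//
// > Who is the report for?
//
// The sales team
//
// > How often should it run?
//
// Weekly
//
// Please proceed with creating the agent.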
@@ -346,6 +350,7 @@ export function ChatMessage({
           toolId={message.toolId}
           toolName={message.toolName}
           result={message.result}
+          onSendMessage={onSendMessage}
         />
       </div>
     );
@@ -3,7 +3,7 @@
 import { getGetWorkspaceDownloadFileByIdUrl } from "@/app/api/__generated__/endpoints/workspace/workspace";
 import { cn } from "@/lib/utils";
 import { EyeSlash } from "@phosphor-icons/react";
-import React from "react";
+import React, { useState } from "react";
 import ReactMarkdown from "react-markdown";
 import remarkGfm from "remark-gfm";

@@ -48,7 +48,9 @@ interface InputProps extends React.InputHTMLAttributes<HTMLInputElement> {
  */
 function resolveWorkspaceUrl(src: string): string {
   if (src.startsWith("workspace://")) {
-    const fileId = src.replace("workspace://", "");
+    // Strip MIME type fragment if present (e.g., workspace://abc123#video/mp4 → abc123)
+    const withoutPrefix = src.replace("workspace://", "");
+    const fileId = withoutPrefix.split("#")[0];
     // Use the generated API URL helper to get the correct path
     const apiPath = getGetWorkspaceDownloadFileByIdUrl(fileId);
     // Route through the Next.js proxy (same pattern as customMutator for client-side)
@@ -65,13 +67,49 @@ function isWorkspaceImage(src: string | undefined): boolean {
   return src?.includes("/workspace/files/") ?? false;
 }

+/**
+ * Renders a workspace video with controls and an optional "AI cannot see" badge.
+ */
+function WorkspaceVideo({
+  src,
+  aiCannotSee,
+}: {
+  src: string;
+  aiCannotSee: boolean;
+}) {
+  return (
+    <span className="relative my-2 inline-block">
+      <video
+        controls
+        className="h-auto max-w-full rounded-md border border-zinc-200"
+        preload="metadata"
+      >
+        <source src={src} />
+        Your browser does not support the video tag.
+      </video>
+      {aiCannotSee && (
+        <span
+          className="absolute bottom-2 right-2 flex items-center gap-1 rounded bg-black/70 px-2 py-1 text-xs text-white"
+          title="The AI cannot see this video"
+        >
+          <EyeSlash size={14} />
+          <span>AI cannot see this video</span>
+        </span>
+      )}
+    </span>
+  );
+}
+
 /**
  * Custom image component that shows an indicator when the AI cannot see the image.
+ * Also handles the "video:" alt-text prefix convention to render <video> elements.
+ * For workspace files with unknown types, falls back to <video> if <img> fails.
  * Note: src is already transformed by urlTransform, so workspace:// is now /api/workspace/...
  */
 function MarkdownImage(props: Record<string, unknown>) {
   const src = props.src as string | undefined;
   const alt = props.alt as string | undefined;
+  const [imgFailed, setImgFailed] = useState(false);
+
   const aiCannotSee = isWorkspaceImage(src);

@@ -84,6 +122,18 @@ function MarkdownImage(props: Record<string, unknown>) {
     );
   }

+  // Detect video: prefix in alt text (set by formatOutputValue in helpers.ts)
+  if (alt?.startsWith("video:")) {
+    return <WorkspaceVideo src={src} aiCannotSee={aiCannotSee} />;
+  }
+
+  // If the <img> failed to load and this is a workspace file, try as video.
+  // This handles generic output keys like "file_out" where the MIME type
+  // isn't known from the key name alone.
+  if (imgFailed && aiCannotSee) {
+    return <WorkspaceVideo src={src} aiCannotSee={aiCannotSee} />;
+  }
+
   return (
     <span className="relative my-2 inline-block">
       {/* eslint-disable-next-line @next/next/no-img-element */}

@@ -92,6 +142,9 @@ function MarkdownImage(props: Record<string, unknown>) {
         alt={alt || "Image"}
         className="h-auto max-w-full rounded-md border border-zinc-200"
         loading="lazy"
+        onError={() => {
+          if (aiCannotSee) setImgFailed(true);
+        }}
       />
       {aiCannotSee && (
         <span
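A sketch of the end-to-end flow these MarkdownContent hunks set up, assuming a hypothetical workspace ref; the proxy path itself comes from getGetWorkspaceDownloadFileByIdUrl and is not spelled out in the diff:

// Hypothetical ref — the MIME type rides along as a URL fragment.
const src = "workspace://abc123#video/mp4";

// resolveWorkspaceUrl (as changed above) drops the fragment before building the download URL:
const fileId = src.replace("workspace://", "").split("#")[0]; // "abc123"

// MarkdownImage then picks a renderer:
// - alt text starting with "video:"      -> <WorkspaceVideo>
// - <img> onError on a workspace file    -> retried as <WorkspaceVideo>
// - otherwise                            -> plain <img> with the "AI cannot see" badge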
@@ -73,6 +73,7 @@ export function MessageList({
             key={index}
             message={message}
             prevMessage={messages[index - 1]}
+            onSendMessage={onSendMessage}
           />
         );
       }

@@ -5,11 +5,13 @@ import { shouldSkipAgentOutput } from "../../helpers";
 export interface LastToolResponseProps {
   message: ChatMessageData;
   prevMessage: ChatMessageData | undefined;
+  onSendMessage?: (content: string) => void;
 }

 export function LastToolResponse({
   message,
   prevMessage,
+  onSendMessage,
 }: LastToolResponseProps) {
   if (message.type !== "tool_response") return null;

@@ -21,6 +23,7 @@ export function LastToolResponse({
         toolId={message.toolId}
         toolName={message.toolName}
         result={message.result}
+        onSendMessage={onSendMessage}
       />
     </div>
   );
@@ -1,6 +1,8 @@
+import { Progress } from "@/components/atoms/Progress/Progress";
 import { cn } from "@/lib/utils";
 import { useEffect, useRef, useState } from "react";
 import { AIChatBubble } from "../AIChatBubble/AIChatBubble";
+import { useAsymptoticProgress } from "../ToolCallMessage/useAsymptoticProgress";

 export interface ThinkingMessageProps {
   className?: string;

@@ -11,6 +13,7 @@ export function ThinkingMessage({ className }: ThinkingMessageProps) {
   const [showCoffeeMessage, setShowCoffeeMessage] = useState(false);
   const timerRef = useRef<NodeJS.Timeout | null>(null);
   const coffeeTimerRef = useRef<NodeJS.Timeout | null>(null);
+  const progress = useAsymptoticProgress(showCoffeeMessage);

   useEffect(() => {
     if (timerRef.current === null) {

@@ -49,9 +52,18 @@ export function ThinkingMessage({ className }: ThinkingMessageProps) {
     <AIChatBubble>
       <div className="transition-all duration-500 ease-in-out">
         {showCoffeeMessage ? (
+          <div className="flex flex-col items-center gap-3">
+            <div className="flex w-full max-w-[280px] flex-col gap-1.5">
+              <div className="flex items-center justify-between text-xs text-neutral-500">
+                <span>Working on it...</span>
+                <span>{Math.round(progress)}%</span>
+              </div>
+              <Progress value={progress} className="h-2 w-full" />
+            </div>
             <span className="inline-block animate-shimmer bg-gradient-to-r from-neutral-400 via-neutral-600 to-neutral-400 bg-[length:200%_100%] bg-clip-text text-transparent">
               This could take a few minutes, grab a coffee ☕️
             </span>
+          </div>
         ) : showSlowLoader ? (
           <span className="inline-block animate-shimmer bg-gradient-to-r from-neutral-400 via-neutral-600 to-neutral-400 bg-[length:200%_100%] bg-clip-text text-transparent">
             Taking a bit more time...
@@ -0,0 +1,50 @@
+import { useEffect, useRef, useState } from "react";
+
+/**
+ * Hook that returns a progress value that starts fast and slows down,
+ * asymptotically approaching but never reaching the max value.
+ *
+ * Uses a half-life formula: progress = max * (1 - 0.5^(time/halfLife))
+ * This creates the "game loading bar" effect where:
+ * - 50% is reached at halfLifeSeconds
+ * - 75% is reached at 2 * halfLifeSeconds
+ * - 87.5% is reached at 3 * halfLifeSeconds
+ * - and so on...
+ *
+ * @param isActive - Whether the progress should be animating
+ * @param halfLifeSeconds - Time in seconds to reach 50% progress (default: 30)
+ * @param maxProgress - Maximum progress value to approach (default: 100)
+ * @param intervalMs - Update interval in milliseconds (default: 100)
+ * @returns Current progress value (0-maxProgress)
+ */
+export function useAsymptoticProgress(
+  isActive: boolean,
+  halfLifeSeconds = 30,
+  maxProgress = 100,
+  intervalMs = 100,
+) {
+  const [progress, setProgress] = useState(0);
+  const elapsedTimeRef = useRef(0);
+
+  useEffect(() => {
+    if (!isActive) {
+      setProgress(0);
+      elapsedTimeRef.current = 0;
+      return;
+    }
+
+    const interval = setInterval(() => {
+      elapsedTimeRef.current += intervalMs / 1000;
+      // Half-life approach: progress = max * (1 - 0.5^(time/halfLife))
+      // At t=halfLife: 50%, at t=2*halfLife: 75%, at t=3*halfLife: 87.5%, etc.
+      const newProgress =
+        maxProgress *
+        (1 - Math.pow(0.5, elapsedTimeRef.current / halfLifeSeconds));
+      setProgress(newProgress);
+    }, intervalMs);
+
+    return () => clearInterval(interval);
+  }, [isActive, halfLifeSeconds, maxProgress, intervalMs]);
+
+  return progress;
+}
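As a quick sanity check of the half-life formula in the new hook, using its defaults (halfLifeSeconds = 30, maxProgress = 100), the reported progress at a few time points works out as follows:

// progress(t) = 100 * (1 - 0.5 ** (t / 30)) — same formula as in the hook above.
const progressAt = (t: number) => 100 * (1 - Math.pow(0.5, t / 30));
// progressAt(30)  === 50      (one half-life)
// progressAt(60)  === 75      (two half-lives)
// progressAt(90)  === 87.5    (three half-lives)
// progressAt(120) === 93.75   (the bar keeps creeping toward 100 but never arrives)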
@@ -0,0 +1,128 @@
+"use client";
+
+import { useGetV2GetLibraryAgent } from "@/app/api/__generated__/endpoints/library/library";
+import { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecutionJobInfo";
+import { GraphExecutionMeta } from "@/app/api/__generated__/models/graphExecutionMeta";
+import { RunAgentModal } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/RunAgentModal";
+import { Button } from "@/components/atoms/Button/Button";
+import { Text } from "@/components/atoms/Text/Text";
+import {
+  CheckCircleIcon,
+  PencilLineIcon,
+  PlayIcon,
+} from "@phosphor-icons/react";
+import { AIChatBubble } from "../AIChatBubble/AIChatBubble";
+
+interface Props {
+  agentName: string;
+  libraryAgentId: string;
+  onSendMessage?: (content: string) => void;
+}
+
+export function AgentCreatedPrompt({
+  agentName,
+  libraryAgentId,
+  onSendMessage,
+}: Props) {
+  // Fetch library agent eagerly so modal is ready when user clicks
+  const { data: libraryAgentResponse, isLoading } = useGetV2GetLibraryAgent(
+    libraryAgentId,
+    {
+      query: {
+        enabled: !!libraryAgentId,
+      },
+    },
+  );
+
+  const libraryAgent =
+    libraryAgentResponse?.status === 200 ? libraryAgentResponse.data : null;
+
+  function handleRunWithPlaceholders() {
+    onSendMessage?.(
+      `Run the agent "${agentName}" with placeholder/example values so I can test it.`,
+    );
+  }
+
+  function handleRunCreated(execution: GraphExecutionMeta) {
+    onSendMessage?.(
+      `I've started the agent "${agentName}". The execution ID is ${execution.id}. Please monitor its progress and let me know when it completes.`,
+    );
+  }
+
+  function handleScheduleCreated(schedule: GraphExecutionJobInfo) {
+    const scheduleInfo = schedule.cron
+      ? `with cron schedule "${schedule.cron}"`
+      : "to run on the specified schedule";
+    onSendMessage?.(
+      `I've scheduled the agent "${agentName}" ${scheduleInfo}. The schedule ID is ${schedule.id}.`,
+    );
+  }
+
+  return (
+    <AIChatBubble>
+      <div className="flex flex-col gap-4">
+        <div className="flex items-center gap-2">
+          <div className="flex h-8 w-8 items-center justify-center rounded-full bg-green-100">
+            <CheckCircleIcon
+              size={18}
+              weight="fill"
+              className="text-green-600"
+            />
+          </div>
+          <div>
+            <Text variant="body-medium" className="text-neutral-900">
+              Agent Created Successfully
+            </Text>
+            <Text variant="small" className="text-neutral-500">
+              "{agentName}" is ready to test
+            </Text>
+          </div>
+        </div>
+
+        <div className="flex flex-col gap-2">
+          <Text variant="small-medium" className="text-neutral-700">
+            Ready to test?
+          </Text>
+          <div className="flex flex-wrap gap-2">
+            <Button
+              variant="outline"
+              size="small"
+              onClick={handleRunWithPlaceholders}
+              className="gap-2"
+            >
+              <PlayIcon size={16} />
+              Run with example values
+            </Button>
+            {libraryAgent ? (
+              <RunAgentModal
+                triggerSlot={
+                  <Button variant="outline" size="small" className="gap-2">
+                    <PencilLineIcon size={16} />
+                    Run with my inputs
+                  </Button>
+                }
+                agent={libraryAgent}
+                onRunCreated={handleRunCreated}
+                onScheduleCreated={handleScheduleCreated}
+              />
+            ) : (
+              <Button
+                variant="outline"
+                size="small"
+                loading={isLoading}
+                disabled
+                className="gap-2"
+              >
+                <PencilLineIcon size={16} />
+                Run with my inputs
+              </Button>
+            )}
+          </div>
+          <Text variant="small" className="text-neutral-500">
+            or just ask me
+          </Text>
+        </div>
+      </div>
+    </AIChatBubble>
+  );
+}
@@ -2,11 +2,13 @@ import { Text } from "@/components/atoms/Text/Text";
 import { cn } from "@/lib/utils";
 import type { ToolResult } from "@/types/chat";
 import { WarningCircleIcon } from "@phosphor-icons/react";
+import { AgentCreatedPrompt } from "./AgentCreatedPrompt";
 import { AIChatBubble } from "../AIChatBubble/AIChatBubble";
 import { MarkdownContent } from "../MarkdownContent/MarkdownContent";
 import {
   formatToolResponse,
   getErrorMessage,
+  isAgentSavedResponse,
   isErrorResponse,
 } from "./helpers";

@@ -16,6 +18,7 @@ export interface ToolResponseMessageProps {
   result?: ToolResult;
   success?: boolean;
   className?: string;
+  onSendMessage?: (content: string) => void;
 }

 export function ToolResponseMessage({

@@ -24,6 +27,7 @@ export function ToolResponseMessage({
   result,
   success: _success,
   className,
+  onSendMessage,
 }: ToolResponseMessageProps) {
   if (isErrorResponse(result)) {
     const errorMessage = getErrorMessage(result);
@@ -43,6 +47,18 @@
     );
   }

+  // Check for agent_saved response - show special prompt
+  const agentSavedData = isAgentSavedResponse(result);
+  if (agentSavedData.isSaved) {
+    return (
+      <AgentCreatedPrompt
+        agentName={agentSavedData.agentName}
+        libraryAgentId={agentSavedData.libraryAgentId}
+        onSendMessage={onSendMessage}
+      />
+    );
+  }
+
   const formattedText = formatToolResponse(result, toolName);

   return (
@@ -6,6 +6,43 @@ function stripInternalReasoning(content: string): string {
     .trim();
 }

+export interface AgentSavedData {
+  isSaved: boolean;
+  agentName: string;
+  agentId: string;
+  libraryAgentId: string;
+  libraryAgentLink: string;
+}
+
+export function isAgentSavedResponse(result: unknown): AgentSavedData {
+  if (typeof result !== "object" || result === null) {
+    return {
+      isSaved: false,
+      agentName: "",
+      agentId: "",
+      libraryAgentId: "",
+      libraryAgentLink: "",
+    };
+  }
+  const response = result as Record<string, unknown>;
+  if (response.type === "agent_saved") {
+    return {
+      isSaved: true,
+      agentName: (response.agent_name as string) || "Agent",
+      agentId: (response.agent_id as string) || "",
+      libraryAgentId: (response.library_agent_id as string) || "",
+      libraryAgentLink: (response.library_agent_link as string) || "",
+    };
+  }
+  return {
+    isSaved: false,
+    agentName: "",
+    agentId: "",
+    libraryAgentId: "",
+    libraryAgentLink: "",
+  };
+}
+
 export function isErrorResponse(result: unknown): boolean {
   if (typeof result === "string") {
     const lower = result.toLowerCase();
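A minimal sketch of how the new guard behaves, fed a made-up tool result shaped like the agent_saved payload it checks for:

// Hypothetical payload — the field names match what isAgentSavedResponse reads.
const saved = isAgentSavedResponse({
  type: "agent_saved",
  agent_name: "Email Digest",
  agent_id: "graph-1",
  library_agent_id: "lib-1",
  library_agent_link: "/library/agents/lib-1",
});
// saved.isSaved === true, saved.agentName === "Email Digest"
// Any other shape (a string, null, or a different `type`) yields isSaved: false.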
@@ -39,33 +76,62 @@ export function getErrorMessage(result: unknown): string {

 /**
  * Check if a value is a workspace file reference.
+ * Format: workspace://{fileId} or workspace://{fileId}#{mimeType}
  */
 function isWorkspaceRef(value: unknown): value is string {
   return typeof value === "string" && value.startsWith("workspace://");
 }

 /**
- * Check if a workspace reference appears to be an image based on common patterns.
- * Since workspace refs don't have extensions, we check the context or assume image
- * for certain block types.
- *
- * TODO: Replace keyword matching with MIME type encoded in workspace ref.
- * e.g., workspace://abc123#image/png or workspace://abc123#video/mp4
- * This would let frontend render correctly without fragile keyword matching.
+ * Extract MIME type from a workspace reference fragment.
+ * e.g., "workspace://abc123#video/mp4" → "video/mp4"
+ * Returns undefined if no fragment is present.
  */
-function isLikelyImageRef(value: string, outputKey?: string): boolean {
-  if (!isWorkspaceRef(value)) return false;
+function getWorkspaceMimeType(value: string): string | undefined {
+  const hashIndex = value.indexOf("#");
+  if (hashIndex === -1) return undefined;
+  return value.slice(hashIndex + 1) || undefined;
+}

-  // Check output key name for video-related hints (these are NOT images)
-  const videoKeywords = ["video", "mp4", "mov", "avi", "webm", "movie", "clip"];
+/**
+ * Determine the media category of a workspace ref or data URI.
+ * Uses the MIME type fragment on workspace refs when available,
+ * falls back to output key keyword matching for older refs without it.
+ */
+function getMediaCategory(
+  value: string,
+  outputKey?: string,
+): "video" | "image" | "audio" | "unknown" {
+  // Data URIs carry their own MIME type
+  if (value.startsWith("data:video/")) return "video";
+  if (value.startsWith("data:image/")) return "image";
+  if (value.startsWith("data:audio/")) return "audio";
+
+  // Workspace refs: prefer MIME type fragment
+  if (isWorkspaceRef(value)) {
+    const mime = getWorkspaceMimeType(value);
+    if (mime) {
+      if (mime.startsWith("video/")) return "video";
+      if (mime.startsWith("image/")) return "image";
+      if (mime.startsWith("audio/")) return "audio";
+      return "unknown";
+    }
+
+    // Fallback: keyword matching on output key for older refs without fragment
     if (outputKey) {
       const lowerKey = outputKey.toLowerCase();
-      if (videoKeywords.some((kw) => lowerKey.includes(kw))) {
-        return false;
-      }
-    }
-
-  // Check output key name for image-related hints
+      const videoKeywords = [
+        "video",
+        "mp4",
+        "mov",
+        "avi",
+        "webm",
+        "movie",
+        "clip",
+      ];
+      if (videoKeywords.some((kw) => lowerKey.includes(kw))) return "video";
+
       const imageKeywords = [
         "image",
         "img",
@@ -76,32 +142,35 @@ function isLikelyImageRef(value: string, outputKey?: string): boolean {
         "icon",
         "screenshot",
       ];
-  if (outputKey) {
-    const lowerKey = outputKey.toLowerCase();
-    if (imageKeywords.some((kw) => lowerKey.includes(kw))) {
-      return true;
-    }
+      if (imageKeywords.some((kw) => lowerKey.includes(kw))) return "image";
     }

-  // Default to treating workspace refs as potential images
-  // since that's the most common case for generated content
-  return true;
+    // Default to image for backward compatibility
+    return "image";
+  }
+
+  return "unknown";
 }

 /**
- * Format a single output value, converting workspace refs to markdown images.
+ * Format a single output value, converting workspace refs to markdown images/videos.
+ * Videos use a "video:" alt-text prefix so the MarkdownContent renderer can
+ * distinguish them from images and render a <video> element.
  */
 function formatOutputValue(value: unknown, outputKey?: string): string {
-  if (isWorkspaceRef(value) && isLikelyImageRef(value, outputKey)) {
-    // Format as markdown image
+  if (typeof value === "string") {
+    const category = getMediaCategory(value, outputKey);
+
+    if (category === "video") {
+      // Format with "video:" prefix so MarkdownContent renders <video>
+      return ``;
+    }
+
+    if (category === "image") {
       return ``;
     }
-  if (typeof value === "string") {
-    // Check for data URIs (images)
-    if (value.startsWith("data:image/")) {
-      return ``;
-    }
+
+    // For audio, unknown workspace refs, data URIs, etc. - return as-is
     return value;
   }

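Roughly, the precedence getMediaCategory now applies — the file IDs and output keys below are hypothetical:

getMediaCategory("data:audio/mpeg;base64,AAAA");       // "audio"   — data URI MIME wins outright
getMediaCategory("workspace://abc#video/mp4");         // "video"   — fragment MIME, no guessing
getMediaCategory("workspace://abc#application/pdf");   // "unknown" — fragment present but not a media type
getMediaCategory("workspace://abc", "video_out");      // "video"   — legacy ref, keyword fallback
getMediaCategory("workspace://abc", "result");         // "image"   — legacy ref, backward-compatible default
getMediaCategory("plain text", "result");              // "unknown" — neither a ref nor a data URI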
@@ -26,6 +26,7 @@ export const providerIcons: Partial<
   nvidia: fallbackIcon,
   discord: FaDiscord,
   d_id: fallbackIcon,
+  elevenlabs: fallbackIcon,
   google_maps: FaGoogle,
   jina: fallbackIcon,
   ideogram: fallbackIcon,
@@ -4,7 +4,9 @@ import { loadScript } from "@/services/scripts/scripts";
 export async function loadGoogleAPIPicker(): Promise<void> {
   validateWindow();

-  await loadScript("https://apis.google.com/js/api.js");
+  await loadScript("https://apis.google.com/js/api.js", {
+    referrerPolicy: "no-referrer-when-downgrade",
+  });

   const googleAPI = window.gapi;
   if (!googleAPI) {

@@ -27,7 +29,9 @@ export async function loadGoogleIdentityServices(): Promise<void> {
     throw new Error("Google Identity Services cannot load on server");
   }

-  await loadScript("https://accounts.google.com/gsi/client");
+  await loadScript("https://accounts.google.com/gsi/client", {
+    referrerPolicy: "no-referrer-when-downgrade",
+  });

   const google = window.google;
   if (!google?.accounts?.oauth2) {
@@ -47,7 +47,7 @@ export function Navbar() {

   const actualLoggedInLinks = [
     { name: "Home", href: homeHref },
-    ...(isChatEnabled === true ? [{ name: "Tasks", href: "/library" }] : []),
+    ...(isChatEnabled === true ? [{ name: "Agents", href: "/library" }] : []),
     ...loggedInLinks,
   ];

@@ -362,25 +362,14 @@ export type GraphMeta = {
|
|||||||
user_id: UserID;
|
user_id: UserID;
|
||||||
version: number;
|
version: number;
|
||||||
is_active: boolean;
|
is_active: boolean;
|
||||||
|
created_at: Date;
|
||||||
name: string;
|
name: string;
|
||||||
description: string;
|
description: string;
|
||||||
instructions?: string | null;
|
instructions?: string | null;
|
||||||
recommended_schedule_cron: string | null;
|
recommended_schedule_cron: string | null;
|
||||||
forked_from_id?: GraphID | null;
|
forked_from_id?: GraphID | null;
|
||||||
forked_from_version?: number | null;
|
forked_from_version?: number | null;
|
||||||
input_schema: GraphInputSchema;
|
};
|
||||||
output_schema: GraphOutputSchema;
|
|
||||||
credentials_input_schema: CredentialsInputSchema;
|
|
||||||
} & (
|
|
||||||
| {
|
|
||||||
has_external_trigger: true;
|
|
||||||
trigger_setup_info: GraphTriggerInfo;
|
|
||||||
}
|
|
||||||
| {
|
|
||||||
has_external_trigger: false;
|
|
||||||
trigger_setup_info: null;
|
|
||||||
}
|
|
||||||
);
|
|
||||||
|
|
||||||
export type GraphID = Brand<string, "GraphID">;
|
export type GraphID = Brand<string, "GraphID">;
|
||||||
|
|
||||||
@@ -447,11 +436,22 @@ export type GraphTriggerInfo = {

 /* Mirror of backend/data/graph.py:Graph */
 export type Graph = GraphMeta & {
-  created_at: Date;
   nodes: Node[];
   links: Link[];
   sub_graphs: Omit<Graph, "sub_graphs">[]; // Flattened sub-graphs
-};
+  input_schema: GraphInputSchema;
+  output_schema: GraphOutputSchema;
+  credentials_input_schema: CredentialsInputSchema;
+} & (
+  | {
+      has_external_trigger: true;
+      trigger_setup_info: GraphTriggerInfo;
+    }
+  | {
+      has_external_trigger: false;
+      trigger_setup_info: null;
+    }
+);

 export type GraphUpdateable = Omit<
   Graph,
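With the discriminated union moved from `GraphMeta` onto `Graph` (and `created_at` moved the other way), checking `has_external_trigger` narrows `trigger_setup_info` wherever a full graph is in hand. A small illustration, assuming the types in the hunks above:

```typescript
// Illustration only: narrowing on the has_external_trigger discriminant.
// `Graph` and `GraphTriggerInfo` are the types defined in the diff above.
function describeTrigger(graph: Graph): string {
  if (graph.has_external_trigger) {
    // In this branch trigger_setup_info is GraphTriggerInfo, not null.
    const info: GraphTriggerInfo = graph.trigger_setup_info;
    return `externally triggered (${JSON.stringify(info)})`;
  }
  // Here trigger_setup_info is statically known to be null.
  return "runs on manual or scheduled execution";
}
```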
@@ -192,6 +192,7 @@ Below is a comprehensive list of all available blocks, categorized by their prim
 | [Get Current Time](block-integrations/text.md#get-current-time) | This block outputs the current time |
 | [Match Text Pattern](block-integrations/text.md#match-text-pattern) | Matches text against a regex pattern and forwards data to positive or negative output based on the match |
 | [Text Decoder](block-integrations/text.md#text-decoder) | Decodes a string containing escape sequences into actual text |
+| [Text Encoder](block-integrations/text.md#text-encoder) | Encodes a string by converting special characters into escape sequences |
 | [Text Replace](block-integrations/text.md#text-replace) | This block is used to replace a text with a new text |
 | [Text Split](block-integrations/text.md#text-split) | This block is used to split a text into a list of strings |
 | [Word Character Count](block-integrations/text.md#word-character-count) | Counts the number of words and characters in a given text |
@@ -232,6 +233,7 @@ Below is a comprehensive list of all available blocks, categorized by their prim
 | [Stagehand Extract](block-integrations/stagehand/blocks.md#stagehand-extract) | Extract structured data from a webpage |
 | [Stagehand Observe](block-integrations/stagehand/blocks.md#stagehand-observe) | Find suggested actions for your workflows |
 | [Unreal Text To Speech](block-integrations/llm.md#unreal-text-to-speech) | Converts text to speech using the Unreal Speech API |
+| [Video Narration](block-integrations/video/narration.md#video-narration) | Generate AI narration and add to video |

 ## Search and Information Retrieval
@@ -471,9 +473,13 @@ Below is a comprehensive list of all available blocks, categorized by their prim

 | Block Name | Description |
 |------------|-------------|
-| [Add Audio To Video](block-integrations/multimedia.md#add-audio-to-video) | Block to attach an audio file to a video file using moviepy |
-| [Loop Video](block-integrations/multimedia.md#loop-video) | Block to loop a video to a given duration or number of repeats |
-| [Media Duration](block-integrations/multimedia.md#media-duration) | Block to get the duration of a media file |
+| [Add Audio To Video](block-integrations/video/add_audio.md#add-audio-to-video) | Block to attach an audio file to a video file using moviepy |
+| [Loop Video](block-integrations/video/loop.md#loop-video) | Block to loop a video to a given duration or number of repeats |
+| [Media Duration](block-integrations/video/duration.md#media-duration) | Block to get the duration of a media file |
+| [Video Clip](block-integrations/video/clip.md#video-clip) | Extract a time segment from a video |
+| [Video Concat](block-integrations/video/concat.md#video-concat) | Merge multiple video clips into one continuous video |
+| [Video Download](block-integrations/video/download.md#video-download) | Download video from URL (YouTube, Vimeo, news sites, direct links) |
+| [Video Text Overlay](block-integrations/video/text_overlay.md#video-text-overlay) | Add text overlay/caption to video |

 ## Productivity
@@ -85,7 +85,6 @@
 * [LLM](block-integrations/llm.md)
 * [Logic](block-integrations/logic.md)
 * [Misc](block-integrations/misc.md)
-* [Multimedia](block-integrations/multimedia.md)
 * [Notion Create Page](block-integrations/notion/create_page.md)
 * [Notion Read Database](block-integrations/notion/read_database.md)
 * [Notion Read Page](block-integrations/notion/read_page.md)
@@ -129,5 +128,13 @@
 * [Twitter Timeline](block-integrations/twitter/timeline.md)
 * [Twitter Tweet Lookup](block-integrations/twitter/tweet_lookup.md)
 * [Twitter User Lookup](block-integrations/twitter/user_lookup.md)
+* [Video Add Audio](block-integrations/video/add_audio.md)
+* [Video Clip](block-integrations/video/clip.md)
+* [Video Concat](block-integrations/video/concat.md)
+* [Video Download](block-integrations/video/download.md)
+* [Video Duration](block-integrations/video/duration.md)
+* [Video Loop](block-integrations/video/loop.md)
+* [Video Narration](block-integrations/video/narration.md)
+* [Video Text Overlay](block-integrations/video/text_overlay.md)
 * [Wolfram LLM API](block-integrations/wolfram/llm_api.md)
 * [Zerobounce Validate Emails](block-integrations/zerobounce/validate_emails.md)
Some files were not shown because too many files have changed in this diff.