diff --git a/autogpt_platform/backend/backend/blocks/github/checks.py b/autogpt_platform/backend/backend/blocks/github/checks.py
index 6d9ac1897c..070b5179e8 100644
--- a/autogpt_platform/backend/backend/blocks/github/checks.py
+++ b/autogpt_platform/backend/backend/blocks/github/checks.py
@@ -172,7 +172,9 @@ class GithubCreateCheckRunBlock(Block):
data.output = output_data
check_runs_url = f"{repo_url}/check-runs"
- response = api.post(check_runs_url)
+ response = api.post(
+ check_runs_url, data=data.model_dump_json(exclude_none=True)
+ )
result = response.json()
return {
@@ -323,7 +325,9 @@ class GithubUpdateCheckRunBlock(Block):
data.output = output_data
check_run_url = f"{repo_url}/check-runs/{check_run_id}"
- response = api.patch(check_run_url)
+ response = api.patch(
+ check_run_url, data=data.model_dump_json(exclude_none=True)
+ )
result = response.json()
return {
diff --git a/autogpt_platform/backend/backend/blocks/github/statuses.py b/autogpt_platform/backend/backend/blocks/github/statuses.py
index 8abf27928f..a69b0e3d61 100644
--- a/autogpt_platform/backend/backend/blocks/github/statuses.py
+++ b/autogpt_platform/backend/backend/blocks/github/statuses.py
@@ -144,7 +144,7 @@ class GithubCreateStatusBlock(Block):
data.description = description
status_url = f"{repo_url}/statuses/{sha}"
- response = api.post(status_url, json=data)
+ response = api.post(status_url, data=data.model_dump_json(exclude_none=True))
result = response.json()
return {
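Note on the serialization change above: the check-run requests previously sent no body at all, and the status request passed the Pydantic model object directly via `json=`, which a stock JSON encoder cannot serialize. The pattern now used serializes the model explicitly and drops unset optional fields. A minimal sketch of that behavior, with illustrative field names rather than the block's actual model:

```python
from typing import Optional

from pydantic import BaseModel


class StatusPayload(BaseModel):
    state: str
    target_url: Optional[str] = None
    description: Optional[str] = None
    context: str = "default"


payload = StatusPayload(state="success", description="CI passed")
# exclude_none=True drops target_url, which was never set
print(payload.model_dump_json(exclude_none=True))
# {"state":"success","description":"CI passed","context":"default"}
```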
diff --git a/autogpt_platform/backend/backend/blocks/ideogram.py b/autogpt_platform/backend/backend/blocks/ideogram.py
index 178c667642..ca9ba69a80 100644
--- a/autogpt_platform/backend/backend/blocks/ideogram.py
+++ b/autogpt_platform/backend/backend/blocks/ideogram.py
@@ -142,6 +142,16 @@ class IdeogramModelBlock(Block):
title="Color Palette Preset",
advanced=True,
)
+ custom_color_palette: Optional[list[str]] = SchemaField(
+ description=(
+ "Only available for model version V_2 or V_2_TURBO. Provide one or more color hex codes "
+ "(e.g., ['#000030', '#1C0C47', '#9900FF', '#4285F4', '#FFFFFF']) to define a custom color "
+ "palette. Only used if 'color_palette_name' is 'NONE'."
+ ),
+ default=None,
+ title="Custom Color Palette",
+ advanced=True,
+ )
class Output(BlockSchema):
result: str = SchemaField(description="Generated image URL")
@@ -164,6 +174,13 @@ class IdeogramModelBlock(Block):
"style_type": StyleType.AUTO,
"negative_prompt": None,
"color_palette_name": ColorPalettePreset.NONE,
+ "custom_color_palette": [
+ "#000030",
+ "#1C0C47",
+ "#9900FF",
+ "#4285F4",
+ "#FFFFFF",
+ ],
"credentials": TEST_CREDENTIALS_INPUT,
},
test_output=[
@@ -173,7 +190,7 @@ class IdeogramModelBlock(Block):
),
],
test_mock={
- "run_model": lambda api_key, model_name, prompt, seed, aspect_ratio, magic_prompt_option, style_type, negative_prompt, color_palette_name: "https://ideogram.ai/api/images/test-generated-image-url.png",
+ "run_model": lambda api_key, model_name, prompt, seed, aspect_ratio, magic_prompt_option, style_type, negative_prompt, color_palette_name, custom_colors: "https://ideogram.ai/api/images/test-generated-image-url.png",
"upscale_image": lambda api_key, image_url: "https://ideogram.ai/api/images/test-upscaled-image-url.png",
},
test_credentials=TEST_CREDENTIALS,
@@ -195,6 +212,7 @@ class IdeogramModelBlock(Block):
style_type=input_data.style_type.value,
negative_prompt=input_data.negative_prompt,
color_palette_name=input_data.color_palette_name.value,
+ custom_colors=input_data.custom_color_palette,
)
# Step 2: Upscale the image if requested
@@ -217,6 +235,7 @@ class IdeogramModelBlock(Block):
style_type: str,
negative_prompt: Optional[str],
color_palette_name: str,
+ custom_colors: Optional[list[str]],
):
url = "https://api.ideogram.ai/generate"
headers = {
@@ -241,7 +260,11 @@ class IdeogramModelBlock(Block):
data["image_request"]["negative_prompt"] = negative_prompt
if color_palette_name != "NONE":
- data["image_request"]["color_palette"] = {"name": color_palette_name}
+ data["color_palette"] = {"name": color_palette_name}
+ elif custom_colors:
+ data["color_palette"] = {
+ "members": [{"color_hex": color} for color in custom_colors]
+ }
try:
response = requests.post(url, json=data, headers=headers)
@@ -267,9 +290,7 @@ class IdeogramModelBlock(Block):
response = requests.post(
url,
headers=headers,
- data={
- "image_request": "{}", # Empty JSON object
- },
+ data={"image_request": "{}"},
files=files,
)
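For reference, the two palette branches added to `run_model` produce request bodies along these lines. Only the `color_palette` shape follows the code above; the preset name and the contents of `image_request` are illustrative:

```python
# When a preset other than NONE is selected (preset name illustrative):
preset_payload = {
    "image_request": {"prompt": "..."},  # other request fields omitted
    "color_palette": {"name": "PASTEL"},
}

# When the preset is NONE and custom hex colors are provided:
custom_payload = {
    "image_request": {"prompt": "..."},
    "color_palette": {
        "members": [
            {"color_hex": "#000030"},
            {"color_hex": "#1C0C47"},
            {"color_hex": "#9900FF"},
        ]
    },
}
```

Both branches now attach `color_palette` alongside `image_request` rather than inside it, as in the removed line.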
diff --git a/autogpt_platform/backend/backend/data/execution.py b/autogpt_platform/backend/backend/data/execution.py
index ca6926ee1c..aef43bc9ae 100644
--- a/autogpt_platform/backend/backend/data/execution.py
+++ b/autogpt_platform/backend/backend/data/execution.py
@@ -1,11 +1,10 @@
from collections import defaultdict
from datetime import datetime, timezone
from multiprocessing import Manager
-from typing import Any, AsyncGenerator, Generator, Generic, Optional, Type, TypeVar
+from typing import Any, AsyncGenerator, Generator, Generic, Type, TypeVar
from prisma import Json
from prisma.enums import AgentExecutionStatus
-from prisma.errors import PrismaError
from prisma.models import (
AgentGraphExecution,
AgentNodeExecution,
@@ -342,28 +341,21 @@ async def update_execution_status(
return ExecutionResult.from_db(res)
-async def get_execution(
- execution_id: str, user_id: str
-) -> Optional[AgentNodeExecution]:
- """
- Get an execution by ID. Returns None if not found.
-
- Args:
- execution_id: The ID of the execution to retrieve
-
- Returns:
- The execution if found, None otherwise
- """
- try:
- execution = await AgentNodeExecution.prisma().find_unique(
- where={
- "id": execution_id,
- "userId": user_id,
- }
+async def delete_execution(
+ graph_exec_id: str, user_id: str, soft_delete: bool = True
+) -> None:
+ if soft_delete:
+ deleted_count = await AgentGraphExecution.prisma().update_many(
+ where={"id": graph_exec_id, "userId": user_id}, data={"isDeleted": True}
+ )
+ else:
+ deleted_count = await AgentGraphExecution.prisma().delete_many(
+ where={"id": graph_exec_id, "userId": user_id}
+ )
+ if deleted_count < 1:
+ raise DatabaseError(
+ f"Could not delete graph execution #{graph_exec_id}: not found"
)
- return execution
- except PrismaError:
- return None
async def get_execution_results(graph_exec_id: str) -> list[ExecutionResult]:
@@ -385,15 +377,12 @@ async def get_executions_in_timerange(
try:
executions = await AgentGraphExecution.prisma().find_many(
where={
- "AND": [
- {
- "startedAt": {
- "gte": datetime.fromisoformat(start_time),
- "lte": datetime.fromisoformat(end_time),
- }
- },
- {"userId": user_id},
- ]
+ "startedAt": {
+ "gte": datetime.fromisoformat(start_time),
+ "lte": datetime.fromisoformat(end_time),
+ },
+ "userId": user_id,
+ "isDeleted": False,
},
include=GRAPH_EXECUTION_INCLUDE,
)
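A hedged usage sketch for the new `delete_execution` helper (IDs are placeholders, and a connected Prisma client is assumed, as in the running backend). The default is a soft delete: the row is kept but flagged `isDeleted=True`, so the queries here and in `graph.py` that now filter on `"isDeleted": False` stop returning it; `soft_delete=False` removes the row outright:

```python
import asyncio

from backend.data import execution as execution_db


async def main() -> None:
    # Soft delete (default): flags the execution as deleted but keeps the row.
    await execution_db.delete_execution(graph_exec_id="exec-123", user_id="user-456")

    # Hard delete: removes the AgentGraphExecution row entirely.
    await execution_db.delete_execution(
        graph_exec_id="exec-789", user_id="user-456", soft_delete=False
    )


asyncio.run(main())
```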
diff --git a/autogpt_platform/backend/backend/data/graph.py b/autogpt_platform/backend/backend/data/graph.py
index 1857c5fd51..a6a04b0ceb 100644
--- a/autogpt_platform/backend/backend/data/graph.py
+++ b/autogpt_platform/backend/backend/data/graph.py
@@ -597,9 +597,10 @@ async def get_graphs(
return graph_models
+# TODO: move execution stuff to .execution
async def get_graphs_executions(user_id: str) -> list[GraphExecutionMeta]:
executions = await AgentGraphExecution.prisma().find_many(
- where={"userId": user_id},
+ where={"isDeleted": False, "userId": user_id},
order={"createdAt": "desc"},
)
return [GraphExecutionMeta.from_db(execution) for execution in executions]
@@ -607,7 +608,7 @@ async def get_graphs_executions(user_id: str) -> list[GraphExecutionMeta]:
async def get_graph_executions(graph_id: str, user_id: str) -> list[GraphExecutionMeta]:
executions = await AgentGraphExecution.prisma().find_many(
- where={"agentGraphId": graph_id, "userId": user_id},
+ where={"agentGraphId": graph_id, "isDeleted": False, "userId": user_id},
order={"createdAt": "desc"},
)
return [GraphExecutionMeta.from_db(execution) for execution in executions]
@@ -617,14 +618,14 @@ async def get_execution_meta(
user_id: str, execution_id: str
) -> GraphExecutionMeta | None:
execution = await AgentGraphExecution.prisma().find_first(
- where={"id": execution_id, "userId": user_id}
+ where={"id": execution_id, "isDeleted": False, "userId": user_id}
)
return GraphExecutionMeta.from_db(execution) if execution else None
async def get_execution(user_id: str, execution_id: str) -> GraphExecution | None:
execution = await AgentGraphExecution.prisma().find_first(
- where={"id": execution_id, "userId": user_id},
+ where={"id": execution_id, "isDeleted": False, "userId": user_id},
include={
"AgentNodeExecutions": {
"include": {"AgentNode": True, "Input": True, "Output": True},
diff --git a/autogpt_platform/backend/backend/server/routers/v1.py b/autogpt_platform/backend/backend/server/routers/v1.py
index 08673aad8a..0863e651ba 100644
--- a/autogpt_platform/backend/backend/server/routers/v1.py
+++ b/autogpt_platform/backend/backend/server/routers/v1.py
@@ -10,12 +10,14 @@ from autogpt_libs.auth.middleware import auth_middleware
from autogpt_libs.feature_flag.client import feature_flag
from autogpt_libs.utils.cache import thread_cached
from fastapi import APIRouter, Body, Depends, HTTPException, Request, Response
+from starlette.status import HTTP_204_NO_CONTENT
from typing_extensions import Optional, TypedDict
import backend.data.block
import backend.server.integrations.router
import backend.server.routers.analytics
import backend.server.v2.library.db as library_db
+from backend.data import execution as execution_db
from backend.data import graph as graph_db
from backend.data.api_key import (
APIKeyError,
@@ -393,7 +395,8 @@ async def get_graph_all_versions(
path="/graphs", tags=["graphs"], dependencies=[Depends(auth_middleware)]
)
async def create_new_graph(
- create_graph: CreateGraph, user_id: Annotated[str, Depends(get_user_id)]
+ create_graph: CreateGraph,
+ user_id: Annotated[str, Depends(get_user_id)],
) -> graph_db.GraphModel:
graph = graph_db.make_graph_model(create_graph.graph, user_id)
graph.reassign_ids(user_id=user_id, reassign_graph_id=True)
@@ -401,10 +404,9 @@ async def create_new_graph(
graph = await graph_db.create_graph(graph, user_id=user_id)
# Create a library agent for the new graph
- await library_db.create_library_agent(
- graph.id,
- graph.version,
- user_id,
+ library_agent = await library_db.create_library_agent(graph, user_id)
+ _ = asyncio.create_task(
+ library_db.add_generated_agent_image(graph, library_agent.id)
)
graph = await on_graph_activate(
@@ -621,11 +623,26 @@ async def get_graph_execution(
result = await graph_db.get_execution(execution_id=graph_exec_id, user_id=user_id)
if not result:
- raise HTTPException(status_code=404, detail=f"Graph #{graph_id} not found.")
+ raise HTTPException(
+ status_code=404, detail=f"Graph execution #{graph_exec_id} not found."
+ )
return result
+@v1_router.delete(
+ path="/executions/{graph_exec_id}",
+ tags=["graphs"],
+ dependencies=[Depends(auth_middleware)],
+ status_code=HTTP_204_NO_CONTENT,
+)
+async def delete_graph_execution(
+ graph_exec_id: str,
+ user_id: Annotated[str, Depends(get_user_id)],
+) -> None:
+ await execution_db.delete_execution(graph_exec_id=graph_exec_id, user_id=user_id)
+
+
########################################################
##################### Schedules ########################
########################################################
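The new route is a thin wrapper over `delete_execution` and returns 204 No Content. A hedged client-side sketch; the `/api/v1` prefix and bearer-token auth are assumptions about how `v1_router` is mounted, which this diff does not show:

```python
import httpx


def delete_graph_execution(base_url: str, token: str, graph_exec_id: str) -> None:
    # DELETE /api/v1/executions/{graph_exec_id} -- soft-deletes the run by default
    response = httpx.delete(
        f"{base_url}/api/v1/executions/{graph_exec_id}",
        headers={"Authorization": f"Bearer {token}"},
    )
    response.raise_for_status()  # expect 204 No Content on success
```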
diff --git a/autogpt_platform/backend/backend/server/v2/library/db.py b/autogpt_platform/backend/backend/server/v2/library/db.py
index f8128f8d0e..ae4f151347 100644
--- a/autogpt_platform/backend/backend/server/v2/library/db.py
+++ b/autogpt_platform/backend/backend/server/v2/library/db.py
@@ -1,3 +1,4 @@
+import asyncio
import logging
from typing import Optional
@@ -7,14 +8,17 @@ import prisma.fields
import prisma.models
import prisma.types
+import backend.data.graph
import backend.data.includes
import backend.server.model
import backend.server.v2.library.model as library_model
import backend.server.v2.store.exceptions as store_exceptions
import backend.server.v2.store.image_gen as store_image_gen
import backend.server.v2.store.media as store_media
+from backend.util.settings import Config
logger = logging.getLogger(__name__)
+config = Config()
async def list_library_agents(
@@ -168,17 +172,53 @@ async def get_library_agent(id: str, user_id: str) -> library_model.LibraryAgent
raise store_exceptions.DatabaseError("Failed to fetch library agent") from e
+async def add_generated_agent_image(
+ graph: backend.data.graph.GraphModel,
+ library_agent_id: str,
+) -> Optional[prisma.models.LibraryAgent]:
+ """
+ Generates an image for the specified LibraryAgent and updates its record.
+ """
+ user_id = graph.user_id
+ graph_id = graph.id
+
+ # Use .jpeg here since we are generating JPEG images
+ filename = f"agent_{graph_id}.jpeg"
+ try:
+ if not (image_url := await store_media.check_media_exists(user_id, filename)):
+ # Generate agent image as JPEG
+ if config.use_agent_image_generation_v2:
+ image = await asyncio.to_thread(
+ store_image_gen.generate_agent_image_v2, graph=graph
+ )
+ else:
+ image = await store_image_gen.generate_agent_image(agent=graph)
+
+ # Create UploadFile with the correct filename and content_type
+ image_file = fastapi.UploadFile(file=image, filename=filename)
+
+ image_url = await store_media.upload_media(
+ user_id=user_id, file=image_file, use_file_name=True
+ )
+ except Exception as e:
+ logger.warning(f"Error generating and uploading agent image: {e}")
+ return None
+
+ return await prisma.models.LibraryAgent.prisma().update(
+ where={"id": library_agent_id},
+ data={"imageUrl": image_url},
+ )
+
+
async def create_library_agent(
- agent_id: str,
- agent_version: int,
+ graph: backend.data.graph.GraphModel,
user_id: str,
) -> prisma.models.LibraryAgent:
"""
Adds an agent to the user's library (LibraryAgent table).
Args:
- agent_id: The ID of the agent to add.
- agent_version: The version of the agent to add.
+ graph: The graph of the agent to add to the library.
user_id: The user to whom the agent will be added.
Returns:
@@ -189,52 +229,19 @@ async def create_library_agent(
DatabaseError: If there's an error during creation or if image generation fails.
"""
logger.info(
- f"Creating library agent for graph #{agent_id} v{agent_version}; "
+ f"Creating library agent for graph #{graph.id} v{graph.version}; "
f"user #{user_id}"
)
- # Fetch agent graph
- try:
- agent = await prisma.models.AgentGraph.prisma().find_unique(
- where={"graphVersionId": {"id": agent_id, "version": agent_version}}
- )
- except prisma.errors.PrismaError as e:
- logger.exception("Database error fetching agent")
- raise store_exceptions.DatabaseError("Failed to fetch agent") from e
-
- if not agent:
- raise store_exceptions.AgentNotFoundError(
- f"Agent #{agent_id} v{agent_version} not found"
- )
-
- # Use .jpeg here since we are generating JPEG images
- filename = f"agent_{agent_id}.jpeg"
- try:
- if not (image_url := await store_media.check_media_exists(user_id, filename)):
- # Generate agent image as JPEG
- image = await store_image_gen.generate_agent_image(agent=agent)
-
- # Create UploadFile with the correct filename and content_type
- image_file = fastapi.UploadFile(file=image, filename=filename)
-
- image_url = await store_media.upload_media(
- user_id=user_id, file=image_file, use_file_name=True
- )
- except Exception as e:
- logger.warning(f"Error generating and uploading agent image: {e}")
- image_url = None
-
try:
return await prisma.models.LibraryAgent.prisma().create(
data={
- "imageUrl": image_url,
- "isCreatedByUser": (user_id == agent.userId),
+ "isCreatedByUser": (user_id == graph.user_id),
"useGraphIsActiveVersion": True,
"User": {"connect": {"id": user_id}},
- # "Creator": {"connect": {"id": agent.userId}},
"Agent": {
"connect": {
- "graphVersionId": {"id": agent_id, "version": agent_version}
+ "graphVersionId": {"id": graph.id, "version": graph.version}
}
},
}
diff --git a/autogpt_platform/backend/backend/server/v2/store/image_gen.py b/autogpt_platform/backend/backend/server/v2/store/image_gen.py
index c75b6c23dc..a2dee7a4af 100644
--- a/autogpt_platform/backend/backend/server/v2/store/image_gen.py
+++ b/autogpt_platform/backend/backend/server/v2/store/image_gen.py
@@ -4,14 +4,26 @@ from enum import Enum
import replicate
import replicate.exceptions
-import requests
from prisma.models import AgentGraph
from replicate.helpers import FileOutput
+from backend.blocks.ideogram import (
+ AspectRatio,
+ ColorPalettePreset,
+ IdeogramModelBlock,
+ IdeogramModelName,
+ MagicPromptOption,
+ StyleType,
+ UpscaleOption,
+)
from backend.data.graph import Graph
+from backend.data.model import CredentialsMetaInput, ProviderName
+from backend.integrations.credentials_store import ideogram_credentials
+from backend.util.request import requests
from backend.util.settings import Settings
logger = logging.getLogger(__name__)
+settings = Settings()
class ImageSize(str, Enum):
@@ -22,6 +34,63 @@ class ImageStyle(str, Enum):
DIGITAL_ART = "digital art"
+def generate_agent_image_v2(graph: Graph | AgentGraph) -> io.BytesIO:
+ """
+ Generate an image for an agent using the Ideogram model.
+ Returns:
+ io.BytesIO: The generated image as bytes
+ """
+ if not ideogram_credentials.api_key:
+ raise ValueError("Missing Ideogram API key")
+
+ name = graph.name
+ description = f"{name} ({graph.description})" if graph.description else name
+
+ prompt = (
+ f"Create a visually striking retro-futuristic vector pop art illustration prominently featuring "
+ f'"{name}" in bold typography. The image clearly and literally depicts a {description}, '
+ f"along with recognizable objects directly associated with the primary function of a {name}. "
+ f"Ensure the imagery is concrete, intuitive, and immediately understandable, clearly conveying the "
+ f"purpose of a {name}. Maintain vibrant, limited-palette colors, sharp vector lines, geometric "
+ f"shapes, flat illustration techniques, and solid colors without gradients or shading. Preserve a "
+ f"retro-futuristic aesthetic influenced by mid-century futurism and 1960s psychedelia, "
+ f"prioritizing clear visual storytelling and thematic clarity above all else."
+ )
+
+ custom_colors = [
+ "#000030",
+ "#1C0C47",
+ "#9900FF",
+ "#4285F4",
+ "#FFFFFF",
+ ]
+
+ # Run the Ideogram model block with the specified parameters
+ url = IdeogramModelBlock().run_once(
+ IdeogramModelBlock.Input(
+ credentials=CredentialsMetaInput(
+ id=ideogram_credentials.id,
+ provider=ProviderName.IDEOGRAM,
+ title=ideogram_credentials.title,
+ type=ideogram_credentials.type,
+ ),
+ prompt=prompt,
+ ideogram_model_name=IdeogramModelName.V2,
+ aspect_ratio=AspectRatio.ASPECT_4_3,
+ magic_prompt_option=MagicPromptOption.OFF,
+ style_type=StyleType.AUTO,
+ upscale=UpscaleOption.NO_UPSCALE,
+ color_palette_name=ColorPalettePreset.NONE,
+ custom_color_palette=custom_colors,
+ seed=None,
+ negative_prompt=None,
+ ),
+ "result",
+ credentials=ideogram_credentials,
+ )
+ return io.BytesIO(requests.get(url).content)
+
+
async def generate_agent_image(agent: Graph | AgentGraph) -> io.BytesIO:
"""
Generate an image for an agent using Flux model via Replicate API.
@@ -33,8 +102,6 @@ async def generate_agent_image(agent: Graph | AgentGraph) -> io.BytesIO:
io.BytesIO: The generated image as bytes
"""
try:
- settings = Settings()
-
if not settings.secrets.replicate_api_key:
raise ValueError("Missing Replicate API key in settings")
@@ -71,14 +138,12 @@ async def generate_agent_image(agent: Graph | AgentGraph) -> io.BytesIO:
# If it's a URL string, fetch the image bytes
result_url = output[0]
response = requests.get(result_url)
- response.raise_for_status()
image_bytes = response.content
elif isinstance(output, FileOutput):
image_bytes = output.read()
elif isinstance(output, str):
# Output is a URL
response = requests.get(output)
- response.raise_for_status()
image_bytes = response.content
else:
raise RuntimeError("Unexpected output format from the model.")
diff --git a/autogpt_platform/backend/backend/util/settings.py b/autogpt_platform/backend/backend/util/settings.py
index 613093acd5..6092febd70 100644
--- a/autogpt_platform/backend/backend/util/settings.py
+++ b/autogpt_platform/backend/backend/util/settings.py
@@ -206,6 +206,11 @@ class Config(UpdateTrackingModel["Config"], BaseSettings):
description="The email address to use for sending emails",
)
+ use_agent_image_generation_v2: bool = Field(
+ default=True,
+ description="Whether to use the new agent image generation service",
+ )
+
@field_validator("platform_base_url", "frontend_base_url")
@classmethod
def validate_platform_base_url(cls, v: str, info: ValidationInfo) -> str:
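Since `use_agent_image_generation_v2` is a regular pydantic-settings field on `Config`, it should be togglable from the environment like the other flags. A hedged sketch; the exact variable name assumes no env prefix is configured for `Config`:

```python
# With pydantic-settings' case-insensitive env resolution, something like
# USE_AGENT_IMAGE_GENERATION_V2=false would disable the new path.
from backend.util.settings import Config

config = Config()
if config.use_agent_image_generation_v2:
    print("Using Ideogram-based generate_agent_image_v2")
else:
    print("Falling back to the Replicate/Flux generate_agent_image")
```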
diff --git a/autogpt_platform/backend/migrations/20250228161607_agent_graph_execution_soft_delete/migration.sql b/autogpt_platform/backend/migrations/20250228161607_agent_graph_execution_soft_delete/migration.sql
new file mode 100644
index 0000000000..ef091b9c80
--- /dev/null
+++ b/autogpt_platform/backend/migrations/20250228161607_agent_graph_execution_soft_delete/migration.sql
@@ -0,0 +1,6 @@
+-- Add isDeleted column to AgentGraphExecution
+ALTER TABLE "AgentGraphExecution"
+ADD COLUMN "isDeleted"
+ BOOLEAN
+ NOT NULL
+ DEFAULT false;
diff --git a/autogpt_platform/backend/schema.prisma b/autogpt_platform/backend/schema.prisma
index 0373544e2d..12196be673 100644
--- a/autogpt_platform/backend/schema.prisma
+++ b/autogpt_platform/backend/schema.prisma
@@ -289,6 +289,8 @@ model AgentGraphExecution {
updatedAt DateTime? @updatedAt
startedAt DateTime?
+ isDeleted Boolean @default(false)
+
executionStatus AgentExecutionStatus @default(COMPLETED)
agentGraphId String
diff --git a/autogpt_platform/frontend/next.config.mjs b/autogpt_platform/frontend/next.config.mjs
index b7f69edfdb..7526a7c01c 100644
--- a/autogpt_platform/frontend/next.config.mjs
+++ b/autogpt_platform/frontend/next.config.mjs
@@ -9,6 +9,7 @@ const nextConfig = {
"upload.wikimedia.org",
"storage.googleapis.com",
+ "ideogram.ai", // for generated images
"picsum.photos", // for placeholder images
"dummyimage.com", // for placeholder images
"placekitten.com", // for placeholder images
diff --git a/autogpt_platform/frontend/src/app/error.tsx b/autogpt_platform/frontend/src/app/error.tsx
index f6ce27a767..ce4db030c6 100644
--- a/autogpt_platform/frontend/src/app/error.tsx
+++ b/autogpt_platform/frontend/src/app/error.tsx
@@ -30,7 +30,7 @@ export default function Error({
again later or contact support if the issue persists.
-
);
diff --git a/autogpt_platform/frontend/src/app/monitoring/page.tsx b/autogpt_platform/frontend/src/app/monitoring/page.tsx
index dd1cf2c7c0..251a194abc 100644
--- a/autogpt_platform/frontend/src/app/monitoring/page.tsx
+++ b/autogpt_platform/frontend/src/app/monitoring/page.tsx
@@ -5,6 +5,7 @@ import {
GraphExecutionMeta,
Schedule,
LibraryAgent,
+ ScheduleID,
} from "@/lib/autogpt-server-api";
import { Card } from "@/components/ui/card";
@@ -35,7 +36,7 @@ const Monitor = () => {
}, [api]);
const removeSchedule = useCallback(
- async (scheduleId: string) => {
+ async (scheduleId: ScheduleID) => {
const removedSchedule = await api.deleteSchedule(scheduleId);
setSchedules(schedules.filter((s) => s.id !== removedSchedule.id));
},
diff --git a/autogpt_platform/frontend/src/components/CustomNode.tsx b/autogpt_platform/frontend/src/components/CustomNode.tsx
index bcde5645ca..948c2389b6 100644
--- a/autogpt_platform/frontend/src/components/CustomNode.tsx
+++ b/autogpt_platform/frontend/src/components/CustomNode.tsx
@@ -837,7 +837,11 @@ export const CustomNode = React.memo(
data={data.executionResults!.at(-1)?.data || {}}
/>
-
+
View More
diff --git a/autogpt_platform/frontend/src/components/Flow.tsx b/autogpt_platform/frontend/src/components/Flow.tsx
index 7f3cf14157..9b62ac9507 100644
--- a/autogpt_platform/frontend/src/components/Flow.tsx
+++ b/autogpt_platform/frontend/src/components/Flow.tsx
@@ -26,7 +26,12 @@ import {
import "@xyflow/react/dist/style.css";
import { CustomNode } from "./CustomNode";
import "./flow.css";
-import { BlockUIType, formatEdgeID, GraphID } from "@/lib/autogpt-server-api";
+import {
+ BlockUIType,
+ formatEdgeID,
+ GraphExecutionID,
+ GraphID,
+} from "@/lib/autogpt-server-api";
import { getTypeColor, findNewlyAddedBlockCoordinates } from "@/lib/utils";
import { history } from "./history";
import { CustomEdge } from "./CustomEdge";
@@ -86,7 +91,9 @@ const FlowEditor: React.FC<{
const [visualizeBeads, setVisualizeBeads] = useState<
"no" | "static" | "animate"
>("animate");
- const [flowExecutionID, setFlowExecutionID] = useState();
+ const [flowExecutionID, setFlowExecutionID] = useState<
+ GraphExecutionID | undefined
+ >();
const {
agentName,
setAgentName,
@@ -164,7 +171,9 @@ const FlowEditor: React.FC<{
if (params.get("open_scheduling") === "true") {
setOpenCron(true);
}
- setFlowExecutionID(params.get("flowExecutionID") || undefined);
+ setFlowExecutionID(
+ (params.get("flowExecutionID") as GraphExecutionID) || undefined,
+ );
}, [params]);
useEffect(() => {
diff --git a/autogpt_platform/frontend/src/components/PrimaryActionButton.tsx b/autogpt_platform/frontend/src/components/PrimaryActionButton.tsx
index 19d36e29f8..bec81fc267 100644
--- a/autogpt_platform/frontend/src/components/PrimaryActionButton.tsx
+++ b/autogpt_platform/frontend/src/components/PrimaryActionButton.tsx
@@ -1,5 +1,6 @@
-import React from "react";
-import { Clock, LogOut } from "lucide-react";
+import React, { useState } from "react";
+import { Button } from "./ui/button";
+import { Clock, LogOut, ChevronLeft } from "lucide-react";
import { IconPlay, IconSquare } from "@/components/ui/icons";
import {
Tooltip,
@@ -7,7 +8,6 @@ import {
TooltipTrigger,
} from "@/components/ui/tooltip";
import { FaSpinner } from "react-icons/fa";
-import { Button } from "@/components/ui/button";
interface PrimaryActionBarProps {
onClickAgentOutputs: () => void;
@@ -41,7 +41,12 @@ const PrimaryActionBar: React.FC = ({
-
+
Agent Outputs{" "}
@@ -55,7 +60,9 @@ const PrimaryActionBar: React.FC = ({
= ({
{
return (
- {show_tutorial && Tutorial}
+ {show_tutorial && (
+
+ Tutorial
+
+ )}
-
+
Reach Out
diff --git a/autogpt_platform/frontend/src/components/agents/agent-run-details-view.tsx b/autogpt_platform/frontend/src/components/agents/agent-run-details-view.tsx
index c6df19ce90..ccdef84575 100644
--- a/autogpt_platform/frontend/src/components/agents/agent-run-details-view.tsx
+++ b/autogpt_platform/frontend/src/components/agents/agent-run-details-view.tsx
@@ -23,10 +23,12 @@ export default function AgentRunDetailsView({
graph,
run,
agentActions,
+ deleteRun,
}: {
graph: GraphMeta;
run: GraphExecution | GraphExecutionMeta;
agentActions: ButtonAction[];
+ deleteRun: () => void;
}): React.ReactNode {
const api = useBackendAPI();
@@ -86,6 +88,11 @@ export default function AgentRunDetailsView({
[api, graph, agentRunInputs],
);
+ const stopRun = useCallback(
+ () => api.stopGraphExecution(graph.id, run.execution_id),
+ [api, graph.id, run.execution_id],
+ );
+
const agentRunOutputs:
| Record<
string,
@@ -109,9 +116,23 @@ export default function AgentRunDetailsView({
);
}, [graph, run, runStatus]);
- const runActions: { label: string; callback: () => void }[] = useMemo(
- () => [{ label: "Run again", callback: () => runAgain() }],
- [runAgain],
+ const runActions: ButtonAction[] = useMemo(
+ () => [
+ ...(["running", "queued"].includes(runStatus)
+ ? ([
+ {
+ label: "Stop run",
+ variant: "secondary",
+ callback: stopRun,
+ },
+ ] satisfies ButtonAction[])
+ : []),
+ ...(["success", "failed", "stopped"].includes(runStatus)
+ ? [{ label: "Run again", callback: runAgain }]
+ : []),
+ { label: "Delete run", variant: "secondary", callback: deleteRun },
+ ],
+ [runStatus, runAgain, stopRun, deleteRun],
);
return (
@@ -190,7 +211,11 @@ export default function AgentRunDetailsView({
Run actions
{runActions.map((action, i) => (
-
+
{action.label}
))}
diff --git a/autogpt_platform/frontend/src/components/agents/agent-run-draft-view.tsx b/autogpt_platform/frontend/src/components/agents/agent-run-draft-view.tsx
index a4f096780f..50ba007c96 100644
--- a/autogpt_platform/frontend/src/components/agents/agent-run-draft-view.tsx
+++ b/autogpt_platform/frontend/src/components/agents/agent-run-draft-view.tsx
@@ -2,7 +2,7 @@
import React, { useCallback, useMemo, useState } from "react";
import { useBackendAPI } from "@/lib/autogpt-server-api/context";
-import { GraphMeta } from "@/lib/autogpt-server-api";
+import { GraphExecutionID, GraphMeta } from "@/lib/autogpt-server-api";
import type { ButtonAction } from "@/components/agptui/types";
import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card";
@@ -15,7 +15,7 @@ export default function AgentRunDraftView({
agentActions,
}: {
graph: GraphMeta;
- onRun: (runID: string) => void;
+ onRun: (runID: GraphExecutionID) => void;
agentActions: ButtonAction[];
}): React.ReactNode {
const api = useBackendAPI();
diff --git a/autogpt_platform/frontend/src/components/agents/agent-run-summary-card.tsx b/autogpt_platform/frontend/src/components/agents/agent-run-summary-card.tsx
index 7336835438..912569a8cf 100644
--- a/autogpt_platform/frontend/src/components/agents/agent-run-summary-card.tsx
+++ b/autogpt_platform/frontend/src/components/agents/agent-run-summary-card.tsx
@@ -18,24 +18,24 @@ import AgentRunStatusChip, {
} from "@/components/agents/agent-run-status-chip";
export type AgentRunSummaryProps = {
- agentID: string;
- agentRunID: string;
status: AgentRunStatus;
title: string;
timestamp: number | Date;
selected?: boolean;
onClick?: () => void;
+ // onRename: () => void;
+ onDelete: () => void;
className?: string;
};
export default function AgentRunSummaryCard({
- agentID,
- agentRunID,
status,
title,
timestamp,
selected = false,
onClick,
+ // onRename,
+ onDelete,
className,
}: AgentRunSummaryProps): React.ReactElement {
return (
@@ -55,32 +55,24 @@ export default function AgentRunSummaryCard({
{title}
- {/*
+
-
- Pin into a template
+ {/* {onPinAsPreset && (
+
+ Pin as a preset
+ )} */}
-
- Rename
-
+ {/* Rename */}
-
- Delete
-
+ Delete
- */}
+
void;
+ onSelectRun: (id: GraphExecutionID) => void;
onSelectSchedule: (schedule: Schedule) => void;
- onDraftNewRun: () => void;
+ onSelectDraftNewRun: () => void;
+ onDeleteRun: (id: GraphExecutionMeta) => void;
+ onDeleteSchedule: (id: ScheduleID) => void;
className?: string;
}
@@ -34,7 +38,9 @@ export default function AgentRunsSelectorList({
selectedView,
onSelectRun,
onSelectSchedule,
- onDraftNewRun,
+ onSelectDraftNewRun,
+ onDeleteRun,
+ onDeleteSchedule,
className,
}: AgentRunsSelectorListProps): React.ReactElement {
const [activeListTab, setActiveListTab] = useState<"runs" | "scheduled">(
@@ -51,7 +57,7 @@ export default function AgentRunsSelectorList({
? "agpt-card-selected text-accent"
: "")
}
- onClick={onDraftNewRun}
+ onClick={onSelectDraftNewRun}
>
New run
@@ -91,7 +97,7 @@ export default function AgentRunsSelectorList({
? "agpt-card-selected text-accent"
: "")
}
- onClick={onDraftNewRun}
+ onClick={onSelectDraftNewRun}
>
New run
@@ -102,13 +108,12 @@ export default function AgentRunsSelectorList({
onSelectRun(run.execution_id)}
+ onDelete={() => onDeleteRun(run)}
/>
))
: schedules
@@ -117,13 +122,12 @@ export default function AgentRunsSelectorList({
onSelectSchedule(schedule)}
+ onDelete={() => onDeleteSchedule(schedule.id)}
/>
))}
diff --git a/autogpt_platform/frontend/src/components/agents/agent-schedule-details-view.tsx b/autogpt_platform/frontend/src/components/agents/agent-schedule-details-view.tsx
index 2133cea3bf..520188fad4 100644
--- a/autogpt_platform/frontend/src/components/agents/agent-schedule-details-view.tsx
+++ b/autogpt_platform/frontend/src/components/agents/agent-schedule-details-view.tsx
@@ -1,7 +1,11 @@
"use client";
import React, { useCallback, useMemo } from "react";
-import { GraphMeta, Schedule } from "@/lib/autogpt-server-api";
+import {
+ GraphExecutionID,
+ GraphMeta,
+ Schedule,
+} from "@/lib/autogpt-server-api";
import { useBackendAPI } from "@/lib/autogpt-server-api/context";
import type { ButtonAction } from "@/components/agptui/types";
@@ -18,7 +22,7 @@ export default function AgentScheduleDetailsView({
}: {
graph: GraphMeta;
schedule: Schedule;
- onForcedRun: (runID: string) => void;
+ onForcedRun: (runID: GraphExecutionID) => void;
agentActions: ButtonAction[];
}): React.ReactNode {
const api = useBackendAPI();
diff --git a/autogpt_platform/frontend/src/components/agptui/BecomeACreator.tsx b/autogpt_platform/frontend/src/components/agptui/BecomeACreator.tsx
index 2dfff0b64b..c30c3df915 100644
--- a/autogpt_platform/frontend/src/components/agptui/BecomeACreator.tsx
+++ b/autogpt_platform/frontend/src/components/agptui/BecomeACreator.tsx
@@ -2,7 +2,6 @@
import * as React from "react";
import { PublishAgentPopout } from "./composite/PublishAgentPopout";
-import { Button } from "@/components/ui/button";
interface BecomeACreatorProps {
title?: string;
description?: string;
@@ -47,7 +46,16 @@ export const BecomeACreator: React.FC = ({
{buttonText}}
+ trigger={
+
+
+ {buttonText}
+
+
+ }
/>
diff --git a/autogpt_platform/frontend/src/components/agptui/Button.tsx b/autogpt_platform/frontend/src/components/agptui/Button.tsx
index 9da212d4fd..1e4d9b0307 100644
--- a/autogpt_platform/frontend/src/components/agptui/Button.tsx
+++ b/autogpt_platform/frontend/src/components/agptui/Button.tsx
@@ -1,3 +1,5 @@
+"use client";
+
import * as React from "react";
import { Slot } from "@radix-ui/react-slot";
import { cva, type VariantProps } from "class-variance-authority";
@@ -5,7 +7,7 @@ import { cva, type VariantProps } from "class-variance-authority";
import { cn } from "@/lib/utils";
const buttonVariants = cva(
- "inline-flex items-center whitespace-nowrap font-medium transition-colors focus-visible:outline-none focus-visible:ring-1 focus-visible:ring-neutral-950 disabled:pointer-events-none disabled:opacity-50 dark:focus-visible:ring-neutral-300 font-neue leading-9 tracking-tight",
+ "inline-flex items-center whitespace-nowrap overflow-hidden font-medium transition-colors focus-visible:outline-none focus-visible:ring-1 focus-visible:ring-neutral-950 disabled:pointer-events-none disabled:opacity-50 dark:focus-visible:ring-neutral-300 font-neue leading-9 tracking-tight",
{
variants: {
variant: {
@@ -55,14 +57,39 @@ export interface ButtonProps
}
const Button = React.forwardRef(
- ({ className, variant, size, asChild = false, ...props }, ref) => {
+ ({ className, variant, size, asChild = false, onClick, ...props }, ref) => {
+ const [isLoading, setIsLoading] = React.useState(false);
const Comp = asChild ? Slot : "button";
+
+ const handleClick = async (e: React.MouseEvent) => {
+ if (!onClick) return;
+
+ try {
+ setIsLoading(true);
+ const result: any = onClick(e);
+ if (result instanceof Promise) {
+ await result;
+ }
+ } finally {
+ setIsLoading(false);
+ }
+ };
+
return (
+ >
+ {props.children}
+ {isLoading && (
+
+ )}
+
);
},
);
diff --git a/autogpt_platform/frontend/src/components/agptui/FeaturedAgentCard.tsx b/autogpt_platform/frontend/src/components/agptui/FeaturedAgentCard.tsx
index 630743d3b7..b1a6571138 100644
--- a/autogpt_platform/frontend/src/components/agptui/FeaturedAgentCard.tsx
+++ b/autogpt_platform/frontend/src/components/agptui/FeaturedAgentCard.tsx
@@ -27,33 +27,35 @@ export const FeaturedAgentCard: React.FC = ({
data-testid="featured-store-card"
onMouseEnter={() => setIsHovered(true)}
onMouseLeave={() => setIsHovered(false)}
- className={backgroundColor}
+ className={`flex h-full flex-col ${backgroundColor}`}
>
- {agent.agent_name}
- {agent.description}
+
+ {agent.agent_name}
+
+
+ By {agent.creator}
+
-
-
+
+
+
-
-
-
-
+
{agent.description}
-
+
@@ -63,13 +65,7 @@ export const FeaturedAgentCard: React.FC = ({
{agent.rating.toFixed(1) ?? "0.0"}
-
- {StarRatingIcons(agent.rating)}
-
+ {StarRatingIcons(agent.rating)}
diff --git a/autogpt_platform/frontend/src/components/agptui/ProfileInfoForm.tsx b/autogpt_platform/frontend/src/components/agptui/ProfileInfoForm.tsx
index 88dbd6e185..302287788e 100644
--- a/autogpt_platform/frontend/src/components/agptui/ProfileInfoForm.tsx
+++ b/autogpt_platform/frontend/src/components/agptui/ProfileInfoForm.tsx
@@ -5,12 +5,12 @@ import { useState } from "react";
import Image from "next/image";
+import { Button } from "./Button";
import { IconPersonFill } from "@/components/ui/icons";
import { CreatorDetails, ProfileDetails } from "@/lib/autogpt-server-api/types";
import { Separator } from "@/components/ui/separator";
import useSupabase from "@/hooks/useSupabase";
import { useBackendAPI } from "@/lib/autogpt-server-api/context";
-import { Button } from "@/components/ui/button";
export const ProfileInfoForm = ({ profile }: { profile: CreatorDetails }) => {
const [isSubmitting, setIsSubmitting] = useState(false);
@@ -245,14 +245,21 @@ export const ProfileInfoForm = ({ profile }: { profile: CreatorDetails }) => {
{
setProfileData(profile);
}}
>
Cancel
-
+
{isSubmitting ? "Saving..." : "Save changes"}
diff --git a/autogpt_platform/frontend/src/components/agptui/composite/FeaturedSection.tsx b/autogpt_platform/frontend/src/components/agptui/composite/FeaturedSection.tsx
index c559fdf678..3cf0646fbc 100644
--- a/autogpt_platform/frontend/src/components/agptui/composite/FeaturedSection.tsx
+++ b/autogpt_platform/frontend/src/components/agptui/composite/FeaturedSection.tsx
@@ -46,45 +46,41 @@ export const FeaturedSection: React.FC = ({
};
return (
-
-
-
- Featured agents
-
+
+
+ Featured agents
+
-
-
-
- {featuredAgents.map((agent, index) => (
-
-
-
-
-
- ))}
-
-
-
-
-
-
-
+
+
+ {featuredAgents.map((agent, index) => (
+
+
+
+
+
+ ))}
+
+
+
+
+
-
-
+
+
);
};
diff --git a/autogpt_platform/frontend/src/components/agents/agent-delete-confirm-dialog.tsx b/autogpt_platform/frontend/src/components/agptui/delete-confirm-dialog.tsx
similarity index 57%
rename from autogpt_platform/frontend/src/components/agents/agent-delete-confirm-dialog.tsx
rename to autogpt_platform/frontend/src/components/agptui/delete-confirm-dialog.tsx
index c8934e06eb..7d09502dd2 100644
--- a/autogpt_platform/frontend/src/components/agents/agent-delete-confirm-dialog.tsx
+++ b/autogpt_platform/frontend/src/components/agptui/delete-confirm-dialog.tsx
@@ -8,25 +8,41 @@ import {
DialogTitle,
} from "@/components/ui/dialog";
-export default function AgentDeleteConfirmDialog({
+export default function DeleteConfirmDialog({
+ entityType,
+ entityName,
open,
onOpenChange,
onDoDelete,
+ isIrreversible = true,
className,
}: {
+ entityType: string;
+ entityName?: string;
open: boolean;
onOpenChange: (open: boolean) => void;
onDoDelete: () => void;
+ isIrreversible?: boolean;
className?: string;
}): React.ReactNode {
+ const displayType = entityType
+ .split(" ")
+ .map((word) => word.charAt(0).toUpperCase() + word.slice(1))
+ .join(" ");
return (