diff --git a/autogpt_platform/backend/.gitignore b/autogpt_platform/backend/.gitignore
index 9224c07d9e..6e688311a6 100644
--- a/autogpt_platform/backend/.gitignore
+++ b/autogpt_platform/backend/.gitignore
@@ -19,3 +19,6 @@ load-tests/*.json
 load-tests/*.log
 load-tests/node_modules/*
 migrations/*/rollback*.sql
+
+# Workspace files
+workspaces/
diff --git a/autogpt_platform/backend/backend/api/features/chat/service.py b/autogpt_platform/backend/backend/api/features/chat/service.py
index 218575085b..06da6bdf2b 100644
--- a/autogpt_platform/backend/backend/api/features/chat/service.py
+++ b/autogpt_platform/backend/backend/api/features/chat/service.py
@@ -33,7 +33,7 @@ from backend.data.understanding import (
     get_business_understanding,
 )
 from backend.util.exceptions import NotFoundError
-from backend.util.settings import Settings
+from backend.util.settings import AppEnvironment, Settings
 
 from . import db as chat_db
 from . import stream_registry
@@ -222,8 +222,18 @@ async def _get_system_prompt_template(context: str) -> str:
     try:
         # cache_ttl_seconds=0 disables SDK caching to always get the latest prompt
         # Use asyncio.to_thread to avoid blocking the event loop
+        # In non-production environments, fetch the latest prompt version
+        # instead of the production-labeled version for easier testing
+        label = (
+            None
+            if settings.config.app_env == AppEnvironment.PRODUCTION
+            else "latest"
+        )
         prompt = await asyncio.to_thread(
-            langfuse.get_prompt, config.langfuse_prompt_name, cache_ttl_seconds=0
+            langfuse.get_prompt,
+            config.langfuse_prompt_name,
+            label=label,
+            cache_ttl_seconds=0,
         )
         return prompt.compile(users_information=context)
     except Exception as e:
@@ -618,6 +628,9 @@ async def stream_chat_completion(
                     total_tokens=chunk.totalTokens,
                 )
             )
+        elif isinstance(chunk, StreamHeartbeat):
+            # Pass through heartbeat to keep SSE connection alive
+            yield chunk
         else:
             logger.error(f"Unknown chunk type: {type(chunk)}", exc_info=True)
diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/agent_search.py b/autogpt_platform/backend/backend/api/features/chat/tools/agent_search.py
index 62d59c470e..61cdba1ef9 100644
--- a/autogpt_platform/backend/backend/api/features/chat/tools/agent_search.py
+++ b/autogpt_platform/backend/backend/api/features/chat/tools/agent_search.py
@@ -206,9 +206,9 @@ async def search_agents(
             ]
         )
         no_results_msg = (
-            f"No agents found matching '{query}'. Try different keywords or browse the marketplace."
+            f"No agents found matching '{query}'. Let the user know they can try different keywords or browse the marketplace. Also let them know you can create a custom agent for them based on their needs."
             if source == "marketplace"
-            else f"No agents matching '{query}' found in your library."
+            else f"No agents matching '{query}' found in your library. Let the user know you can create a custom agent for them based on their needs."
         )
         return NoResultsResponse(
             message=no_results_msg, session_id=session_id, suggestions=suggestions
@@ -224,10 +224,10 @@ async def search_agents(
     message = (
         "Now you have found some options for the user to choose from. "
        "You can add a link to a recommended agent at: /marketplace/agent/agent_id "
-        "Please ask the user if they would like to use any of these agents."
+        "Please ask the user if they would like to use any of these agents. Let the user know we can create a custom agent for them based on their needs."
         if source == "marketplace"
         else "Found agents in the user's library. "
         "You can provide a link to view an agent at: "
-        "/library/agents/{agent_id}. Use agent_output to get execution results, or run_agent to execute."
+        "/library/agents/{agent_id}. Use agent_output to get execution results, or run_agent to execute. Let the user know we can create a custom agent for them based on their needs."
     )
 
     return AgentsFoundResponse(
diff --git a/autogpt_platform/backend/backend/blocks/youtube.py b/autogpt_platform/backend/backend/blocks/youtube.py
index e79be3e99b..6d81a86b4c 100644
--- a/autogpt_platform/backend/backend/blocks/youtube.py
+++ b/autogpt_platform/backend/backend/blocks/youtube.py
@@ -165,10 +165,13 @@ class TranscribeYoutubeVideoBlock(Block):
         credentials: WebshareProxyCredentials,
         **kwargs,
     ) -> BlockOutput:
-        video_id = self.extract_video_id(input_data.youtube_url)
-        yield "video_id", video_id
+        try:
+            video_id = self.extract_video_id(input_data.youtube_url)
+            transcript = self.get_transcript(video_id, credentials)
+            transcript_text = self.format_transcript(transcript=transcript)
 
-        transcript = self.get_transcript(video_id, credentials)
-        transcript_text = self.format_transcript(transcript=transcript)
-
-        yield "transcript", transcript_text
+            # Only yield after all operations succeed
+            yield "video_id", video_id
+            yield "transcript", transcript_text
+        except Exception as e:
+            yield "error", str(e)
diff --git a/autogpt_platform/backend/backend/data/credit_test.py b/autogpt_platform/backend/backend/data/credit_test.py
index 30660eccf1..6b13d9fa7b 100644
--- a/autogpt_platform/backend/backend/data/credit_test.py
+++ b/autogpt_platform/backend/backend/data/credit_test.py
@@ -132,9 +132,15 @@ async def test_block_credit_reset(server: SpinTestServer):
     month1 = datetime(2025, 1, 15, 12, 0, 0, tzinfo=timezone.utc)
     user_credit.time_now = lambda: month1
 
-    # Reset transactions AFTER setting time_now so the old date calculation
-    # uses a date that's definitely in a previous month relative to month1
-    await disable_test_user_transactions()
+    # IMPORTANT: Set updatedAt to December of previous year to ensure it's
+    # in a different month than month1 (January). This fixes a timing bug
+    # where if the test runs in early February, 35 days ago would be January,
+    # matching the mocked month1 and preventing the refill from triggering.
+    dec_previous_year = month1.replace(year=month1.year - 1, month=12, day=15)
+    await UserBalance.prisma().update(
+        where={"userId": DEFAULT_USER_ID},
+        data={"updatedAt": dec_previous_year},
+    )
 
     # First call in month 1 should trigger refill
     balance = await user_credit.get_credits(DEFAULT_USER_ID)
diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatMessage/ChatMessage.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatMessage/ChatMessage.tsx
index 2ac433a272..851c3b33e8 100644
--- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatMessage/ChatMessage.tsx
+++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatMessage/ChatMessage.tsx
@@ -346,6 +346,7 @@ export function ChatMessage({
           toolId={message.toolId}
           toolName={message.toolName}
           result={message.result}
+          onSendMessage={onSendMessage}
         />
       );
diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/MessageList/MessageList.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/MessageList/MessageList.tsx
index 01d107c64e..d8478f1e82 100644
--- a/autogpt_platform/frontend/src/components/contextual/Chat/components/MessageList/MessageList.tsx
+++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/MessageList/MessageList.tsx
@@ -73,6 +73,7 @@ export function MessageList({
             key={index}
             message={message}
             prevMessage={messages[index - 1]}
+            onSendMessage={onSendMessage}
           />
         );
       }
diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/MessageList/components/LastToolResponse/LastToolResponse.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/MessageList/components/LastToolResponse/LastToolResponse.tsx
index 15b10e5715..7c5a75bec5 100644
--- a/autogpt_platform/frontend/src/components/contextual/Chat/components/MessageList/components/LastToolResponse/LastToolResponse.tsx
+++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/MessageList/components/LastToolResponse/LastToolResponse.tsx
@@ -5,11 +5,13 @@ import { shouldSkipAgentOutput } from "../../helpers";
 
 export interface LastToolResponseProps {
   message: ChatMessageData;
   prevMessage: ChatMessageData | undefined;
+  onSendMessage?: (content: string) => void;
 }
 
 export function LastToolResponse({
   message,
   prevMessage,
+  onSendMessage,
 }: LastToolResponseProps) {
   if (message.type !== "tool_response") return null;
@@ -21,6 +23,7 @@ export function LastToolResponse({
       toolId={message.toolId}
       toolName={message.toolName}
       result={message.result}
+      onSendMessage={onSendMessage}
     />
   );
diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ThinkingMessage/ThinkingMessage.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/ThinkingMessage/ThinkingMessage.tsx
index 047c2277b0..34018f0292 100644
--- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ThinkingMessage/ThinkingMessage.tsx
+++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ThinkingMessage/ThinkingMessage.tsx
@@ -1,6 +1,8 @@
+import { Progress } from "@/components/atoms/Progress/Progress";
 import { cn } from "@/lib/utils";
 import { useEffect, useRef, useState } from "react";
 import { AIChatBubble } from "../AIChatBubble/AIChatBubble";
+import { useAsymptoticProgress } from "../ToolCallMessage/useAsymptoticProgress";
 
 export interface ThinkingMessageProps {
   className?: string;
@@ -11,18 +13,19 @@ export function ThinkingMessage({ className }: ThinkingMessageProps) {
   const [showCoffeeMessage, setShowCoffeeMessage] = useState(false);
   const timerRef = useRef(null);
   const coffeeTimerRef = useRef(null);
+  const progress = useAsymptoticProgress(showCoffeeMessage);
 
   useEffect(() => {
     if (timerRef.current === null) {
       timerRef.current = setTimeout(() => {
         setShowSlowLoader(true);
-      }, 8000);
+      }, 3000);
     }
     if (coffeeTimerRef.current === null) {
       coffeeTimerRef.current = setTimeout(() => {
         setShowCoffeeMessage(true);
-      }, 10000);
+      }, 8000);
     }
 
     return () => {
@@ -49,9 +52,18 @@ export function ThinkingMessage({ className }: ThinkingMessageProps) {
         {showCoffeeMessage ? (
-            This could take a few minutes, grab a coffee ☕️
+            Working on it...
+            {Math.round(progress)}%
+            <Progress value={progress} />
+            This could take a few minutes, grab a coffee ☕️
         ) : showSlowLoader ? (
             Taking a bit more time...
diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ToolCallMessage/useAsymptoticProgress.ts b/autogpt_platform/frontend/src/components/contextual/Chat/components/ToolCallMessage/useAsymptoticProgress.ts
new file mode 100644
index 0000000000..cf1b89e7c4
--- /dev/null
+++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ToolCallMessage/useAsymptoticProgress.ts
@@ -0,0 +1,50 @@
+import { useEffect, useRef, useState } from "react";
+
+/**
+ * Hook that returns a progress value that starts fast and slows down,
+ * asymptotically approaching but never reaching the max value.
+ *
+ * Uses a half-life formula: progress = max * (1 - 0.5^(time/halfLife))
+ * This creates the "game loading bar" effect where:
+ * - 50% is reached at halfLifeSeconds
+ * - 75% is reached at 2 * halfLifeSeconds
+ * - 87.5% is reached at 3 * halfLifeSeconds
+ * - and so on...
+ *
+ * @param isActive - Whether the progress should be animating
+ * @param halfLifeSeconds - Time in seconds to reach 50% progress (default: 30)
+ * @param maxProgress - Maximum progress value to approach (default: 100)
+ * @param intervalMs - Update interval in milliseconds (default: 100)
+ * @returns Current progress value (0-maxProgress)
+ */
+export function useAsymptoticProgress(
+  isActive: boolean,
+  halfLifeSeconds = 30,
+  maxProgress = 100,
+  intervalMs = 100,
+) {
+  const [progress, setProgress] = useState(0);
+  const elapsedTimeRef = useRef(0);
+
+  useEffect(() => {
+    if (!isActive) {
+      setProgress(0);
+      elapsedTimeRef.current = 0;
+      return;
+    }
+
+    const interval = setInterval(() => {
+      elapsedTimeRef.current += intervalMs / 1000;
+      // Half-life approach: progress = max * (1 - 0.5^(time/halfLife))
+      // At t=halfLife: 50%, at t=2*halfLife: 75%, at t=3*halfLife: 87.5%, etc.
+      const newProgress =
+        maxProgress *
+        (1 - Math.pow(0.5, elapsedTimeRef.current / halfLifeSeconds));
+      setProgress(newProgress);
+    }, intervalMs);
+
+    return () => clearInterval(interval);
+  }, [isActive, halfLifeSeconds, maxProgress, intervalMs]);
+
+  return progress;
+}
diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ToolResponseMessage/AgentCreatedPrompt.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/ToolResponseMessage/AgentCreatedPrompt.tsx
new file mode 100644
index 0000000000..8494452eea
--- /dev/null
+++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ToolResponseMessage/AgentCreatedPrompt.tsx
@@ -0,0 +1,128 @@
+"use client";
+
+import { useGetV2GetLibraryAgent } from "@/app/api/__generated__/endpoints/library/library";
+import { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecutionJobInfo";
+import { GraphExecutionMeta } from "@/app/api/__generated__/models/graphExecutionMeta";
+import { RunAgentModal } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/RunAgentModal";
+import { Button } from "@/components/atoms/Button/Button";
+import { Text } from "@/components/atoms/Text/Text";
+import {
+  CheckCircleIcon,
+  PencilLineIcon,
+  PlayIcon,
+} from "@phosphor-icons/react";
+import { AIChatBubble } from "../AIChatBubble/AIChatBubble";
+
+interface Props {
+  agentName: string;
+  libraryAgentId: string;
+  onSendMessage?: (content: string) => void;
+}
+
+export function AgentCreatedPrompt({
+  agentName,
+  libraryAgentId,
+  onSendMessage,
+}: Props) {
+  // Fetch library agent eagerly so modal is ready when user clicks
+  const { data: libraryAgentResponse, isLoading } = useGetV2GetLibraryAgent(
+    libraryAgentId,
+    {
+      query: {
+        enabled: !!libraryAgentId,
+      },
+    },
+  );
+
+  const libraryAgent =
+    libraryAgentResponse?.status === 200 ? libraryAgentResponse.data : null;
+
+  function handleRunWithPlaceholders() {
+    onSendMessage?.(
+      `Run the agent "${agentName}" with placeholder/example values so I can test it.`,
+    );
+  }
+
+  function handleRunCreated(execution: GraphExecutionMeta) {
+    onSendMessage?.(
+      `I've started the agent "${agentName}". The execution ID is ${execution.id}. Please monitor its progress and let me know when it completes.`,
+    );
+  }
+
+  function handleScheduleCreated(schedule: GraphExecutionJobInfo) {
+    const scheduleInfo = schedule.cron
+      ? `with cron schedule "${schedule.cron}"`
+      : "to run on the specified schedule";
+    onSendMessage?.(
+      `I've scheduled the agent "${agentName}" ${scheduleInfo}. The schedule ID is ${schedule.id}.`,
+    );
+  }
+
+  return (
+        Agent Created Successfully
+        "{agentName}" is ready to test
+        Ready to test?
+        {libraryAgent ? (
+          <RunAgentModal
+                Run with my inputs
+            }
+            agent={libraryAgent}
+            onRunCreated={handleRunCreated}
+            onScheduleCreated={handleScheduleCreated}
+          />
+        ) : (
+        )}
+        or just ask me
+  );
+}
diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ToolResponseMessage/ToolResponseMessage.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/ToolResponseMessage/ToolResponseMessage.tsx
index 27da02beb8..53d5f1ef96 100644
--- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ToolResponseMessage/ToolResponseMessage.tsx
+++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ToolResponseMessage/ToolResponseMessage.tsx
@@ -2,11 +2,13 @@ import { Text } from "@/components/atoms/Text/Text";
 import { cn } from "@/lib/utils";
 import type { ToolResult } from "@/types/chat";
 import { WarningCircleIcon } from "@phosphor-icons/react";
+import { AgentCreatedPrompt } from "./AgentCreatedPrompt";
 import { AIChatBubble } from "../AIChatBubble/AIChatBubble";
 import { MarkdownContent } from "../MarkdownContent/MarkdownContent";
 import {
   formatToolResponse,
   getErrorMessage,
+  isAgentSavedResponse,
   isErrorResponse,
 } from "./helpers";
@@ -16,6 +18,7 @@ export interface ToolResponseMessageProps {
   result?: ToolResult;
   success?: boolean;
   className?: string;
+  onSendMessage?: (content: string) => void;
 }
 
 export function ToolResponseMessage({
@@ -24,6 +27,7 @@ export function ToolResponseMessage({
   result,
   success: _success,
   className,
+  onSendMessage,
 }: ToolResponseMessageProps) {
   if (isErrorResponse(result)) {
     const errorMessage = getErrorMessage(result);
@@ -43,6 +47,18 @@ export function ToolResponseMessage({
     );
   }
 
+  // Check for agent_saved response - show special prompt
+  const agentSavedData = isAgentSavedResponse(result);
+  if (agentSavedData.isSaved) {
+    return (
+      <AgentCreatedPrompt
+        agentName={agentSavedData.agentName}
+        libraryAgentId={agentSavedData.libraryAgentId}
+        onSendMessage={onSendMessage}
+      />
+    );
+  }
+
   const formattedText = formatToolResponse(result, toolName);
 
   return (
diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ToolResponseMessage/helpers.ts b/autogpt_platform/frontend/src/components/contextual/Chat/components/ToolResponseMessage/helpers.ts
index ff0b004201..63da171f54 100644
--- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ToolResponseMessage/helpers.ts
+++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ToolResponseMessage/helpers.ts
@@ -6,6 +6,43 @@ function stripInternalReasoning(content: string): string {
     .trim();
 }
 
+export interface AgentSavedData {
+  isSaved: boolean;
+  agentName: string;
+  agentId: string;
+  libraryAgentId: string;
+  libraryAgentLink: string;
+}
+
+export function isAgentSavedResponse(result: unknown): AgentSavedData {
+  if (typeof result !== "object" || result === null) {
+    return {
+      isSaved: false,
+      agentName: "",
+      agentId: "",
+      libraryAgentId: "",
+      libraryAgentLink: "",
+    };
+  }
+  const response = result as Record<string, unknown>;
+  if (response.type === "agent_saved") {
+    return {
+      isSaved: true,
+      agentName: (response.agent_name as string) || "Agent",
+      agentId: (response.agent_id as string) || "",
+      libraryAgentId: (response.library_agent_id as string) || "",
+      libraryAgentLink: (response.library_agent_link as string) || "",
+    };
+  }
+  return {
+    isSaved: false,
+    agentName: "",
+    agentId: "",
+    libraryAgentId: "",
+    libraryAgentLink: "",
+  };
+}
+
 export function isErrorResponse(result: unknown): boolean {
   if (typeof result === "string") {
     const lower = result.toLowerCase();
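Note on the new `useAsymptoticProgress` hook: the half-life curve it implements can be sanity-checked outside React. The following standalone TypeScript sketch is not part of the patch (the function name and sample times are illustrative); it evaluates the same formula from the hook's docstring to show why the bar reaches 50% after one half-life and then keeps slowing without ever hitting 100%.

```ts
// Illustrative sketch only — mirrors the formula in useAsymptoticProgress.ts,
// not code from this PR.
function asymptoticProgress(
  elapsedSeconds: number,
  halfLifeSeconds = 30,
  maxProgress = 100,
): number {
  // progress = max * (1 - 0.5^(t / halfLife))
  return maxProgress * (1 - Math.pow(0.5, elapsedSeconds / halfLifeSeconds));
}

for (const t of [0, 30, 60, 90, 300]) {
  // Expected output: 0.0%, 50.0%, 75.0%, 87.5%, ~99.9%
  console.log(`${t}s -> ${asymptoticProgress(t).toFixed(1)}%`);
}
```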
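For reference, `isAgentSavedResponse()` in helpers.ts keys off a `type: "agent_saved"` discriminator and snake_case fields on the tool result. A minimal sketch of the payload shape it detects is below; the field values are invented for illustration and only the keys come from the checks in the diff above.

```ts
// Illustrative payload only — keys follow isAgentSavedResponse(); values are made up.
const exampleToolResult: Record<string, unknown> = {
  type: "agent_saved",
  agent_name: "Example Agent",
  agent_id: "example-graph-id",
  library_agent_id: "example-library-agent-id",
  library_agent_link: "/library/agents/example-library-agent-id",
};

// For this input the guard returns isSaved: true plus the camelCase fields
// (agentName, agentId, libraryAgentId, libraryAgentLink) that
// ToolResponseMessage then passes to AgentCreatedPrompt.
```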