From 5ac89d7c0b3abed610ad4324ae2e6b4b47298e98 Mon Sep 17 00:00:00 2001
From: Otto
Date: Thu, 5 Feb 2026 10:56:26 +0000
Subject: [PATCH 1/7] fix(test): fix timing bug in test_block_credit_reset (#11978)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## Summary

Fixes the flaky `test_block_credit_reset` test that was failing on multiple PRs with `assert 0 == 1000`.

## Root Cause

The test calls `disable_test_user_transactions()` which sets `updatedAt` to 35 days ago from the **actual current time**. It then mocks `time_now` to January 1st.

**The bug**: If the test runs in early February, 35 days ago is January — the **same month** as the mocked `time_now`. The credit refill logic only triggers when the balance snapshot is from a *different* month, so no refill happens and the balance stays at 0.

## Fix

After calling `disable_test_user_transactions()`, explicitly set `updatedAt` to December of the previous year. This ensures it's always in a different month than the mocked `month1` (January), regardless of when the test runs.

## Testing

CI will verify the fix.
---
 autogpt_platform/backend/backend/data/credit_test.py | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/autogpt_platform/backend/backend/data/credit_test.py b/autogpt_platform/backend/backend/data/credit_test.py
index 391a373b86..2b10c62882 100644
--- a/autogpt_platform/backend/backend/data/credit_test.py
+++ b/autogpt_platform/backend/backend/data/credit_test.py
@@ -134,6 +134,16 @@ async def test_block_credit_reset(server: SpinTestServer):
     month1 = datetime.now(timezone.utc).replace(month=1, day=1)
     user_credit.time_now = lambda: month1
 
+    # IMPORTANT: Set updatedAt to December of previous year to ensure it's
+    # in a different month than month1 (January). This fixes a timing bug
+    # where if the test runs in early February, 35 days ago would be January,
+    # matching the mocked month1 and preventing the refill from triggering.
+    dec_previous_year = month1.replace(year=month1.year - 1, month=12, day=15)
+    await UserBalance.prisma().update(
+        where={"userId": DEFAULT_USER_ID},
+        data={"updatedAt": dec_previous_year},
+    )
+
     # First call in month 1 should trigger refill
     balance = await user_credit.get_credits(DEFAULT_USER_ID)
     assert balance == REFILL_VALUE  # Should get 1000 credits

From 5401d54eaab4fd23146b79d037c0afa441851672 Mon Sep 17 00:00:00 2001
From: Bently
Date: Thu, 5 Feb 2026 11:04:46 +0000
Subject: [PATCH 2/7] fix(backend): Handle StreamHeartbeat in CoPilot stream handler (#11928)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

### Changes 🏗️

Fixes **AUTOGPT-SERVER-7JA** (123 events since Jan 27, 2026).

#### Problem

`StreamHeartbeat` was added to keep SSE connections alive during long-running tool executions (yielded every 15s while waiting). However, the main `stream_chat_completion` handler's `elif` chain didn't have a case for it:

```
StreamTextStart           → ✅ handled
StreamTextDelta           → ✅ handled
StreamTextEnd             → ✅ handled
StreamToolInputStart      → ✅ handled
StreamToolInputAvailable  → ✅ handled
StreamToolOutputAvailable → ✅ handled
StreamFinish              → ✅ handled
StreamError               → ✅ handled
StreamUsage               → ✅ handled
StreamHeartbeat           → ❌ fell through to 'Unknown chunk type' error
```

This meant every heartbeat during tool execution generated a Sentry error instead of keeping the connection alive.

#### Fix

Add `StreamHeartbeat` to the `elif` chain and yield it through.
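For illustration (not part of this diff), a rough sketch of the pass-through and the frame it produces on the wire; the chunk shapes and `data:` framing below are simplified stand-ins rather than the actual models in `service.py`:

```python
# Simplified sketch, illustrative shapes only; just the ": heartbeat" comment
# frame is taken from this PR.
from dataclasses import dataclass


@dataclass
class StreamTextDelta:
    text: str

    def to_sse(self) -> str:
        # Assumed framing for normal chunks; the real service defines its own payloads.
        return f"data: {self.text}\n\n"


class StreamHeartbeat:
    def to_sse(self) -> str:
        # SSE comment frame: lines starting with ":" are ignored by clients but
        # keep idle proxies/load balancers from closing the connection.
        return ": heartbeat\n\n"


async def to_sse_frames(chunks):
    async for chunk in chunks:
        if isinstance(chunk, StreamTextDelta):
            yield chunk.to_sse()
        elif isinstance(chunk, StreamHeartbeat):
            # The fix: pass heartbeats through instead of letting them fall into
            # the "Unknown chunk type" error branch.
            yield chunk.to_sse()
```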
The route handler already calls `to_sse()` on all yielded chunks, and `StreamHeartbeat.to_sse()` correctly returns `: heartbeat\n\n` (SSE comment format, ignored by clients but keeps proxies/load balancers happy).

**1 file changed, 3 insertions.**
---
 autogpt_platform/backend/backend/api/features/chat/service.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/autogpt_platform/backend/backend/api/features/chat/service.py b/autogpt_platform/backend/backend/api/features/chat/service.py
index 218575085b..bf7dbb6c7e 100644
--- a/autogpt_platform/backend/backend/api/features/chat/service.py
+++ b/autogpt_platform/backend/backend/api/features/chat/service.py
@@ -618,6 +618,9 @@ async def stream_chat_completion(
                     total_tokens=chunk.totalTokens,
                 )
             )
+        elif isinstance(chunk, StreamHeartbeat):
+            # Pass through heartbeat to keep SSE connection alive
+            yield chunk
         else:
             logger.error(f"Unknown chunk type: {type(chunk)}", exc_info=True)

From a077ba9f03e2c0d35726bc83cc2e91d5b8a713e2 Mon Sep 17 00:00:00 2001
From: Otto
Date: Thu, 5 Feb 2026 11:51:32 +0000
Subject: [PATCH 3/7] fix(platform): YouTube block yields only error on failure (#11980)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## Summary

Fixes [SECRT-1889](https://linear.app/autogpt/issue/SECRT-1889): The YouTube transcription block was yielding both `video_id` and `error` when the transcript fetch failed.

## Problem

The block yielded `video_id` immediately upon extracting it from the URL, before attempting to fetch the transcript. If the transcript fetch failed, both outputs were present.

```python
# Before
video_id = self.extract_video_id(input_data.youtube_url)
yield "video_id", video_id  # ← Yielded before transcript attempt
transcript = self.get_transcript(video_id, credentials)  # ← Could fail here
```

## Solution

Wrap the entire operation in try/except and only yield outputs after all operations succeed:

```python
# After
try:
    video_id = self.extract_video_id(input_data.youtube_url)
    transcript = self.get_transcript(video_id, credentials)
    transcript_text = self.format_transcript(transcript=transcript)

    # Only yield after all operations succeed
    yield "video_id", video_id
    yield "transcript", transcript_text
except Exception as e:
    yield "error", str(e)
```

This follows the established pattern in other blocks (e.g., `ai_image_generator_block.py`).
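As a self-contained illustration of the failure mode (toy generator code, not the platform's real block/`BlockOutput` API; `fetch_transcript` and `collect` are made-up stand-ins), the consumer keeps every output it has already received, so yielding `video_id` before the risky call means a failure surfaces both outputs:

```python
# Toy sketch of why yielding early leaks a partial output (hypothetical names).
def fetch_transcript(video_id: str) -> str:
    raise RuntimeError("no transcript available")


def run_before():
    video_id = "abc123"
    yield "video_id", video_id  # emitted before the risky call
    yield "transcript", fetch_transcript(video_id)


def run_after():
    try:
        video_id = "abc123"
        transcript = fetch_transcript(video_id)
        # Only reached if everything above succeeded
        yield "video_id", video_id
        yield "transcript", transcript
    except Exception as e:
        yield "error", str(e)


def collect(block_run):
    outputs = {}
    try:
        for name, value in block_run:
            outputs[name] = value
    except Exception as e:
        outputs["error"] = str(e)
    return outputs


print(collect(run_before()))  # both 'video_id' and 'error'
print(collect(run_after()))   # 'error' only
```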
## Testing

- All 10 unit tests pass (`test/blocks/test_youtube.py`)
- Lint/format checks pass

Co-authored-by: Toran Bruce Richards
---
 .../backend/backend/blocks/youtube.py | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/autogpt_platform/backend/backend/blocks/youtube.py b/autogpt_platform/backend/backend/blocks/youtube.py
index e79be3e99b..6d81a86b4c 100644
--- a/autogpt_platform/backend/backend/blocks/youtube.py
+++ b/autogpt_platform/backend/backend/blocks/youtube.py
@@ -165,10 +165,13 @@ class TranscribeYoutubeVideoBlock(Block):
         credentials: WebshareProxyCredentials,
         **kwargs,
     ) -> BlockOutput:
-        video_id = self.extract_video_id(input_data.youtube_url)
-        yield "video_id", video_id
+        try:
+            video_id = self.extract_video_id(input_data.youtube_url)
+            transcript = self.get_transcript(video_id, credentials)
+            transcript_text = self.format_transcript(transcript=transcript)
 
-        transcript = self.get_transcript(video_id, credentials)
-        transcript_text = self.format_transcript(transcript=transcript)
-
-        yield "transcript", transcript_text
+            # Only yield after all operations succeed
+            yield "video_id", video_id
+            yield "transcript", transcript_text
+        except Exception as e:
+            yield "error", str(e)

From 3ae5eabf9de84e7200af4a486a50276a7b11c7c8 Mon Sep 17 00:00:00 2001
From: Swifty
Date: Thu, 5 Feb 2026 14:54:39 +0100
Subject: [PATCH 4/7] fix(backend/chat): Use latest prompt label in non-production environments (#11977)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

In non-production environments, the chat service now fetches prompts with the `latest` label instead of the default production-labeled prompt. This makes it easier to test and iterate on prompt changes in dev/staging without needing to promote them to production first.

### Changes 🏗️

- Updated `_get_system_prompt_template()` in chat service to pass `label="latest"` when `app_env` is not `PRODUCTION`
- Production environments continue using the default behavior (production-labeled prompts)

### Checklist 📋

#### For code changes:

- [x] I have clearly listed my changes in the PR description
- [x] I have made a test plan
- [x] I have tested my changes according to the test plan:
  - [x] Verified that in non-production environments, prompts with `latest` label are fetched
  - [x] Verified that production environments still use the default (production) labeled prompts

Co-authored-by: Otto
---
 .../backend/backend/api/features/chat/service.py | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/autogpt_platform/backend/backend/api/features/chat/service.py b/autogpt_platform/backend/backend/api/features/chat/service.py
index bf7dbb6c7e..06da6bdf2b 100644
--- a/autogpt_platform/backend/backend/api/features/chat/service.py
+++ b/autogpt_platform/backend/backend/api/features/chat/service.py
@@ -33,7 +33,7 @@ from backend.data.understanding import (
     get_business_understanding,
 )
 from backend.util.exceptions import NotFoundError
-from backend.util.settings import Settings
+from backend.util.settings import AppEnvironment, Settings
 
 from . import db as chat_db
 from . import stream_registry
@@ -222,8 +222,18 @@ async def _get_system_prompt_template(context: str) -> str:
     try:
         # cache_ttl_seconds=0 disables SDK caching to always get the latest prompt
         # Use asyncio.to_thread to avoid blocking the event loop
+        # In non-production environments, fetch the latest prompt version
+        # instead of the production-labeled version for easier testing
+        label = (
+            None
+            if settings.config.app_env == AppEnvironment.PRODUCTION
+            else "latest"
+        )
         prompt = await asyncio.to_thread(
-            langfuse.get_prompt, config.langfuse_prompt_name, cache_ttl_seconds=0
+            langfuse.get_prompt,
+            config.langfuse_prompt_name,
+            label=label,
+            cache_ttl_seconds=0,
         )
         return prompt.compile(users_information=context)
     except Exception as e:

From e40233a3acc9231ce5d5ed927a1d0a598c32f69e Mon Sep 17 00:00:00 2001
From: Swifty
Date: Thu, 5 Feb 2026 15:36:55 +0100
Subject: [PATCH 5/7] fix(backend/chat): Guide find_agent users toward action with CTAs (#11976)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

When users search for agents, guide them toward creating custom agents if no results are found or after showing results. This improves user engagement by offering a clear next step.

### Changes 🏗️

- Updated `agent_search.py` to add CTAs in search responses
- Added messaging to inform users they can create custom agents based on their needs
- Applied to both "no results found" and "agents found" scenarios

### Checklist 📋

#### For code changes:

- [x] I have clearly listed my changes in the PR description
- [x] I have made a test plan
- [x] I have tested my changes according to the test plan:
  - [x] Search for agents in marketplace with matching results
  - [x] Search for agents in marketplace with no results
  - [x] Search for agents in library with matching results
  - [x] Search for agents in library with no results
  - [x] Verify CTA message appears in all cases

---------

Co-authored-by: Otto
---
 .../backend/api/features/chat/tools/agent_search.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/agent_search.py b/autogpt_platform/backend/backend/api/features/chat/tools/agent_search.py
index 62d59c470e..61cdba1ef9 100644
--- a/autogpt_platform/backend/backend/api/features/chat/tools/agent_search.py
+++ b/autogpt_platform/backend/backend/api/features/chat/tools/agent_search.py
@@ -206,9 +206,9 @@ async def search_agents(
             ]
         )
         no_results_msg = (
-            f"No agents found matching '{query}'. Try different keywords or browse the marketplace."
+            f"No agents found matching '{query}'. Let the user know they can try different keywords or browse the marketplace. Also let them know you can create a custom agent for them based on their needs."
             if source == "marketplace"
-            else f"No agents matching '{query}' found in your library."
+            else f"No agents matching '{query}' found in your library. Let the user know you can create a custom agent for them based on their needs."
         )
         return NoResultsResponse(
             message=no_results_msg, session_id=session_id, suggestions=suggestions
         )
@@ -224,10 +224,10 @@ async def search_agents(
     message = (
         "Now you have found some options for the user to choose from. "
         "You can add a link to a recommended agent at: /marketplace/agent/agent_id "
-        "Please ask the user if they would like to use any of these agents."
+        "Please ask the user if they would like to use any of these agents. Let the user know we can create a custom agent for them based on their needs."
         if source == "marketplace"
         else "Found agents in the user's library. You can provide a link to view an agent at: "
-        "/library/agents/{agent_id}. Use agent_output to get execution results, or run_agent to execute."
+        "/library/agents/{agent_id}. Use agent_output to get execution results, or run_agent to execute. Let the user know we can create a custom agent for them based on their needs."
     )
 
     return AgentsFoundResponse(

From c22c18374d323b285ea720e53f1a459006d45238 Mon Sep 17 00:00:00 2001
From: Swifty
Date: Thu, 5 Feb 2026 15:37:31 +0100
Subject: [PATCH 6/7] feat(frontend): Add ready-to-test prompt after agent creation [SECRT-1882] (#11975)

## Summary

- Add special UI prompt when agent is successfully created in chat
- Show "Agent Created Successfully" with agent name
- Provide two action buttons:
  - **Run with example values**: Sends chat message asking AI to run with placeholders
  - **Run with my inputs**: Opens RunAgentModal for custom input configuration
- After run/schedule, automatically send chat message with execution details for AI monitoring

https://github.com/user-attachments/assets/b11e118c-de59-4b79-a629-8bd0d52d9161

## Test plan

- [x] Create an agent through chat
- [x] Verify "Agent Created Successfully" prompt appears
- [x] Click "Run with example values" - verify chat message is sent
- [x] Click "Run with my inputs" - verify RunAgentModal opens
- [x] Fill inputs and run - verify chat message with execution ID is sent
- [x] Fill inputs and schedule - verify chat message with schedule details is sent

---------

Co-authored-by: Otto
---
 .../components/ChatMessage/ChatMessage.tsx     |   1 +
 .../components/MessageList/MessageList.tsx     |   1 +
 .../LastToolResponse/LastToolResponse.tsx      |   3 +
 .../AgentCreatedPrompt.tsx                     | 128 ++++++++++++++
 .../ToolResponseMessage.tsx                    |  16 +++
 .../components/ToolResponseMessage/helpers.ts  |  37 +++++
 6 files changed, 186 insertions(+)
 create mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/ToolResponseMessage/AgentCreatedPrompt.tsx

diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatMessage/ChatMessage.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatMessage/ChatMessage.tsx
index 2ac433a272..851c3b33e8 100644
--- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatMessage/ChatMessage.tsx
+++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatMessage/ChatMessage.tsx
@@ -346,6 +346,7 @@ export function ChatMessage({
           toolId={message.toolId}
           toolName={message.toolName}
           result={message.result}
+          onSendMessage={onSendMessage}
         />
       );

diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/MessageList/MessageList.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/MessageList/MessageList.tsx
index 01d107c64e..d8478f1e82 100644
--- a/autogpt_platform/frontend/src/components/contextual/Chat/components/MessageList/MessageList.tsx
+++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/MessageList/MessageList.tsx
@@ -73,6 +73,7 @@ export function MessageList({
               key={index}
               message={message}
               prevMessage={messages[index - 1]}
+              onSendMessage={onSendMessage}
             />
           );
         }

diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/MessageList/components/LastToolResponse/LastToolResponse.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/MessageList/components/LastToolResponse/LastToolResponse.tsx
index 15b10e5715..7c5a75bec5 100644
---
a/autogpt_platform/frontend/src/components/contextual/Chat/components/MessageList/components/LastToolResponse/LastToolResponse.tsx +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/MessageList/components/LastToolResponse/LastToolResponse.tsx @@ -5,11 +5,13 @@ import { shouldSkipAgentOutput } from "../../helpers"; export interface LastToolResponseProps { message: ChatMessageData; prevMessage: ChatMessageData | undefined; + onSendMessage?: (content: string) => void; } export function LastToolResponse({ message, prevMessage, + onSendMessage, }: LastToolResponseProps) { if (message.type !== "tool_response") return null; @@ -21,6 +23,7 @@ export function LastToolResponse({ toolId={message.toolId} toolName={message.toolName} result={message.result} + onSendMessage={onSendMessage} /> ); diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ToolResponseMessage/AgentCreatedPrompt.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/ToolResponseMessage/AgentCreatedPrompt.tsx new file mode 100644 index 0000000000..8494452eea --- /dev/null +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ToolResponseMessage/AgentCreatedPrompt.tsx @@ -0,0 +1,128 @@ +"use client"; + +import { useGetV2GetLibraryAgent } from "@/app/api/__generated__/endpoints/library/library"; +import { GraphExecutionJobInfo } from "@/app/api/__generated__/models/graphExecutionJobInfo"; +import { GraphExecutionMeta } from "@/app/api/__generated__/models/graphExecutionMeta"; +import { RunAgentModal } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/RunAgentModal"; +import { Button } from "@/components/atoms/Button/Button"; +import { Text } from "@/components/atoms/Text/Text"; +import { + CheckCircleIcon, + PencilLineIcon, + PlayIcon, +} from "@phosphor-icons/react"; +import { AIChatBubble } from "../AIChatBubble/AIChatBubble"; + +interface Props { + agentName: string; + libraryAgentId: string; + onSendMessage?: (content: string) => void; +} + +export function AgentCreatedPrompt({ + agentName, + libraryAgentId, + onSendMessage, +}: Props) { + // Fetch library agent eagerly so modal is ready when user clicks + const { data: libraryAgentResponse, isLoading } = useGetV2GetLibraryAgent( + libraryAgentId, + { + query: { + enabled: !!libraryAgentId, + }, + }, + ); + + const libraryAgent = + libraryAgentResponse?.status === 200 ? libraryAgentResponse.data : null; + + function handleRunWithPlaceholders() { + onSendMessage?.( + `Run the agent "${agentName}" with placeholder/example values so I can test it.`, + ); + } + + function handleRunCreated(execution: GraphExecutionMeta) { + onSendMessage?.( + `I've started the agent "${agentName}". The execution ID is ${execution.id}. Please monitor its progress and let me know when it completes.`, + ); + } + + function handleScheduleCreated(schedule: GraphExecutionJobInfo) { + const scheduleInfo = schedule.cron + ? `with cron schedule "${schedule.cron}"` + : "to run on the specified schedule"; + onSendMessage?.( + `I've scheduled the agent "${agentName}" ${scheduleInfo}. The schedule ID is ${schedule.id}.`, + ); + } + + return ( + +
+
+
+ +
+
+ + Agent Created Successfully + + + "{agentName}" is ready to test + +
+
+ +
+ + Ready to test? + +
+ + {libraryAgent ? ( + + + Run with my inputs + + } + agent={libraryAgent} + onRunCreated={handleRunCreated} + onScheduleCreated={handleScheduleCreated} + /> + ) : ( + + )} +
+ + or just ask me + +
+
+
+ ); +} diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ToolResponseMessage/ToolResponseMessage.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/ToolResponseMessage/ToolResponseMessage.tsx index 27da02beb8..53d5f1ef96 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ToolResponseMessage/ToolResponseMessage.tsx +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ToolResponseMessage/ToolResponseMessage.tsx @@ -2,11 +2,13 @@ import { Text } from "@/components/atoms/Text/Text"; import { cn } from "@/lib/utils"; import type { ToolResult } from "@/types/chat"; import { WarningCircleIcon } from "@phosphor-icons/react"; +import { AgentCreatedPrompt } from "./AgentCreatedPrompt"; import { AIChatBubble } from "../AIChatBubble/AIChatBubble"; import { MarkdownContent } from "../MarkdownContent/MarkdownContent"; import { formatToolResponse, getErrorMessage, + isAgentSavedResponse, isErrorResponse, } from "./helpers"; @@ -16,6 +18,7 @@ export interface ToolResponseMessageProps { result?: ToolResult; success?: boolean; className?: string; + onSendMessage?: (content: string) => void; } export function ToolResponseMessage({ @@ -24,6 +27,7 @@ export function ToolResponseMessage({ result, success: _success, className, + onSendMessage, }: ToolResponseMessageProps) { if (isErrorResponse(result)) { const errorMessage = getErrorMessage(result); @@ -43,6 +47,18 @@ export function ToolResponseMessage({ ); } + // Check for agent_saved response - show special prompt + const agentSavedData = isAgentSavedResponse(result); + if (agentSavedData.isSaved) { + return ( + + ); + } + const formattedText = formatToolResponse(result, toolName); return ( diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ToolResponseMessage/helpers.ts b/autogpt_platform/frontend/src/components/contextual/Chat/components/ToolResponseMessage/helpers.ts index 2397176603..9ed4e3973b 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ToolResponseMessage/helpers.ts +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ToolResponseMessage/helpers.ts @@ -6,6 +6,43 @@ function stripInternalReasoning(content: string): string { .trim(); } +export interface AgentSavedData { + isSaved: boolean; + agentName: string; + agentId: string; + libraryAgentId: string; + libraryAgentLink: string; +} + +export function isAgentSavedResponse(result: unknown): AgentSavedData { + if (typeof result !== "object" || result === null) { + return { + isSaved: false, + agentName: "", + agentId: "", + libraryAgentId: "", + libraryAgentLink: "", + }; + } + const response = result as Record; + if (response.type === "agent_saved") { + return { + isSaved: true, + agentName: (response.agent_name as string) || "Agent", + agentId: (response.agent_id as string) || "", + libraryAgentId: (response.library_agent_id as string) || "", + libraryAgentLink: (response.library_agent_link as string) || "", + }; + } + return { + isSaved: false, + agentName: "", + agentId: "", + libraryAgentId: "", + libraryAgentLink: "", + }; +} + export function isErrorResponse(result: unknown): boolean { if (typeof result === "string") { const lower = result.toLowerCase(); From b121030c944103dcffab535351a0e540394cb7ff Mon Sep 17 00:00:00 2001 From: Swifty Date: Thu, 5 Feb 2026 15:37:51 +0100 Subject: [PATCH 7/7] feat(frontend): Add progress indicator during agent generation [SECRT-1883] (#11974) ## Summary - Add asymptotic 
progress bar that appears during long-running chat tasks - Progress bar shows after 10 seconds with "Working on it..." label and percentage - Uses half-life formula: ~50% at 30s, ~75% at 60s, ~87.5% at 90s, etc. - Creates the classic "game loading bar" effect that never reaches 100% https://github.com/user-attachments/assets/3c59289e-793c-4a08-b3fc-69e1eef28b1f ## Test plan - [x] Start a chat that triggers agent generation - [x] Wait 10+ seconds for the progress bar to appear - [x] Verify progress bar is centered with label and percentage - [x] Verify progress follows expected timing (~50% at 30s) - [x] Verify progress bar disappears when task completes --------- Co-authored-by: Otto --- autogpt_platform/backend/.gitignore | 3 ++ .../ThinkingMessage/ThinkingMessage.tsx | 22 ++++++-- .../ToolCallMessage/useAsymptoticProgress.ts | 50 +++++++++++++++++++ 3 files changed, 70 insertions(+), 5 deletions(-) create mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/ToolCallMessage/useAsymptoticProgress.ts diff --git a/autogpt_platform/backend/.gitignore b/autogpt_platform/backend/.gitignore index 9224c07d9e..6e688311a6 100644 --- a/autogpt_platform/backend/.gitignore +++ b/autogpt_platform/backend/.gitignore @@ -19,3 +19,6 @@ load-tests/*.json load-tests/*.log load-tests/node_modules/* migrations/*/rollback*.sql + +# Workspace files +workspaces/ diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ThinkingMessage/ThinkingMessage.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/ThinkingMessage/ThinkingMessage.tsx index 047c2277b0..34018f0292 100644 --- a/autogpt_platform/frontend/src/components/contextual/Chat/components/ThinkingMessage/ThinkingMessage.tsx +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ThinkingMessage/ThinkingMessage.tsx @@ -1,6 +1,8 @@ +import { Progress } from "@/components/atoms/Progress/Progress"; import { cn } from "@/lib/utils"; import { useEffect, useRef, useState } from "react"; import { AIChatBubble } from "../AIChatBubble/AIChatBubble"; +import { useAsymptoticProgress } from "../ToolCallMessage/useAsymptoticProgress"; export interface ThinkingMessageProps { className?: string; @@ -11,18 +13,19 @@ export function ThinkingMessage({ className }: ThinkingMessageProps) { const [showCoffeeMessage, setShowCoffeeMessage] = useState(false); const timerRef = useRef(null); const coffeeTimerRef = useRef(null); + const progress = useAsymptoticProgress(showCoffeeMessage); useEffect(() => { if (timerRef.current === null) { timerRef.current = setTimeout(() => { setShowSlowLoader(true); - }, 8000); + }, 3000); } if (coffeeTimerRef.current === null) { coffeeTimerRef.current = setTimeout(() => { setShowCoffeeMessage(true); - }, 10000); + }, 8000); } return () => { @@ -49,9 +52,18 @@ export function ThinkingMessage({ className }: ThinkingMessageProps) {
{showCoffeeMessage ? ( - - This could take a few minutes, grab a coffee ☕️ - +
+
+
+ Working on it... + {Math.round(progress)}% +
+ +
+ + This could take a few minutes, grab a coffee ☕️ + +
) : showSlowLoader ? ( Taking a bit more time... diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ToolCallMessage/useAsymptoticProgress.ts b/autogpt_platform/frontend/src/components/contextual/Chat/components/ToolCallMessage/useAsymptoticProgress.ts new file mode 100644 index 0000000000..cf1b89e7c4 --- /dev/null +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ToolCallMessage/useAsymptoticProgress.ts @@ -0,0 +1,50 @@ +import { useEffect, useRef, useState } from "react"; + +/** + * Hook that returns a progress value that starts fast and slows down, + * asymptotically approaching but never reaching the max value. + * + * Uses a half-life formula: progress = max * (1 - 0.5^(time/halfLife)) + * This creates the "game loading bar" effect where: + * - 50% is reached at halfLifeSeconds + * - 75% is reached at 2 * halfLifeSeconds + * - 87.5% is reached at 3 * halfLifeSeconds + * - and so on... + * + * @param isActive - Whether the progress should be animating + * @param halfLifeSeconds - Time in seconds to reach 50% progress (default: 30) + * @param maxProgress - Maximum progress value to approach (default: 100) + * @param intervalMs - Update interval in milliseconds (default: 100) + * @returns Current progress value (0-maxProgress) + */ +export function useAsymptoticProgress( + isActive: boolean, + halfLifeSeconds = 30, + maxProgress = 100, + intervalMs = 100, +) { + const [progress, setProgress] = useState(0); + const elapsedTimeRef = useRef(0); + + useEffect(() => { + if (!isActive) { + setProgress(0); + elapsedTimeRef.current = 0; + return; + } + + const interval = setInterval(() => { + elapsedTimeRef.current += intervalMs / 1000; + // Half-life approach: progress = max * (1 - 0.5^(time/halfLife)) + // At t=halfLife: 50%, at t=2*halfLife: 75%, at t=3*halfLife: 87.5%, etc. + const newProgress = + maxProgress * + (1 - Math.pow(0.5, elapsedTimeRef.current / halfLifeSeconds)); + setProgress(newProgress); + }, intervalMs); + + return () => clearInterval(interval); + }, [isActive, halfLifeSeconds, maxProgress, intervalMs]); + + return progress; +}