From c5069ca48f1e7f5ccd29e504affb91e2560ce8b1 Mon Sep 17 00:00:00 2001 From: Ubbe Date: Thu, 22 Jan 2026 16:43:42 +0700 Subject: [PATCH 1/4] fix(frontend): chat UX improvements (#11804) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ### Changes 🏗️ Screenshot 2026-01-19 at 22 14 51 This PR lays the groundwork for the new UX of AutoGPT Copilot. - Moves the Copilot to its own route `/copilot` - Makes the Copilot the homepage when enabled - Updates the labelling of the homepage icons - Makes the Library the homepage when Copilot is disabled - Improves Copilot's: - session handling - styles and UX - message parsing ### Other improvements - Improve the log out UX by adding a new `/logout` page and using a redirect ### Checklist 📋 #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Run locally and test the above --- > [!NOTE] > Launches the new Copilot experience and aligns API behavior with the UI. > > - **Routing/Home**: Add `/copilot` with `CopilotShell` (desktop sidebar + mobile drawer), make homepage route flag-driven; update login/signup/error redirects and root page to use `getHomepageRoute`. > - **Chat UX**: Replace legacy chat with `components/contextual/Chat/*` (new message list, bubbles, tool call/response formatting, stop button, initial-prompt handling, refined streaming/error handling); remove old platform chat components. > - **Sessions**: Add paginated session list (infinite load), auto-select/create logic, mobile/desktop navigation, and improved session fetching/claiming guards. > - **Auth/Logout**: New `/logout` flow with delayed redirect; gate various queries on auth state and logout-in-progress. 
> - **Backend**: `GET /api/chat/sessions/{id}` returns `null` instead of 404; service saves assistant message on `StreamFinish` to avoid loss and prevents duplicate saves; OpenAPI updated accordingly. > - **Misc**: Minor UI polish in library modals, loader styling, docs (CONTRIBUTING) additions, and small formatting fixes in block docs generator. > > Written by [Cursor Bugbot](https://cursor.com/dashboard?tab=bugbot) for commit 1b4776dcf52ccd6987830ada3a58a87a160ce36c. This will update automatically on new commits. Configure [here](https://cursor.com/dashboard?tab=bugbot). --- AGENTS.md | 26 ++ autogpt_platform/CLAUDE.md | 10 +- .../backend/api/features/chat/model.py | 5 + .../backend/api/features/chat/routes.py | 46 ++- .../backend/api/features/chat/service.py | 182 +++++++++--- autogpt_platform/frontend/CONTRIBUTING.md | 43 ++- .../src/app/(no-navbar)/logout/page.tsx | 58 ++++ .../src/app/(platform)/auth/callback/route.ts | 2 +- .../(platform)/chat/components/Chat/Chat.tsx | 134 --------- .../ChatContainer/ChatContainer.tsx | 88 ------ .../Chat/components/ChatInput/ChatInput.tsx | 64 ----- .../Chat/components/ChatInput/useChatInput.ts | 60 ---- .../components/MessageList/MessageList.tsx | 121 -------- .../ToolCallMessage/ToolCallMessage.tsx | 24 -- .../ToolResponseMessage.tsx | 260 ----------------- .../chat/components/Chat/helpers.ts | 66 ----- .../chat/components/Chat/useChatSession.ts | 271 ------------------ .../frontend/src/app/(platform)/chat/page.tsx | 27 -- .../components/CopilotShell/CopilotShell.tsx | 88 ++++++ .../DesktopSidebar/DesktopSidebar.tsx | 70 +++++ .../components/LoadingState/LoadingState.tsx | 15 + .../components/MobileDrawer/MobileDrawer.tsx | 91 ++++++ .../MobileDrawer/useMobileDrawer.ts | 24 ++ .../components/MobileHeader/MobileHeader.tsx | 22 ++ .../components/SessionsList/SessionsList.tsx | 80 ++++++ .../SessionsList/useSessionsPagination.ts | 92 ++++++ .../components/CopilotShell/helpers.ts | 165 +++++++++++ 
.../CopilotShell/useCopilotShell.ts | 170 +++++++++++ .../src/app/(platform)/copilot/helpers.ts | 33 +++ .../src/app/(platform)/copilot/layout.tsx | 6 + .../src/app/(platform)/copilot/page.tsx | 228 +++++++++++++++ .../src/app/(platform)/error/page.tsx | 8 +- .../modals/RunAgentModal/RunAgentModal.tsx | 2 +- .../components/ModalHeader/ModalHeader.tsx | 6 +- .../LibraryAgentCard/useLibraryAgentCard.ts | 6 + .../src/app/(platform)/login/useLoginPage.ts | 10 +- .../app/(platform)/profile/(user)/page.tsx | 4 +- .../src/app/(platform)/signup/actions.ts | 6 +- .../app/(platform)/signup/useSignupPage.ts | 11 +- .../src/app/api/mutators/custom-mutator.ts | 4 +- .../frontend/src/app/api/openapi.json | 2 +- autogpt_platform/frontend/src/app/globals.css | 46 --- autogpt_platform/frontend/src/app/page.tsx | 26 +- .../src/components/contextual/Chat/Chat.tsx | 81 ++++++ .../components/AIChatBubble/AIChatBubble.tsx | 15 + .../AgentCarouselMessage.tsx | 0 .../AgentInputsSetup/AgentInputsSetup.tsx | 0 .../AgentInputsSetup/useAgentInputsSetup.ts | 0 .../AuthPromptWidget/AuthPromptWidget.tsx | 2 +- .../ChatContainer/ChatContainer.tsx | 106 +++++++ .../createStreamEventDispatcher.ts | 36 ++- .../components/ChatContainer/handlers.ts} | 137 ++++----- .../Chat/components/ChatContainer/helpers.ts | 39 ++- .../ChatContainer/useChatContainer.ts | 116 ++++++-- .../ChatCredentialsSetup.tsx | 0 .../useChatCredentialsSetup.ts | 0 .../ChatErrorState/ChatErrorState.tsx | 0 .../Chat/components/ChatInput/ChatInput.tsx | 103 +++++++ .../Chat/components/ChatInput/useChatInput.ts | 115 ++++++++ .../Chat/components/ChatLoader/ChatLoader.tsx | 12 + .../ChatLoadingState/ChatLoadingState.tsx | 0 .../components/ChatMessage/ChatMessage.tsx | 205 +++++++------ .../components/ChatMessage/useChatMessage.ts | 0 .../ExecutionStartedMessage.tsx | 0 .../MarkdownContent/MarkdownContent.tsx | 0 .../MessageBubble/MessageBubble.tsx | 11 +- .../components/MessageList/MessageList.tsx | 119 ++++++++ 
.../LastToolResponse/LastToolResponse.tsx | 30 ++ .../components/MessageItem/MessageItem.tsx | 40 +++ .../components/MessageItem/useMessageItem.ts | 62 ++++ .../Chat/components/MessageList/helpers.ts | 68 +++++ .../components/MessageList/useMessageList.ts | 0 .../NoResultsMessage/NoResultsMessage.tsx | 0 .../QuickActionsWelcome.tsx | 0 .../SessionsDrawer/SessionsDrawer.tsx | 4 +- .../StreamingMessage/StreamingMessage.tsx | 13 +- .../StreamingMessage/useStreamingMessage.ts | 0 .../ThinkingMessage/ThinkingMessage.tsx | 21 +- .../ToolCallMessage/ToolCallMessage.tsx | 55 ++++ .../components/ToolCallMessage/helpers.ts | 184 ++++++++++++ .../ToolResponseMessage.tsx | 28 ++ .../components/ToolResponseMessage/helpers.ts | 256 +++++++++++++++++ .../UserChatBubble/UserChatBubble.tsx | 25 ++ .../contextual}/Chat/useChat.ts | 40 +-- .../contextual}/Chat/useChatDrawer.ts | 0 .../contextual/Chat/useChatSession.ts | 262 +++++++++++++++++ .../contextual}/Chat/useChatStream.ts | 205 ++++++++++++- .../contextual}/Chat/usePageContext.ts | 0 .../AgentSelectStep/useAgentSelectStep.ts | 3 + .../PublishAgentModal/usePublishAgentModal.ts | 14 +- .../src/components/layout/Navbar/Navbar.tsx | 41 +-- .../components/AccountLogoutOption.tsx | 56 +--- .../Navbar/components/MenuIcon/MenuIcon.tsx | 164 +++++++++++ .../components/MobileNavbar/MobileNavBar.tsx | 46 +-- .../components/MobileNavbarLogoutItem.tsx | 31 ++ .../layout/Navbar/components/NavbarLink.tsx | 68 +++-- .../src/components/layout/Navbar/helpers.tsx | 4 - .../src/components/layout/Navbar/useNavbar.ts | 4 +- .../components/molecules/Dialog/Dialog.tsx | 4 +- .../Dialog/components/DialogWrap.tsx | 34 ++- .../frontend/src/components/styles/colors.ts | 2 +- .../frontend/src/lib/constants.ts | 10 + .../frontend/src/lib/supabase/helpers.ts | 3 +- .../frontend/src/lib/supabase/middleware.ts | 3 +- .../onboarding/onboarding-provider.tsx | 30 +- .../feature-flags/feature-flag-provider.tsx | 6 +- .../src/services/storage/session-storage.ts | 
40 +++ .../frontend/src/tests/signin.spec.ts | 8 +- autogpt_platform/frontend/tailwind.config.ts | 9 + 109 files changed, 4057 insertions(+), 1615 deletions(-) create mode 100644 autogpt_platform/frontend/src/app/(no-navbar)/logout/page.tsx delete mode 100644 autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/Chat.tsx delete mode 100644 autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatContainer/ChatContainer.tsx delete mode 100644 autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatInput/ChatInput.tsx delete mode 100644 autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatInput/useChatInput.ts delete mode 100644 autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/MessageList/MessageList.tsx delete mode 100644 autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ToolCallMessage/ToolCallMessage.tsx delete mode 100644 autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ToolResponseMessage/ToolResponseMessage.tsx delete mode 100644 autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/helpers.ts delete mode 100644 autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/useChatSession.ts delete mode 100644 autogpt_platform/frontend/src/app/(platform)/chat/page.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/CopilotShell.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/DesktopSidebar/DesktopSidebar.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/LoadingState/LoadingState.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/MobileDrawer/MobileDrawer.tsx create mode 100644 
autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/MobileDrawer/useMobileDrawer.ts create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/MobileHeader/MobileHeader.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/SessionsList/SessionsList.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/SessionsList/useSessionsPagination.ts create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/helpers.ts create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/useCopilotShell.ts create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/helpers.ts create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/layout.tsx create mode 100644 autogpt_platform/frontend/src/app/(platform)/copilot/page.tsx create mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/Chat.tsx create mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/AIChatBubble/AIChatBubble.tsx rename autogpt_platform/frontend/src/{app/(platform)/chat/components => components/contextual}/Chat/components/AgentCarouselMessage/AgentCarouselMessage.tsx (100%) rename autogpt_platform/frontend/src/{app/(platform)/chat/components => components/contextual}/Chat/components/AgentInputsSetup/AgentInputsSetup.tsx (100%) rename autogpt_platform/frontend/src/{app/(platform)/chat/components => components/contextual}/Chat/components/AgentInputsSetup/useAgentInputsSetup.ts (100%) rename autogpt_platform/frontend/src/{app/(platform)/chat/components => components/contextual}/Chat/components/AuthPromptWidget/AuthPromptWidget.tsx (99%) create mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/ChatContainer.tsx rename 
autogpt_platform/frontend/src/{app/(platform)/chat/components => components/contextual}/Chat/components/ChatContainer/createStreamEventDispatcher.ts (55%) rename autogpt_platform/frontend/src/{app/(platform)/chat/components/Chat/components/ChatContainer/useChatContainer.handlers.ts => components/contextual/Chat/components/ChatContainer/handlers.ts} (66%) rename autogpt_platform/frontend/src/{app/(platform)/chat/components => components/contextual}/Chat/components/ChatContainer/helpers.ts (92%) rename autogpt_platform/frontend/src/{app/(platform)/chat/components => components/contextual}/Chat/components/ChatContainer/useChatContainer.ts (65%) rename autogpt_platform/frontend/src/{app/(platform)/chat/components => components/contextual}/Chat/components/ChatCredentialsSetup/ChatCredentialsSetup.tsx (100%) rename autogpt_platform/frontend/src/{app/(platform)/chat/components => components/contextual}/Chat/components/ChatCredentialsSetup/useChatCredentialsSetup.ts (100%) rename autogpt_platform/frontend/src/{app/(platform)/chat/components => components/contextual}/Chat/components/ChatErrorState/ChatErrorState.tsx (100%) create mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/ChatInput.tsx create mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/useChatInput.ts create mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/ChatLoader/ChatLoader.tsx rename autogpt_platform/frontend/src/{app/(platform)/chat/components => components/contextual}/Chat/components/ChatLoadingState/ChatLoadingState.tsx (100%) rename autogpt_platform/frontend/src/{app/(platform)/chat/components => components/contextual}/Chat/components/ChatMessage/ChatMessage.tsx (67%) rename autogpt_platform/frontend/src/{app/(platform)/chat/components => components/contextual}/Chat/components/ChatMessage/useChatMessage.ts (100%) rename autogpt_platform/frontend/src/{app/(platform)/chat/components => 
components/contextual}/Chat/components/ExecutionStartedMessage/ExecutionStartedMessage.tsx (100%) rename autogpt_platform/frontend/src/{app/(platform)/chat/components => components/contextual}/Chat/components/MarkdownContent/MarkdownContent.tsx (100%) rename autogpt_platform/frontend/src/{app/(platform)/chat/components => components/contextual}/Chat/components/MessageBubble/MessageBubble.tsx (82%) create mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/MessageList/MessageList.tsx create mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/MessageList/components/LastToolResponse/LastToolResponse.tsx create mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/MessageList/components/MessageItem/MessageItem.tsx create mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/MessageList/components/MessageItem/useMessageItem.ts create mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/MessageList/helpers.ts rename autogpt_platform/frontend/src/{app/(platform)/chat/components => components/contextual}/Chat/components/MessageList/useMessageList.ts (100%) rename autogpt_platform/frontend/src/{app/(platform)/chat/components => components/contextual}/Chat/components/NoResultsMessage/NoResultsMessage.tsx (100%) rename autogpt_platform/frontend/src/{app/(platform)/chat/components => components/contextual}/Chat/components/QuickActionsWelcome/QuickActionsWelcome.tsx (100%) rename autogpt_platform/frontend/src/{app/(platform)/chat/components => components/contextual}/Chat/components/SessionsDrawer/SessionsDrawer.tsx (97%) rename autogpt_platform/frontend/src/{app/(platform)/chat/components => components/contextual}/Chat/components/StreamingMessage/StreamingMessage.tsx (65%) rename autogpt_platform/frontend/src/{app/(platform)/chat/components => components/contextual}/Chat/components/StreamingMessage/useStreamingMessage.ts (100%) rename 
autogpt_platform/frontend/src/{app/(platform)/chat/components => components/contextual}/Chat/components/ThinkingMessage/ThinkingMessage.tsx (67%) create mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/ToolCallMessage/ToolCallMessage.tsx create mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/ToolCallMessage/helpers.ts create mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/ToolResponseMessage/ToolResponseMessage.tsx create mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/ToolResponseMessage/helpers.ts create mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/components/UserChatBubble/UserChatBubble.tsx rename autogpt_platform/frontend/src/{app/(platform)/chat/components => components/contextual}/Chat/useChat.ts (79%) rename autogpt_platform/frontend/src/{app/(platform)/chat/components => components/contextual}/Chat/useChatDrawer.ts (100%) create mode 100644 autogpt_platform/frontend/src/components/contextual/Chat/useChatSession.ts rename autogpt_platform/frontend/src/{app/(platform)/chat/components => components/contextual}/Chat/useChatStream.ts (59%) rename autogpt_platform/frontend/src/{app/(platform)/chat/components => components/contextual}/Chat/usePageContext.ts (100%) create mode 100644 autogpt_platform/frontend/src/components/layout/Navbar/components/MenuIcon/MenuIcon.tsx create mode 100644 autogpt_platform/frontend/src/components/layout/Navbar/components/MobileNavbar/components/MobileNavbarLogoutItem.tsx create mode 100644 autogpt_platform/frontend/src/services/storage/session-storage.ts diff --git a/AGENTS.md b/AGENTS.md index d31bc92f8c..cd176f8a2d 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -16,6 +16,32 @@ See `docs/content/platform/getting-started.md` for setup instructions. - Format Python code with `poetry run format`. - Format frontend code using `pnpm format`. 
+ +## Frontend guidelines: + +See `/frontend/CONTRIBUTING.md` for complete patterns. Quick reference: + +1. **Pages**: Create in `src/app/(platform)/feature-name/page.tsx` + - Add `usePageName.ts` hook for logic + - Put sub-components in local `components/` folder +2. **Components**: Structure as `ComponentName/ComponentName.tsx` + `useComponentName.ts` + `helpers.ts` + - Use design system components from `src/components/` (atoms, molecules, organisms) + - Never use `src/components/__legacy__/*` +3. **Data fetching**: Use generated API hooks from `@/app/api/__generated__/endpoints/` + - Regenerate with `pnpm generate:api` + - Pattern: `use{Method}{Version}{OperationName}` +4. **Styling**: Tailwind CSS only, use design tokens, Phosphor Icons only +5. **Testing**: Add Storybook stories for new components, Playwright for E2E +6. **Code conventions**: Function declarations (not arrow functions) for components/handlers +- Component props should be `interface Props { ... }` (not exported) unless the interface needs to be used outside the component +- Separate render logic from business logic (component.tsx + useComponent.ts + helpers.ts) +- Colocate state when possible and avoid creating large components, use sub-components ( local `/components` folder next to the parent component ) when sensible +- Avoid large hooks, abstract logic into `helpers.ts` files when sensible +- Use function declarations for components, arrow functions only for callbacks +- No barrel files or `index.ts` re-exports +- Do not use `useCallback` or `useMemo` unless strictly needed +- Avoid comments at all times unless the code is very complex + ## Testing - Backend: `poetry run test` (runs pytest with a docker based postgres + prisma). 
diff --git a/autogpt_platform/CLAUDE.md b/autogpt_platform/CLAUDE.md index df1f3314aa..2c76e7db80 100644 --- a/autogpt_platform/CLAUDE.md +++ b/autogpt_platform/CLAUDE.md @@ -201,7 +201,7 @@ If you get any pushback or hit complex block conditions check the new_blocks gui 3. Write tests alongside the route file 4. Run `poetry run test` to verify -**Frontend feature development:** +### Frontend guidelines: See `/frontend/CONTRIBUTING.md` for complete patterns. Quick reference: @@ -217,6 +217,14 @@ See `/frontend/CONTRIBUTING.md` for complete patterns. Quick reference: 4. **Styling**: Tailwind CSS only, use design tokens, Phosphor Icons only 5. **Testing**: Add Storybook stories for new components, Playwright for E2E 6. **Code conventions**: Function declarations (not arrow functions) for components/handlers +- Component props should be `interface Props { ... }` (not exported) unless the interface needs to be used outside the component +- Separate render logic from business logic (component.tsx + useComponent.ts + helpers.ts) +- Colocate state when possible and avoid creating large components, use sub-components ( local `/components` folder next to the parent component ) when sensible +- Avoid large hooks, abstract logic into `helpers.ts` files when sensible +- Use function declarations for components, arrow functions only for callbacks +- No barrel files or `index.ts` re-exports +- Do not use `useCallback` or `useMemo` unless strictly needed +- Avoid comments at all times unless the code is very complex ### Security Implementation diff --git a/autogpt_platform/backend/backend/api/features/chat/model.py b/autogpt_platform/backend/backend/api/features/chat/model.py index ec4cf1fc8b..75bda11127 100644 --- a/autogpt_platform/backend/backend/api/features/chat/model.py +++ b/autogpt_platform/backend/backend/api/features/chat/model.py @@ -290,6 +290,11 @@ async def _cache_session(session: ChatSession) -> None: await async_redis.setex(redis_key, config.session_ttl, 
session.model_dump_json()) +async def cache_chat_session(session: ChatSession) -> None: + """Cache a chat session without persisting to the database.""" + await _cache_session(session) + + async def _get_session_from_db(session_id: str) -> ChatSession | None: """Get a chat session from the database.""" prisma_session = await chat_db.get_chat_session(session_id) diff --git a/autogpt_platform/backend/backend/api/features/chat/routes.py b/autogpt_platform/backend/backend/api/features/chat/routes.py index 58b017ad5e..cab51543b1 100644 --- a/autogpt_platform/backend/backend/api/features/chat/routes.py +++ b/autogpt_platform/backend/backend/api/features/chat/routes.py @@ -172,12 +172,12 @@ async def get_session( user_id: The optional authenticated user ID, or None for anonymous access. Returns: - SessionDetailResponse: Details for the requested session; raises NotFoundError if not found. + SessionDetailResponse: Details for the requested session, or None if not found. """ session = await get_chat_session(session_id, user_id) if not session: - raise NotFoundError(f"Session {session_id} not found") + raise NotFoundError(f"Session {session_id} not found.") messages = [message.model_dump() for message in session.messages] logger.info( @@ -222,6 +222,8 @@ async def stream_chat_post( session = await _validate_and_get_session(session_id, user_id) async def event_generator() -> AsyncGenerator[str, None]: + chunk_count = 0 + first_chunk_type: str | None = None async for chunk in chat_service.stream_chat_completion( session_id, request.message, @@ -230,7 +232,26 @@ async def stream_chat_post( session=session, # Pass pre-fetched session to avoid double-fetch context=request.context, ): + if chunk_count < 3: + logger.info( + "Chat stream chunk", + extra={ + "session_id": session_id, + "chunk_type": str(chunk.type), + }, + ) + if not first_chunk_type: + first_chunk_type = str(chunk.type) + chunk_count += 1 yield chunk.to_sse() + logger.info( + "Chat stream completed", + extra={ + 
"session_id": session_id, + "chunk_count": chunk_count, + "first_chunk_type": first_chunk_type, + }, + ) # AI SDK protocol termination yield "data: [DONE]\n\n" @@ -275,6 +296,8 @@ async def stream_chat_get( session = await _validate_and_get_session(session_id, user_id) async def event_generator() -> AsyncGenerator[str, None]: + chunk_count = 0 + first_chunk_type: str | None = None async for chunk in chat_service.stream_chat_completion( session_id, message, @@ -282,7 +305,26 @@ async def stream_chat_get( user_id=user_id, session=session, # Pass pre-fetched session to avoid double-fetch ): + if chunk_count < 3: + logger.info( + "Chat stream chunk", + extra={ + "session_id": session_id, + "chunk_type": str(chunk.type), + }, + ) + if not first_chunk_type: + first_chunk_type = str(chunk.type) + chunk_count += 1 yield chunk.to_sse() + logger.info( + "Chat stream completed", + extra={ + "session_id": session_id, + "chunk_count": chunk_count, + "first_chunk_type": first_chunk_type, + }, + ) # AI SDK protocol termination yield "data: [DONE]\n\n" diff --git a/autogpt_platform/backend/backend/api/features/chat/service.py b/autogpt_platform/backend/backend/api/features/chat/service.py index 93634c47e3..3daf378f65 100644 --- a/autogpt_platform/backend/backend/api/features/chat/service.py +++ b/autogpt_platform/backend/backend/api/features/chat/service.py @@ -1,12 +1,20 @@ import asyncio import logging +import time +from asyncio import CancelledError from collections.abc import AsyncGenerator from typing import Any import orjson from langfuse import get_client, propagate_attributes from langfuse.openai import openai # type: ignore -from openai import APIConnectionError, APIError, APIStatusError, RateLimitError +from openai import ( + APIConnectionError, + APIError, + APIStatusError, + PermissionDeniedError, + RateLimitError, +) from openai.types.chat import ChatCompletionChunk, ChatCompletionToolParam from backend.data.understanding import ( @@ -21,6 +29,7 @@ from .model import 
( ChatMessage, ChatSession, Usage, + cache_chat_session, get_chat_session, update_session_title, upsert_chat_session, @@ -296,6 +305,10 @@ async def stream_chat_completion( content="", ) accumulated_tool_calls: list[dict[str, Any]] = [] + has_saved_assistant_message = False + has_appended_streaming_message = False + last_cache_time = 0.0 + last_cache_content_len = 0 # Wrap main logic in try/finally to ensure Langfuse observations are always ended has_yielded_end = False @@ -332,6 +345,23 @@ async def stream_chat_completion( assert assistant_response.content is not None assistant_response.content += delta has_received_text = True + if not has_appended_streaming_message: + session.messages.append(assistant_response) + has_appended_streaming_message = True + current_time = time.monotonic() + content_len = len(assistant_response.content) + if ( + current_time - last_cache_time >= 1.0 + and content_len > last_cache_content_len + ): + try: + await cache_chat_session(session) + except Exception as e: + logger.warning( + f"Failed to cache partial session {session.session_id}: {e}" + ) + last_cache_time = current_time + last_cache_content_len = content_len yield chunk elif isinstance(chunk, StreamTextEnd): # Emit text-end after text completes @@ -390,10 +420,42 @@ async def stream_chat_completion( if has_received_text and not text_streaming_ended: yield StreamTextEnd(id=text_block_id) text_streaming_ended = True + + # Save assistant message before yielding finish to ensure it's persisted + # even if client disconnects immediately after receiving StreamFinish + if not has_saved_assistant_message: + messages_to_save_early: list[ChatMessage] = [] + if accumulated_tool_calls: + assistant_response.tool_calls = ( + accumulated_tool_calls + ) + if not has_appended_streaming_message and ( + assistant_response.content + or assistant_response.tool_calls + ): + messages_to_save_early.append(assistant_response) + messages_to_save_early.extend(tool_response_messages) + + if 
messages_to_save_early: + session.messages.extend(messages_to_save_early) + logger.info( + f"Saving assistant message before StreamFinish: " + f"content_len={len(assistant_response.content or '')}, " + f"tool_calls={len(assistant_response.tool_calls or [])}, " + f"tool_responses={len(tool_response_messages)}" + ) + if ( + messages_to_save_early + or has_appended_streaming_message + ): + await upsert_chat_session(session) + has_saved_assistant_message = True + has_yielded_end = True yield chunk elif isinstance(chunk, StreamError): has_yielded_error = True + yield chunk elif isinstance(chunk, StreamUsage): session.usage.append( Usage( @@ -413,6 +475,27 @@ async def stream_chat_completion( langfuse.update_current_trace(output=str(tool_response_messages)) langfuse.update_current_span(output=str(tool_response_messages)) + except CancelledError: + if not has_saved_assistant_message: + if accumulated_tool_calls: + assistant_response.tool_calls = accumulated_tool_calls + if assistant_response.content: + assistant_response.content = ( + f"{assistant_response.content}\n\n[interrupted]" + ) + else: + assistant_response.content = "[interrupted]" + if not has_appended_streaming_message: + session.messages.append(assistant_response) + if tool_response_messages: + session.messages.extend(tool_response_messages) + try: + await upsert_chat_session(session) + except Exception as e: + logger.warning( + f"Failed to save interrupted session {session.session_id}: {e}" + ) + raise except Exception as e: logger.error(f"Error during stream: {e!s}", exc_info=True) @@ -434,14 +517,19 @@ async def stream_chat_completion( # Add assistant message if it has content or tool calls if accumulated_tool_calls: assistant_response.tool_calls = accumulated_tool_calls - if assistant_response.content or assistant_response.tool_calls: + if not has_appended_streaming_message and ( + assistant_response.content or assistant_response.tool_calls + ): messages_to_save.append(assistant_response) # Add tool 
response messages after assistant message messages_to_save.extend(tool_response_messages) - session.messages.extend(messages_to_save) - await upsert_chat_session(session) + if not has_saved_assistant_message: + if messages_to_save: + session.messages.extend(messages_to_save) + if messages_to_save or has_appended_streaming_message: + await upsert_chat_session(session) if not has_yielded_error: error_message = str(e) @@ -472,38 +560,49 @@ async def stream_chat_completion( return # Exit after retry to avoid double-saving in finally block # Normal completion path - save session and handle tool call continuation - logger.info( - f"Normal completion path: session={session.session_id}, " - f"current message_count={len(session.messages)}" - ) - - # Build the messages list in the correct order - messages_to_save: list[ChatMessage] = [] - - # Add assistant message with tool_calls if any - if accumulated_tool_calls: - assistant_response.tool_calls = accumulated_tool_calls + # Only save if we haven't already saved when StreamFinish was received + if not has_saved_assistant_message: logger.info( - f"Added {len(accumulated_tool_calls)} tool calls to assistant message" - ) - if assistant_response.content or assistant_response.tool_calls: - messages_to_save.append(assistant_response) - logger.info( - f"Saving assistant message with content_len={len(assistant_response.content or '')}, tool_calls={len(assistant_response.tool_calls or [])}" + f"Normal completion path: session={session.session_id}, " + f"current message_count={len(session.messages)}" ) - # Add tool response messages after assistant message - messages_to_save.extend(tool_response_messages) - logger.info( - f"Saving {len(tool_response_messages)} tool response messages, " - f"total_to_save={len(messages_to_save)}" - ) + # Build the messages list in the correct order + messages_to_save: list[ChatMessage] = [] - session.messages.extend(messages_to_save) - logger.info( - f"Extended session messages, new 
message_count={len(session.messages)}" - ) - await upsert_chat_session(session) + # Add assistant message with tool_calls if any + if accumulated_tool_calls: + assistant_response.tool_calls = accumulated_tool_calls + logger.info( + f"Added {len(accumulated_tool_calls)} tool calls to assistant message" + ) + if not has_appended_streaming_message and ( + assistant_response.content or assistant_response.tool_calls + ): + messages_to_save.append(assistant_response) + logger.info( + f"Saving assistant message with content_len={len(assistant_response.content or '')}, tool_calls={len(assistant_response.tool_calls or [])}" + ) + + # Add tool response messages after assistant message + messages_to_save.extend(tool_response_messages) + logger.info( + f"Saving {len(tool_response_messages)} tool response messages, " + f"total_to_save={len(messages_to_save)}" + ) + + if messages_to_save: + session.messages.extend(messages_to_save) + logger.info( + f"Extended session messages, new message_count={len(session.messages)}" + ) + if messages_to_save or has_appended_streaming_message: + await upsert_chat_session(session) + else: + logger.info( + "Assistant message already saved when StreamFinish was received, " + "skipping duplicate save" + ) # If we did a tool call, stream the chat completion again to get the next response if has_done_tool_call: @@ -545,6 +644,12 @@ def _is_retryable_error(error: Exception) -> bool: return False +def _is_region_blocked_error(error: Exception) -> bool: + if isinstance(error, PermissionDeniedError): + return "not available in your region" in str(error).lower() + return "not available in your region" in str(error).lower() + + async def _stream_chat_chunks( session: ChatSession, tools: list[ChatCompletionToolParam], @@ -737,7 +842,18 @@ async def _stream_chat_chunks( f"Error in stream (not retrying): {e!s}", exc_info=True, ) - error_response = StreamError(errorText=str(e)) + error_code = None + error_text = str(e) + if _is_region_blocked_error(e): + 
error_code = "MODEL_NOT_AVAILABLE_REGION" + error_text = ( + "This model is not available in your region. " + "Please connect via VPN and try again." + ) + error_response = StreamError( + errorText=error_text, + code=error_code, + ) yield error_response yield StreamFinish() return diff --git a/autogpt_platform/frontend/CONTRIBUTING.md b/autogpt_platform/frontend/CONTRIBUTING.md index 1b2b810986..649bb1ca92 100644 --- a/autogpt_platform/frontend/CONTRIBUTING.md +++ b/autogpt_platform/frontend/CONTRIBUTING.md @@ -175,6 +175,8 @@ While server components and actions are cool and cutting-edge, they introduce a - Prefer [React Query](https://tanstack.com/query/latest/docs/framework/react/overview) for server state, colocated near consumers (see [state colocation](https://kentcdodds.com/blog/state-colocation-will-make-your-react-app-faster)) - Co-locate UI state inside components/hooks; keep global state minimal +- Avoid `useMemo` and `useCallback` unless you have a measured performance issue +- Do not abuse `useEffect`; prefer state colocation and derive values directly when possible ### Styling and components @@ -549,9 +551,48 @@ Files: Types: - Prefer `interface` for object shapes -- Component props should be `interface Props { ... }` +- Component props should be `interface Props { ... }` (not exported) +- Only use specific exported names (e.g., `export interface MyComponentProps`) when the interface needs to be used outside the component +- Keep type definitions inline with the component - do not create separate `types.ts` files unless types are shared across multiple files - Use precise types; avoid `any` and unsafe casts +**Props naming examples:** + +```tsx +// βœ… Good - internal props, not exported +interface Props { + title: string; + onClose: () => void; +} + +export function Modal({ title, onClose }: Props) { + // ... 
+} + +// βœ… Good - exported when needed externally +export interface ModalProps { + title: string; + onClose: () => void; +} + +export function Modal({ title, onClose }: ModalProps) { + // ... +} + +// ❌ Bad - unnecessarily specific name for internal use +interface ModalComponentProps { + title: string; + onClose: () => void; +} + +// ❌ Bad - separate types.ts file for single component +// types.ts +export interface ModalProps { ... } + +// Modal.tsx +import type { ModalProps } from './types'; +``` + Parameters: - If more than one parameter is needed, pass a single `Args` object for clarity diff --git a/autogpt_platform/frontend/src/app/(no-navbar)/logout/page.tsx b/autogpt_platform/frontend/src/app/(no-navbar)/logout/page.tsx new file mode 100644 index 0000000000..ef3dc03f1a --- /dev/null +++ b/autogpt_platform/frontend/src/app/(no-navbar)/logout/page.tsx @@ -0,0 +1,58 @@ +"use client"; + +import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner"; +import { Text } from "@/components/atoms/Text/Text"; +import { useToast } from "@/components/molecules/Toast/use-toast"; +import { useSupabase } from "@/lib/supabase/hooks/useSupabase"; +import { useRouter } from "next/navigation"; +import { useEffect, useRef } from "react"; + +const LOGOUT_REDIRECT_DELAY_MS = 400; + +function wait(ms: number): Promise { + return new Promise(function resolveAfterDelay(resolve) { + setTimeout(resolve, ms); + }); +} + +export default function LogoutPage() { + const { logOut } = useSupabase(); + const { toast } = useToast(); + const router = useRouter(); + const hasStartedRef = useRef(false); + + useEffect( + function handleLogoutEffect() { + if (hasStartedRef.current) return; + hasStartedRef.current = true; + + async function runLogout() { + try { + await logOut(); + } catch { + toast({ + title: "Failed to log out. 
Redirecting to login.", + variant: "destructive", + }); + } finally { + await wait(LOGOUT_REDIRECT_DELAY_MS); + router.replace("/login"); + } + } + + void runLogout(); + }, + [logOut, router, toast], + ); + + return ( +
+
+ + + Logging you out... + +
+
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/auth/callback/route.ts b/autogpt_platform/frontend/src/app/(platform)/auth/callback/route.ts index 13f8d988fe..a6a07a703f 100644 --- a/autogpt_platform/frontend/src/app/(platform)/auth/callback/route.ts +++ b/autogpt_platform/frontend/src/app/(platform)/auth/callback/route.ts @@ -9,7 +9,7 @@ export async function GET(request: Request) { const { searchParams, origin } = new URL(request.url); const code = searchParams.get("code"); - let next = "/marketplace"; + let next = "/"; if (code) { const supabase = await getServerSupabase(); diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/Chat.tsx b/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/Chat.tsx deleted file mode 100644 index 461c885dc3..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/Chat.tsx +++ /dev/null @@ -1,134 +0,0 @@ -"use client"; - -import { Button } from "@/components/atoms/Button/Button"; -import { Text } from "@/components/atoms/Text/Text"; -import { cn } from "@/lib/utils"; -import { List } from "@phosphor-icons/react"; -import React, { useState } from "react"; -import { ChatContainer } from "./components/ChatContainer/ChatContainer"; -import { ChatErrorState } from "./components/ChatErrorState/ChatErrorState"; -import { ChatLoadingState } from "./components/ChatLoadingState/ChatLoadingState"; -import { SessionsDrawer } from "./components/SessionsDrawer/SessionsDrawer"; -import { useChat } from "./useChat"; - -export interface ChatProps { - className?: string; - headerTitle?: React.ReactNode; - showHeader?: boolean; - showSessionInfo?: boolean; - showNewChatButton?: boolean; - onNewChat?: () => void; - headerActions?: React.ReactNode; -} - -export function Chat({ - className, - headerTitle = "AutoGPT Copilot", - showHeader = true, - showSessionInfo = true, - showNewChatButton = true, - onNewChat, - headerActions, -}: ChatProps) { - const { - messages, - 
isLoading, - isCreating, - error, - sessionId, - createSession, - clearSession, - loadSession, - } = useChat(); - - const [isSessionsDrawerOpen, setIsSessionsDrawerOpen] = useState(false); - - const handleNewChat = () => { - clearSession(); - onNewChat?.(); - }; - - const handleSelectSession = async (sessionId: string) => { - try { - await loadSession(sessionId); - } catch (err) { - console.error("Failed to load session:", err); - } - }; - - return ( -
- {/* Header */} - {showHeader && ( -
-
-
- - {typeof headerTitle === "string" ? ( - - {headerTitle} - - ) : ( - headerTitle - )} -
-
- {showSessionInfo && sessionId && ( - <> - {showNewChatButton && ( - - )} - - )} - {headerActions} -
-
-
- )} - - {/* Main Content */} -
- {/* Loading State - show when explicitly loading/creating OR when we don't have a session yet and no error */} - {(isLoading || isCreating || (!sessionId && !error)) && ( - - )} - - {/* Error State */} - {error && !isLoading && ( - - )} - - {/* Session Content */} - {sessionId && !isLoading && !error && ( - - )} -
- - {/* Sessions Drawer */} - setIsSessionsDrawerOpen(false)} - onSelectSession={handleSelectSession} - currentSessionId={sessionId} - /> -
- ); -} diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatContainer/ChatContainer.tsx b/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatContainer/ChatContainer.tsx deleted file mode 100644 index 6f7a0e8f51..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatContainer/ChatContainer.tsx +++ /dev/null @@ -1,88 +0,0 @@ -import type { SessionDetailResponse } from "@/app/api/__generated__/models/sessionDetailResponse"; -import { cn } from "@/lib/utils"; -import { useCallback } from "react"; -import { usePageContext } from "../../usePageContext"; -import { ChatInput } from "../ChatInput/ChatInput"; -import { MessageList } from "../MessageList/MessageList"; -import { QuickActionsWelcome } from "../QuickActionsWelcome/QuickActionsWelcome"; -import { useChatContainer } from "./useChatContainer"; - -export interface ChatContainerProps { - sessionId: string | null; - initialMessages: SessionDetailResponse["messages"]; - className?: string; -} - -export function ChatContainer({ - sessionId, - initialMessages, - className, -}: ChatContainerProps) { - const { messages, streamingChunks, isStreaming, sendMessage } = - useChatContainer({ - sessionId, - initialMessages, - }); - const { capturePageContext } = usePageContext(); - - // Wrap sendMessage to automatically capture page context - const sendMessageWithContext = useCallback( - async (content: string, isUserMessage: boolean = true) => { - const context = capturePageContext(); - await sendMessage(content, isUserMessage, context); - }, - [sendMessage, capturePageContext], - ); - - const quickActions = [ - "Find agents for social media management", - "Show me agents for content creation", - "Help me automate my business", - "What can you help me with?", - ]; - - return ( -
- {/* Messages or Welcome Screen */} -
- {messages.length === 0 ? ( - - ) : ( - - )} -
- - {/* Input - Always visible */} -
- -
-
- ); -} diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatInput/ChatInput.tsx b/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatInput/ChatInput.tsx deleted file mode 100644 index 3101174a11..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatInput/ChatInput.tsx +++ /dev/null @@ -1,64 +0,0 @@ -import { Input } from "@/components/atoms/Input/Input"; -import { cn } from "@/lib/utils"; -import { ArrowUpIcon } from "@phosphor-icons/react"; -import { useChatInput } from "./useChatInput"; - -export interface ChatInputProps { - onSend: (message: string) => void; - disabled?: boolean; - placeholder?: string; - className?: string; -} - -export function ChatInput({ - onSend, - disabled = false, - placeholder = "Type your message...", - className, -}: ChatInputProps) { - const inputId = "chat-input"; - const { value, setValue, handleKeyDown, handleSend } = useChatInput({ - onSend, - disabled, - maxRows: 5, - inputId, - }); - - return ( -
- setValue(e.target.value)} - onKeyDown={handleKeyDown} - placeholder={placeholder} - disabled={disabled} - rows={1} - wrapperClassName="mb-0 relative" - className="pr-12" - /> - - Press Enter to send, Shift+Enter for new line - - - -
- ); -} diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatInput/useChatInput.ts b/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatInput/useChatInput.ts deleted file mode 100644 index 08cf565daa..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatInput/useChatInput.ts +++ /dev/null @@ -1,60 +0,0 @@ -import { KeyboardEvent, useCallback, useEffect, useState } from "react"; - -interface UseChatInputArgs { - onSend: (message: string) => void; - disabled?: boolean; - maxRows?: number; - inputId?: string; -} - -export function useChatInput({ - onSend, - disabled = false, - maxRows = 5, - inputId = "chat-input", -}: UseChatInputArgs) { - const [value, setValue] = useState(""); - - useEffect(() => { - const textarea = document.getElementById(inputId) as HTMLTextAreaElement; - if (!textarea) return; - textarea.style.height = "auto"; - const lineHeight = parseInt( - window.getComputedStyle(textarea).lineHeight, - 10, - ); - const maxHeight = lineHeight * maxRows; - const newHeight = Math.min(textarea.scrollHeight, maxHeight); - textarea.style.height = `${newHeight}px`; - textarea.style.overflowY = - textarea.scrollHeight > maxHeight ? 
"auto" : "hidden"; - }, [value, maxRows, inputId]); - - const handleSend = useCallback(() => { - if (disabled || !value.trim()) return; - onSend(value.trim()); - setValue(""); - const textarea = document.getElementById(inputId) as HTMLTextAreaElement; - if (textarea) { - textarea.style.height = "auto"; - } - }, [value, onSend, disabled, inputId]); - - const handleKeyDown = useCallback( - (event: KeyboardEvent) => { - if (event.key === "Enter" && !event.shiftKey) { - event.preventDefault(); - handleSend(); - } - // Shift+Enter allows default behavior (new line) - no need to handle explicitly - }, - [handleSend], - ); - - return { - value, - setValue, - handleKeyDown, - handleSend, - }; -} diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/MessageList/MessageList.tsx b/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/MessageList/MessageList.tsx deleted file mode 100644 index 22b51c0a92..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/MessageList/MessageList.tsx +++ /dev/null @@ -1,121 +0,0 @@ -"use client"; - -import { cn } from "@/lib/utils"; -import { ChatMessage } from "../ChatMessage/ChatMessage"; -import type { ChatMessageData } from "../ChatMessage/useChatMessage"; -import { StreamingMessage } from "../StreamingMessage/StreamingMessage"; -import { ThinkingMessage } from "../ThinkingMessage/ThinkingMessage"; -import { useMessageList } from "./useMessageList"; - -export interface MessageListProps { - messages: ChatMessageData[]; - streamingChunks?: string[]; - isStreaming?: boolean; - className?: string; - onStreamComplete?: () => void; - onSendMessage?: (content: string) => void; -} - -export function MessageList({ - messages, - streamingChunks = [], - isStreaming = false, - className, - onStreamComplete, - onSendMessage, -}: MessageListProps) { - const { messagesEndRef, messagesContainerRef } = useMessageList({ - messageCount: messages.length, - 
isStreaming, - }); - - return ( -
-
- {/* Render all persisted messages */} - {messages.map((message, index) => { - // Check if current message is an agent_output tool_response - // and if previous message is an assistant message - let agentOutput: ChatMessageData | undefined; - - if (message.type === "tool_response" && message.result) { - let parsedResult: Record | null = null; - try { - parsedResult = - typeof message.result === "string" - ? JSON.parse(message.result) - : (message.result as Record); - } catch { - parsedResult = null; - } - if (parsedResult?.type === "agent_output") { - const prevMessage = messages[index - 1]; - if ( - prevMessage && - prevMessage.type === "message" && - prevMessage.role === "assistant" - ) { - // This agent output will be rendered inside the previous assistant message - // Skip rendering this message separately - return null; - } - } - } - - // Check if next message is an agent_output tool_response to include in current assistant message - if (message.type === "message" && message.role === "assistant") { - const nextMessage = messages[index + 1]; - if ( - nextMessage && - nextMessage.type === "tool_response" && - nextMessage.result - ) { - let parsedResult: Record | null = null; - try { - parsedResult = - typeof nextMessage.result === "string" - ? JSON.parse(nextMessage.result) - : (nextMessage.result as Record); - } catch { - parsedResult = null; - } - if (parsedResult?.type === "agent_output") { - agentOutput = nextMessage; - } - } - } - - return ( - - ); - })} - - {/* Render thinking message when streaming but no chunks yet */} - {isStreaming && streamingChunks.length === 0 && } - - {/* Render streaming message if active */} - {isStreaming && streamingChunks.length > 0 && ( - - )} - - {/* Invisible div to scroll to */} -
-
-
- ); -} diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ToolCallMessage/ToolCallMessage.tsx b/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ToolCallMessage/ToolCallMessage.tsx deleted file mode 100644 index 97590ae0cf..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ToolCallMessage/ToolCallMessage.tsx +++ /dev/null @@ -1,24 +0,0 @@ -import { Text } from "@/components/atoms/Text/Text"; -import { cn } from "@/lib/utils"; -import { WrenchIcon } from "@phosphor-icons/react"; -import { getToolActionPhrase } from "../../helpers"; - -export interface ToolCallMessageProps { - toolName: string; - className?: string; -} - -export function ToolCallMessage({ toolName, className }: ToolCallMessageProps) { - return ( -
- - - {getToolActionPhrase(toolName)}... - -
- ); -} diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ToolResponseMessage/ToolResponseMessage.tsx b/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ToolResponseMessage/ToolResponseMessage.tsx deleted file mode 100644 index b84204c3ff..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ToolResponseMessage/ToolResponseMessage.tsx +++ /dev/null @@ -1,260 +0,0 @@ -import { Text } from "@/components/atoms/Text/Text"; -import "@/components/contextual/OutputRenderers"; -import { - globalRegistry, - OutputItem, -} from "@/components/contextual/OutputRenderers"; -import { cn } from "@/lib/utils"; -import type { ToolResult } from "@/types/chat"; -import { WrenchIcon } from "@phosphor-icons/react"; -import { getToolActionPhrase } from "../../helpers"; - -export interface ToolResponseMessageProps { - toolName: string; - result?: ToolResult; - success?: boolean; - className?: string; -} - -export function ToolResponseMessage({ - toolName, - result, - success: _success = true, - className, -}: ToolResponseMessageProps) { - if (!result) { - return ( -
- - - {getToolActionPhrase(toolName)}... - -
- ); - } - - let parsedResult: Record | null = null; - try { - parsedResult = - typeof result === "string" - ? JSON.parse(result) - : (result as Record); - } catch { - parsedResult = null; - } - - if (parsedResult && typeof parsedResult === "object") { - const responseType = parsedResult.type as string | undefined; - - if (responseType === "agent_output") { - const execution = parsedResult.execution as - | { - outputs?: Record; - } - | null - | undefined; - const outputs = execution?.outputs || {}; - const message = parsedResult.message as string | undefined; - - return ( -
-
- - - {getToolActionPhrase(toolName)} - -
- {message && ( -
- - {message} - -
- )} - {Object.keys(outputs).length > 0 && ( -
- {Object.entries(outputs).map(([outputName, values]) => - values.map((value, index) => { - const renderer = globalRegistry.getRenderer(value); - if (renderer) { - return ( - - ); - } - return ( -
- - {outputName} - -
-                        {JSON.stringify(value, null, 2)}
-                      
-
- ); - }), - )} -
- )} -
- ); - } - - if (responseType === "block_output" && parsedResult.outputs) { - const outputs = parsedResult.outputs as Record; - - return ( -
-
- - - {getToolActionPhrase(toolName)} - -
-
- {Object.entries(outputs).map(([outputName, values]) => - values.map((value, index) => { - const renderer = globalRegistry.getRenderer(value); - if (renderer) { - return ( - - ); - } - return ( -
- - {outputName} - -
-                      {JSON.stringify(value, null, 2)}
-                    
-
- ); - }), - )} -
-
- ); - } - - // Handle other response types with a message field (e.g., understanding_updated) - if (parsedResult.message && typeof parsedResult.message === "string") { - // Format tool name from snake_case to Title Case - const formattedToolName = toolName - .split("_") - .map((word) => word.charAt(0).toUpperCase() + word.slice(1)) - .join(" "); - - // Clean up message - remove incomplete user_name references - let cleanedMessage = parsedResult.message; - // Remove "Updated understanding with: user_name" pattern if user_name is just a placeholder - cleanedMessage = cleanedMessage.replace( - /Updated understanding with:\s*user_name\.?\s*/gi, - "", - ); - // Remove standalone user_name references - cleanedMessage = cleanedMessage.replace(/\buser_name\b\.?\s*/gi, ""); - cleanedMessage = cleanedMessage.trim(); - - // Only show message if it has content after cleaning - if (!cleanedMessage) { - return ( -
- - - {formattedToolName} - -
- ); - } - - return ( -
-
- - - {formattedToolName} - -
-
- - {cleanedMessage} - -
-
- ); - } - } - - const renderer = globalRegistry.getRenderer(result); - if (renderer) { - return ( -
-
- - - {getToolActionPhrase(toolName)} - -
- -
- ); - } - - return ( -
- - - {getToolActionPhrase(toolName)}... - -
- ); -} diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/helpers.ts b/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/helpers.ts deleted file mode 100644 index 0fade56b73..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/helpers.ts +++ /dev/null @@ -1,66 +0,0 @@ -/** - * Maps internal tool names to user-friendly display names with emojis. - * @deprecated Use getToolActionPhrase or getToolCompletionPhrase for status messages - * - * @param toolName - The internal tool name from the backend - * @returns A user-friendly display name with an emoji prefix - */ -export function getToolDisplayName(toolName: string): string { - const toolDisplayNames: Record = { - find_agent: "πŸ” Search Marketplace", - get_agent_details: "πŸ“‹ Get Agent Details", - check_credentials: "πŸ”‘ Check Credentials", - setup_agent: "βš™οΈ Setup Agent", - run_agent: "▢️ Run Agent", - get_required_setup_info: "πŸ“ Get Setup Requirements", - }; - return toolDisplayNames[toolName] || toolName; -} - -/** - * Maps internal tool names to human-friendly action phrases (present continuous). - * Used for tool call messages to indicate what action is currently happening. 
- * - * @param toolName - The internal tool name from the backend - * @returns A human-friendly action phrase in present continuous tense - */ -export function getToolActionPhrase(toolName: string): string { - const toolActionPhrases: Record = { - find_agent: "Looking for agents in the marketplace", - agent_carousel: "Looking for agents in the marketplace", - get_agent_details: "Learning about the agent", - check_credentials: "Checking your credentials", - setup_agent: "Setting up the agent", - execution_started: "Running the agent", - run_agent: "Running the agent", - get_required_setup_info: "Getting setup requirements", - schedule_agent: "Scheduling the agent to run", - }; - - // Return mapped phrase or generate human-friendly fallback - return toolActionPhrases[toolName] || toolName; -} - -/** - * Maps internal tool names to human-friendly completion phrases (past tense). - * Used for tool response messages to indicate what action was completed. - * - * @param toolName - The internal tool name from the backend - * @returns A human-friendly completion phrase in past tense - */ -export function getToolCompletionPhrase(toolName: string): string { - const toolCompletionPhrases: Record = { - find_agent: "Finished searching the marketplace", - get_agent_details: "Got agent details", - check_credentials: "Checked credentials", - setup_agent: "Agent setup complete", - run_agent: "Agent execution started", - get_required_setup_info: "Got setup requirements", - }; - - // Return mapped phrase or generate human-friendly fallback - return ( - toolCompletionPhrases[toolName] || - `Finished ${toolName.replace(/_/g, " ").replace("...", "")}` - ); -} diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/useChatSession.ts b/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/useChatSession.ts deleted file mode 100644 index a54dc9e32a..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/useChatSession.ts +++ 
/dev/null @@ -1,271 +0,0 @@ -import { - getGetV2GetSessionQueryKey, - getGetV2GetSessionQueryOptions, - postV2CreateSession, - useGetV2GetSession, - usePatchV2SessionAssignUser, - usePostV2CreateSession, -} from "@/app/api/__generated__/endpoints/chat/chat"; -import type { SessionDetailResponse } from "@/app/api/__generated__/models/sessionDetailResponse"; -import { okData } from "@/app/api/helpers"; -import { isValidUUID } from "@/lib/utils"; -import { Key, storage } from "@/services/storage/local-storage"; -import { useQueryClient } from "@tanstack/react-query"; -import { useCallback, useEffect, useMemo, useRef, useState } from "react"; -import { toast } from "sonner"; - -interface UseChatSessionArgs { - urlSessionId?: string | null; - autoCreate?: boolean; -} - -export function useChatSession({ - urlSessionId, - autoCreate = false, -}: UseChatSessionArgs = {}) { - const queryClient = useQueryClient(); - const [sessionId, setSessionId] = useState(null); - const [error, setError] = useState(null); - const justCreatedSessionIdRef = useRef(null); - - useEffect(() => { - if (urlSessionId) { - if (!isValidUUID(urlSessionId)) { - console.error("Invalid session ID format:", urlSessionId); - toast.error("Invalid session ID", { - description: - "The session ID in the URL is not valid. 
Starting a new session...", - }); - setSessionId(null); - storage.clean(Key.CHAT_SESSION_ID); - return; - } - setSessionId(urlSessionId); - storage.set(Key.CHAT_SESSION_ID, urlSessionId); - } else { - const storedSessionId = storage.get(Key.CHAT_SESSION_ID); - if (storedSessionId) { - if (!isValidUUID(storedSessionId)) { - console.error("Invalid stored session ID:", storedSessionId); - storage.clean(Key.CHAT_SESSION_ID); - setSessionId(null); - } else { - setSessionId(storedSessionId); - } - } else if (autoCreate) { - setSessionId(null); - } - } - }, [urlSessionId, autoCreate]); - - const { - mutateAsync: createSessionMutation, - isPending: isCreating, - error: createError, - } = usePostV2CreateSession(); - - const { - data: sessionData, - isLoading: isLoadingSession, - error: loadError, - refetch, - } = useGetV2GetSession(sessionId || "", { - query: { - enabled: !!sessionId, - select: okData, - staleTime: Infinity, // Never mark as stale - refetchOnMount: false, // Don't refetch on component mount - refetchOnWindowFocus: false, // Don't refetch when window regains focus - refetchOnReconnect: false, // Don't refetch when network reconnects - retry: 1, - }, - }); - - const { mutateAsync: claimSessionMutation } = usePatchV2SessionAssignUser(); - - const session = useMemo(() => { - if (sessionData) return sessionData; - - if (sessionId && justCreatedSessionIdRef.current === sessionId) { - return { - id: sessionId, - user_id: null, - messages: [], - created_at: new Date().toISOString(), - updated_at: new Date().toISOString(), - } as SessionDetailResponse; - } - return null; - }, [sessionData, sessionId]); - - const messages = session?.messages || []; - const isLoading = isCreating || isLoadingSession; - - useEffect(() => { - if (createError) { - setError( - createError instanceof Error - ? createError - : new Error("Failed to create session"), - ); - } else if (loadError) { - setError( - loadError instanceof Error - ? 
loadError - : new Error("Failed to load session"), - ); - } else { - setError(null); - } - }, [createError, loadError]); - - const createSession = useCallback( - async function createSession() { - try { - setError(null); - const response = await postV2CreateSession({ - body: JSON.stringify({}), - }); - if (response.status !== 200) { - throw new Error("Failed to create session"); - } - const newSessionId = response.data.id; - setSessionId(newSessionId); - storage.set(Key.CHAT_SESSION_ID, newSessionId); - justCreatedSessionIdRef.current = newSessionId; - setTimeout(() => { - if (justCreatedSessionIdRef.current === newSessionId) { - justCreatedSessionIdRef.current = null; - } - }, 10000); - return newSessionId; - } catch (err) { - const error = - err instanceof Error ? err : new Error("Failed to create session"); - setError(error); - toast.error("Failed to create chat session", { - description: error.message, - }); - throw error; - } - }, - [createSessionMutation], - ); - - const loadSession = useCallback( - async function loadSession(id: string) { - try { - setError(null); - // Invalidate the query cache for this session to force a fresh fetch - await queryClient.invalidateQueries({ - queryKey: getGetV2GetSessionQueryKey(id), - }); - // Set sessionId after invalidation to ensure the hook refetches - setSessionId(id); - storage.set(Key.CHAT_SESSION_ID, id); - // Force fetch with fresh data (bypass cache) - const queryOptions = getGetV2GetSessionQueryOptions(id, { - query: { - staleTime: 0, // Force fresh fetch - retry: 1, - }, - }); - const result = await queryClient.fetchQuery(queryOptions); - if (!result || ("status" in result && result.status !== 200)) { - console.warn("Session not found on server, clearing local state"); - storage.clean(Key.CHAT_SESSION_ID); - setSessionId(null); - throw new Error("Session not found"); - } - } catch (err) { - const error = - err instanceof Error ? 
err : new Error("Failed to load session"); - setError(error); - throw error; - } - }, - [queryClient], - ); - - const refreshSession = useCallback( - async function refreshSession() { - if (!sessionId) { - console.log("[refreshSession] Skipping - no session ID"); - return; - } - try { - setError(null); - await refetch(); - } catch (err) { - const error = - err instanceof Error ? err : new Error("Failed to refresh session"); - setError(error); - throw error; - } - }, - [sessionId, refetch], - ); - - const claimSession = useCallback( - async function claimSession(id: string) { - try { - setError(null); - await claimSessionMutation({ sessionId: id }); - if (justCreatedSessionIdRef.current === id) { - justCreatedSessionIdRef.current = null; - } - await queryClient.invalidateQueries({ - queryKey: getGetV2GetSessionQueryKey(id), - }); - await refetch(); - toast.success("Session claimed successfully", { - description: "Your chat history has been saved to your account", - }); - } catch (err: unknown) { - const error = - err instanceof Error ? 
err : new Error("Failed to claim session"); - const is404 = - (typeof err === "object" && - err !== null && - "status" in err && - err.status === 404) || - (typeof err === "object" && - err !== null && - "response" in err && - typeof err.response === "object" && - err.response !== null && - "status" in err.response && - err.response.status === 404); - if (!is404) { - setError(error); - toast.error("Failed to claim session", { - description: error.message || "Unable to claim session", - }); - } - throw error; - } - }, - [claimSessionMutation, queryClient, refetch], - ); - - const clearSession = useCallback(function clearSession() { - setSessionId(null); - setError(null); - storage.clean(Key.CHAT_SESSION_ID); - justCreatedSessionIdRef.current = null; - }, []); - - return { - session, - sessionId, - messages, - isLoading, - isCreating, - error, - createSession, - loadSession, - refreshSession, - claimSession, - clearSession, - }; -} diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/page.tsx b/autogpt_platform/frontend/src/app/(platform)/chat/page.tsx deleted file mode 100644 index 9c04e40594..0000000000 --- a/autogpt_platform/frontend/src/app/(platform)/chat/page.tsx +++ /dev/null @@ -1,27 +0,0 @@ -"use client"; - -import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag"; -import { useRouter } from "next/navigation"; -import { useEffect } from "react"; -import { Chat } from "./components/Chat/Chat"; - -export default function ChatPage() { - const isChatEnabled = useGetFlag(Flag.CHAT); - const router = useRouter(); - - useEffect(() => { - if (isChatEnabled === false) { - router.push("/marketplace"); - } - }, [isChatEnabled, router]); - - if (isChatEnabled === null || isChatEnabled === false) { - return null; - } - - return ( -
- -
- ); -} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/CopilotShell.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/CopilotShell.tsx new file mode 100644 index 0000000000..03a2ff5db0 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/CopilotShell.tsx @@ -0,0 +1,88 @@ +"use client"; + +import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner"; +import { NAVBAR_HEIGHT_PX } from "@/lib/constants"; +import type { ReactNode } from "react"; +import { DesktopSidebar } from "./components/DesktopSidebar/DesktopSidebar"; +import { LoadingState } from "./components/LoadingState/LoadingState"; +import { MobileDrawer } from "./components/MobileDrawer/MobileDrawer"; +import { MobileHeader } from "./components/MobileHeader/MobileHeader"; +import { useCopilotShell } from "./useCopilotShell"; + +interface Props { + children: ReactNode; +} + +export function CopilotShell({ children }: Props) { + const { + isMobile, + isDrawerOpen, + isLoading, + isLoggedIn, + hasActiveSession, + sessions, + currentSessionId, + handleSelectSession, + handleOpenDrawer, + handleCloseDrawer, + handleDrawerOpenChange, + handleNewChat, + hasNextPage, + isFetchingNextPage, + fetchNextPage, + isReadyToShowContent, + } = useCopilotShell(); + + if (!isLoggedIn) { + return ( +
+ +
+ ); + } + + return ( +
+ {!isMobile && ( + + )} + +
+ {isMobile && } +
+ {isReadyToShowContent ? children : } +
+
+ + {isMobile && ( + + )} +
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/DesktopSidebar/DesktopSidebar.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/DesktopSidebar/DesktopSidebar.tsx new file mode 100644 index 0000000000..122a09a02f --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/DesktopSidebar/DesktopSidebar.tsx @@ -0,0 +1,70 @@ +import type { SessionSummaryResponse } from "@/app/api/__generated__/models/sessionSummaryResponse"; +import { Button } from "@/components/atoms/Button/Button"; +import { Text } from "@/components/atoms/Text/Text"; +import { scrollbarStyles } from "@/components/styles/scrollbars"; +import { cn } from "@/lib/utils"; +import { Plus } from "@phosphor-icons/react"; +import { SessionsList } from "../SessionsList/SessionsList"; + +interface Props { + sessions: SessionSummaryResponse[]; + currentSessionId: string | null; + isLoading: boolean; + hasNextPage: boolean; + isFetchingNextPage: boolean; + onSelectSession: (sessionId: string) => void; + onFetchNextPage: () => void; + onNewChat: () => void; + hasActiveSession: boolean; +} + +export function DesktopSidebar({ + sessions, + currentSessionId, + isLoading, + hasNextPage, + isFetchingNextPage, + onSelectSession, + onFetchNextPage, + onNewChat, + hasActiveSession, +}: Props) { + return ( + + ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/LoadingState/LoadingState.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/LoadingState/LoadingState.tsx new file mode 100644 index 0000000000..21b1663916 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/LoadingState/LoadingState.tsx @@ -0,0 +1,15 @@ +import { Text } from "@/components/atoms/Text/Text"; +import { ChatLoader } from 
"@/components/contextual/Chat/components/ChatLoader/ChatLoader"; + +export function LoadingState() { + return ( +
+
+ + + Loading your chats... + +
+
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/MobileDrawer/MobileDrawer.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/MobileDrawer/MobileDrawer.tsx new file mode 100644 index 0000000000..ea3b39f829 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/MobileDrawer/MobileDrawer.tsx @@ -0,0 +1,91 @@ +import type { SessionSummaryResponse } from "@/app/api/__generated__/models/sessionSummaryResponse"; +import { Button } from "@/components/atoms/Button/Button"; +import { scrollbarStyles } from "@/components/styles/scrollbars"; +import { cn } from "@/lib/utils"; +import { PlusIcon, X } from "@phosphor-icons/react"; +import { Drawer } from "vaul"; +import { SessionsList } from "../SessionsList/SessionsList"; + +interface Props { + isOpen: boolean; + sessions: SessionSummaryResponse[]; + currentSessionId: string | null; + isLoading: boolean; + hasNextPage: boolean; + isFetchingNextPage: boolean; + onSelectSession: (sessionId: string) => void; + onFetchNextPage: () => void; + onNewChat: () => void; + onClose: () => void; + onOpenChange: (open: boolean) => void; + hasActiveSession: boolean; +} + +export function MobileDrawer({ + isOpen, + sessions, + currentSessionId, + isLoading, + hasNextPage, + isFetchingNextPage, + onSelectSession, + onFetchNextPage, + onNewChat, + onClose, + onOpenChange, + hasActiveSession, +}: Props) { + return ( + + + + +
+
+ + Your chats + + +
+
+
+ +
+ {hasActiveSession && ( +
+ +
+ )} +
+
+
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/MobileDrawer/useMobileDrawer.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/MobileDrawer/useMobileDrawer.ts new file mode 100644 index 0000000000..c9504e49a9 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/MobileDrawer/useMobileDrawer.ts @@ -0,0 +1,24 @@ +import { useState } from "react"; + +export function useMobileDrawer() { + const [isDrawerOpen, setIsDrawerOpen] = useState(false); + + function handleOpenDrawer() { + setIsDrawerOpen(true); + } + + function handleCloseDrawer() { + setIsDrawerOpen(false); + } + + function handleDrawerOpenChange(open: boolean) { + setIsDrawerOpen(open); + } + + return { + isDrawerOpen, + handleOpenDrawer, + handleCloseDrawer, + handleDrawerOpenChange, + }; +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/MobileHeader/MobileHeader.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/MobileHeader/MobileHeader.tsx new file mode 100644 index 0000000000..e0d6161744 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/MobileHeader/MobileHeader.tsx @@ -0,0 +1,22 @@ +import { Button } from "@/components/atoms/Button/Button"; +import { NAVBAR_HEIGHT_PX } from "@/lib/constants"; +import { ListIcon } from "@phosphor-icons/react"; + +interface Props { + onOpenDrawer: () => void; +} + +export function MobileHeader({ onOpenDrawer }: Props) { + return ( + + ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/SessionsList/SessionsList.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/SessionsList/SessionsList.tsx new file mode 100644 index 0000000000..ef63e1aff4 --- /dev/null +++ 
b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/SessionsList/SessionsList.tsx @@ -0,0 +1,80 @@ +import type { SessionSummaryResponse } from "@/app/api/__generated__/models/sessionSummaryResponse"; +import { Skeleton } from "@/components/__legacy__/ui/skeleton"; +import { Text } from "@/components/atoms/Text/Text"; +import { InfiniteList } from "@/components/molecules/InfiniteList/InfiniteList"; +import { cn } from "@/lib/utils"; +import { getSessionTitle } from "../../helpers"; + +interface Props { + sessions: SessionSummaryResponse[]; + currentSessionId: string | null; + isLoading: boolean; + hasNextPage: boolean; + isFetchingNextPage: boolean; + onSelectSession: (sessionId: string) => void; + onFetchNextPage: () => void; +} + +export function SessionsList({ + sessions, + currentSessionId, + isLoading, + hasNextPage, + isFetchingNextPage, + onSelectSession, + onFetchNextPage, +}: Props) { + if (isLoading) { + return ( +
+ {Array.from({ length: 5 }).map((_, i) => ( +
+ +
+ ))} +
+ ); + } + + if (sessions.length === 0) { + return ( +
+ + You don't have previous chats + +
+ ); + } + + return ( + { + const isActive = session.id === currentSessionId; + return ( + + ); + }} + /> + ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/SessionsList/useSessionsPagination.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/SessionsList/useSessionsPagination.ts new file mode 100644 index 0000000000..8833a419c1 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/components/SessionsList/useSessionsPagination.ts @@ -0,0 +1,92 @@ +import { useGetV2ListSessions } from "@/app/api/__generated__/endpoints/chat/chat"; +import type { SessionSummaryResponse } from "@/app/api/__generated__/models/sessionSummaryResponse"; +import { okData } from "@/app/api/helpers"; +import { useEffect, useMemo, useState } from "react"; + +const PAGE_SIZE = 50; + +export interface UseSessionsPaginationArgs { + enabled: boolean; +} + +export function useSessionsPagination({ enabled }: UseSessionsPaginationArgs) { + const [offset, setOffset] = useState(0); + const [accumulatedSessions, setAccumulatedSessions] = useState< + SessionSummaryResponse[] + >([]); + const [totalCount, setTotalCount] = useState(null); + + const { data, isLoading, isFetching, isError } = useGetV2ListSessions( + { limit: PAGE_SIZE, offset }, + { + query: { + enabled: enabled && offset >= 0, + }, + }, + ); + + useEffect(() => { + const responseData = okData(data); + if (responseData) { + const newSessions = responseData.sessions; + const total = responseData.total; + setTotalCount(total); + + if (offset === 0) { + setAccumulatedSessions(newSessions); + } else { + setAccumulatedSessions((prev) => [...prev, ...newSessions]); + } + } else if (!enabled) { + setAccumulatedSessions([]); + setTotalCount(null); + } + }, [data, offset, enabled]); + + const hasNextPage = useMemo(() => { + if (totalCount === null) return false; + return accumulatedSessions.length < 
totalCount; + }, [accumulatedSessions.length, totalCount]); + + const areAllSessionsLoaded = useMemo(() => { + if (totalCount === null) return false; + return ( + accumulatedSessions.length >= totalCount && !isFetching && !isLoading + ); + }, [accumulatedSessions.length, totalCount, isFetching, isLoading]); + + useEffect(() => { + if ( + hasNextPage && + !isFetching && + !isLoading && + !isError && + totalCount !== null + ) { + setOffset((prev) => prev + PAGE_SIZE); + } + }, [hasNextPage, isFetching, isLoading, isError, totalCount]); + + function fetchNextPage() { + if (hasNextPage && !isFetching) { + setOffset((prev) => prev + PAGE_SIZE); + } + } + + function reset() { + setOffset(0); + setAccumulatedSessions([]); + setTotalCount(null); + } + + return { + sessions: accumulatedSessions, + isLoading, + isFetching, + hasNextPage, + areAllSessionsLoaded, + totalCount, + fetchNextPage, + reset, + }; +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/helpers.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/helpers.ts new file mode 100644 index 0000000000..bf4eb70ccb --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/helpers.ts @@ -0,0 +1,165 @@ +import type { SessionDetailResponse } from "@/app/api/__generated__/models/sessionDetailResponse"; +import type { SessionSummaryResponse } from "@/app/api/__generated__/models/sessionSummaryResponse"; +import { format, formatDistanceToNow, isToday } from "date-fns"; + +export function convertSessionDetailToSummary( + session: SessionDetailResponse, +): SessionSummaryResponse { + return { + id: session.id, + created_at: session.created_at, + updated_at: session.updated_at, + title: undefined, + }; +} + +export function filterVisibleSessions( + sessions: SessionSummaryResponse[], +): SessionSummaryResponse[] { + return sessions.filter( + (session) => session.updated_at !== session.created_at, + ); +} + +export 
function getSessionTitle(session: SessionSummaryResponse): string { + if (session.title) return session.title; + const isNewSession = session.updated_at === session.created_at; + if (isNewSession) { + const createdDate = new Date(session.created_at); + if (isToday(createdDate)) { + return "Today"; + } + return format(createdDate, "MMM d, yyyy"); + } + return "Untitled Chat"; +} + +export function getSessionUpdatedLabel( + session: SessionSummaryResponse, +): string { + if (!session.updated_at) return ""; + return formatDistanceToNow(new Date(session.updated_at), { addSuffix: true }); +} + +export function mergeCurrentSessionIntoList( + accumulatedSessions: SessionSummaryResponse[], + currentSessionId: string | null, + currentSessionData: SessionDetailResponse | null | undefined, +): SessionSummaryResponse[] { + const filteredSessions: SessionSummaryResponse[] = []; + + if (accumulatedSessions.length > 0) { + const visibleSessions = filterVisibleSessions(accumulatedSessions); + + if (currentSessionId) { + const currentInAll = accumulatedSessions.find( + (s) => s.id === currentSessionId, + ); + if (currentInAll) { + const isInVisible = visibleSessions.some( + (s) => s.id === currentSessionId, + ); + if (!isInVisible) { + filteredSessions.push(currentInAll); + } + } + } + + filteredSessions.push(...visibleSessions); + } + + if (currentSessionId && currentSessionData) { + const isCurrentInList = filteredSessions.some( + (s) => s.id === currentSessionId, + ); + if (!isCurrentInList) { + const summarySession = convertSessionDetailToSummary(currentSessionData); + filteredSessions.unshift(summarySession); + } + } + + return filteredSessions; +} + +export function getCurrentSessionId( + searchParams: URLSearchParams, +): string | null { + return searchParams.get("sessionId"); +} + +export function shouldAutoSelectSession( + areAllSessionsLoaded: boolean, + hasAutoSelectedSession: boolean, + paramSessionId: string | null, + visibleSessions: SessionSummaryResponse[], + 
accumulatedSessions: SessionSummaryResponse[], + isLoading: boolean, + totalCount: number | null, +): { + shouldSelect: boolean; + sessionIdToSelect: string | null; + shouldCreate: boolean; +} { + if (!areAllSessionsLoaded || hasAutoSelectedSession) { + return { + shouldSelect: false, + sessionIdToSelect: null, + shouldCreate: false, + }; + } + + if (paramSessionId) { + return { + shouldSelect: false, + sessionIdToSelect: null, + shouldCreate: false, + }; + } + + if (visibleSessions.length > 0) { + return { + shouldSelect: true, + sessionIdToSelect: visibleSessions[0].id, + shouldCreate: false, + }; + } + + if (accumulatedSessions.length === 0 && !isLoading && totalCount === 0) { + return { shouldSelect: false, sessionIdToSelect: null, shouldCreate: true }; + } + + if (totalCount === 0) { + return { + shouldSelect: false, + sessionIdToSelect: null, + shouldCreate: false, + }; + } + + return { shouldSelect: false, sessionIdToSelect: null, shouldCreate: false }; +} + +export function checkReadyToShowContent( + areAllSessionsLoaded: boolean, + paramSessionId: string | null, + accumulatedSessions: SessionSummaryResponse[], + isCurrentSessionLoading: boolean, + currentSessionData: SessionDetailResponse | null | undefined, + hasAutoSelectedSession: boolean, +): boolean { + if (!areAllSessionsLoaded) return false; + + if (paramSessionId) { + const sessionFound = accumulatedSessions.some( + (s) => s.id === paramSessionId, + ); + return ( + sessionFound || + (!isCurrentSessionLoading && + currentSessionData !== undefined && + currentSessionData !== null) + ); + } + + return hasAutoSelectedSession; +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/useCopilotShell.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/useCopilotShell.ts new file mode 100644 index 0000000000..6003c64b73 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/components/CopilotShell/useCopilotShell.ts @@ -0,0 
+1,170 @@ +"use client"; + +import { + getGetV2ListSessionsQueryKey, + useGetV2GetSession, +} from "@/app/api/__generated__/endpoints/chat/chat"; +import { okData } from "@/app/api/helpers"; +import { useBreakpoint } from "@/lib/hooks/useBreakpoint"; +import { useSupabase } from "@/lib/supabase/hooks/useSupabase"; +import { useQueryClient } from "@tanstack/react-query"; +import { usePathname, useRouter, useSearchParams } from "next/navigation"; +import { useEffect, useRef, useState } from "react"; +import { useMobileDrawer } from "./components/MobileDrawer/useMobileDrawer"; +import { useSessionsPagination } from "./components/SessionsList/useSessionsPagination"; +import { + checkReadyToShowContent, + filterVisibleSessions, + getCurrentSessionId, + mergeCurrentSessionIntoList, +} from "./helpers"; + +export function useCopilotShell() { + const router = useRouter(); + const pathname = usePathname(); + const searchParams = useSearchParams(); + const queryClient = useQueryClient(); + const breakpoint = useBreakpoint(); + const { isLoggedIn } = useSupabase(); + const isMobile = + breakpoint === "base" || breakpoint === "sm" || breakpoint === "md"; + + const isOnHomepage = pathname === "/copilot"; + const paramSessionId = searchParams.get("sessionId"); + + const { + isDrawerOpen, + handleOpenDrawer, + handleCloseDrawer, + handleDrawerOpenChange, + } = useMobileDrawer(); + + const paginationEnabled = !isMobile || isDrawerOpen || !!paramSessionId; + + const { + sessions: accumulatedSessions, + isLoading: isSessionsLoading, + isFetching: isSessionsFetching, + hasNextPage, + areAllSessionsLoaded, + fetchNextPage, + reset: resetPagination, + } = useSessionsPagination({ + enabled: paginationEnabled, + }); + + const currentSessionId = getCurrentSessionId(searchParams); + + const { data: currentSessionData, isLoading: isCurrentSessionLoading } = + useGetV2GetSession(currentSessionId || "", { + query: { + enabled: !!currentSessionId, + select: okData, + }, + }); + + const 
[hasAutoSelectedSession, setHasAutoSelectedSession] = useState(false); + const hasAutoSelectedRef = useRef(false); + + // Mark as auto-selected when sessionId is in URL + useEffect(() => { + if (paramSessionId && !hasAutoSelectedRef.current) { + hasAutoSelectedRef.current = true; + setHasAutoSelectedSession(true); + } + }, [paramSessionId]); + + // On homepage without sessionId, mark as ready immediately + useEffect(() => { + if (isOnHomepage && !paramSessionId && !hasAutoSelectedRef.current) { + hasAutoSelectedRef.current = true; + setHasAutoSelectedSession(true); + } + }, [isOnHomepage, paramSessionId]); + + // Invalidate sessions list when navigating to homepage (to show newly created sessions) + useEffect(() => { + if (isOnHomepage && !paramSessionId) { + queryClient.invalidateQueries({ + queryKey: getGetV2ListSessionsQueryKey(), + }); + } + }, [isOnHomepage, paramSessionId, queryClient]); + + // Reset pagination when query becomes disabled + const prevPaginationEnabledRef = useRef(paginationEnabled); + useEffect(() => { + if (prevPaginationEnabledRef.current && !paginationEnabled) { + resetPagination(); + resetAutoSelect(); + } + prevPaginationEnabledRef.current = paginationEnabled; + }, [paginationEnabled, resetPagination]); + + const sessions = mergeCurrentSessionIntoList( + accumulatedSessions, + currentSessionId, + currentSessionData, + ); + + const visibleSessions = filterVisibleSessions(sessions); + + const sidebarSelectedSessionId = + isOnHomepage && !paramSessionId ? null : currentSessionId; + + const isReadyToShowContent = isOnHomepage + ? 
true + : checkReadyToShowContent( + areAllSessionsLoaded, + paramSessionId, + accumulatedSessions, + isCurrentSessionLoading, + currentSessionData, + hasAutoSelectedSession, + ); + + function handleSelectSession(sessionId: string) { + // Navigate using replaceState to avoid full page reload + window.history.replaceState(null, "", `/copilot?sessionId=${sessionId}`); + // Force a re-render by updating the URL through router + router.replace(`/copilot?sessionId=${sessionId}`); + if (isMobile) handleCloseDrawer(); + } + + function handleNewChat() { + resetAutoSelect(); + resetPagination(); + // Invalidate and refetch sessions list to ensure newly created sessions appear + queryClient.invalidateQueries({ + queryKey: getGetV2ListSessionsQueryKey(), + }); + window.history.replaceState(null, "", "/copilot"); + router.replace("/copilot"); + if (isMobile) handleCloseDrawer(); + } + + function resetAutoSelect() { + hasAutoSelectedRef.current = false; + setHasAutoSelectedSession(false); + } + + return { + isMobile, + isDrawerOpen, + isLoggedIn, + hasActiveSession: + Boolean(currentSessionId) && (!isOnHomepage || Boolean(paramSessionId)), + isLoading: isSessionsLoading || !areAllSessionsLoaded, + sessions: visibleSessions, + currentSessionId: sidebarSelectedSessionId, + handleSelectSession, + handleOpenDrawer, + handleCloseDrawer, + handleDrawerOpenChange, + handleNewChat, + hasNextPage, + isFetchingNextPage: isSessionsFetching, + fetchNextPage, + isReadyToShowContent, + }; +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/helpers.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/helpers.ts new file mode 100644 index 0000000000..692a5741f4 --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/helpers.ts @@ -0,0 +1,33 @@ +import type { User } from "@supabase/supabase-js"; + +export function getGreetingName(user?: User | null): string { + if (!user) return "there"; + const metadata = user.user_metadata as Record | undefined; + const 
fullName = metadata?.full_name; + const name = metadata?.name; + if (typeof fullName === "string" && fullName.trim()) { + return fullName.split(" ")[0]; + } + if (typeof name === "string" && name.trim()) { + return name.split(" ")[0]; + } + if (user.email) { + return user.email.split("@")[0]; + } + return "there"; +} + +export function buildCopilotChatUrl(prompt: string): string { + const trimmed = prompt.trim(); + if (!trimmed) return "/copilot/chat"; + const encoded = encodeURIComponent(trimmed); + return `/copilot/chat?prompt=${encoded}`; +} + +export function getQuickActions(): string[] { + return [ + "Show me what I can automate", + "Design a custom workflow", + "Help me with content creation", + ]; +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/layout.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/layout.tsx new file mode 100644 index 0000000000..89cf72e2ba --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/layout.tsx @@ -0,0 +1,6 @@ +import type { ReactNode } from "react"; +import { CopilotShell } from "./components/CopilotShell/CopilotShell"; + +export default function CopilotLayout({ children }: { children: ReactNode }) { + return {children}; +} diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/page.tsx b/autogpt_platform/frontend/src/app/(platform)/copilot/page.tsx new file mode 100644 index 0000000000..add9504f9b --- /dev/null +++ b/autogpt_platform/frontend/src/app/(platform)/copilot/page.tsx @@ -0,0 +1,228 @@ +"use client"; + +import { postV2CreateSession } from "@/app/api/__generated__/endpoints/chat/chat"; +import { Skeleton } from "@/components/__legacy__/ui/skeleton"; +import { Button } from "@/components/atoms/Button/Button"; +import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner"; +import { Text } from "@/components/atoms/Text/Text"; +import { Chat } from "@/components/contextual/Chat/Chat"; +import { ChatInput } from 
"@/components/contextual/Chat/components/ChatInput/ChatInput"; +import { getHomepageRoute } from "@/lib/constants"; +import { useSupabase } from "@/lib/supabase/hooks/useSupabase"; +import { + Flag, + type FlagValues, + useGetFlag, +} from "@/services/feature-flags/use-get-flag"; +import { useFlags } from "launchdarkly-react-client-sdk"; +import { useRouter, useSearchParams } from "next/navigation"; +import { useEffect, useMemo, useRef, useState } from "react"; +import { getGreetingName, getQuickActions } from "./helpers"; + +type PageState = + | { type: "welcome" } + | { type: "creating"; prompt: string } + | { type: "chat"; sessionId: string; initialPrompt?: string }; + +export default function CopilotPage() { + const router = useRouter(); + const searchParams = useSearchParams(); + const { user, isLoggedIn, isUserLoading } = useSupabase(); + + const isChatEnabled = useGetFlag(Flag.CHAT); + const flags = useFlags(); + const homepageRoute = getHomepageRoute(isChatEnabled); + const envEnabled = process.env.NEXT_PUBLIC_LAUNCHDARKLY_ENABLED === "true"; + const clientId = process.env.NEXT_PUBLIC_LAUNCHDARKLY_CLIENT_ID; + const isLaunchDarklyConfigured = envEnabled && Boolean(clientId); + const isFlagReady = + !isLaunchDarklyConfigured || flags[Flag.CHAT] !== undefined; + + const [pageState, setPageState] = useState({ type: "welcome" }); + const initialPromptRef = useRef>(new Map()); + + const urlSessionId = searchParams.get("sessionId"); + + // Sync with URL sessionId (preserve initialPrompt from ref) + useEffect( + function syncSessionFromUrl() { + if (urlSessionId) { + // If we're already in chat state with this sessionId, don't overwrite + if (pageState.type === "chat" && pageState.sessionId === urlSessionId) { + return; + } + // Get initialPrompt from ref or current state + const storedInitialPrompt = initialPromptRef.current.get(urlSessionId); + const currentInitialPrompt = + storedInitialPrompt || + (pageState.type === "creating" + ? 
pageState.prompt + : pageState.type === "chat" + ? pageState.initialPrompt + : undefined); + if (currentInitialPrompt) { + initialPromptRef.current.set(urlSessionId, currentInitialPrompt); + } + setPageState({ + type: "chat", + sessionId: urlSessionId, + initialPrompt: currentInitialPrompt, + }); + } else if (pageState.type === "chat") { + setPageState({ type: "welcome" }); + } + }, + [urlSessionId], + ); + + useEffect( + function ensureAccess() { + if (!isFlagReady) return; + if (isChatEnabled === false) { + router.replace(homepageRoute); + } + }, + [homepageRoute, isChatEnabled, isFlagReady, router], + ); + + const greetingName = useMemo( + function getName() { + return getGreetingName(user); + }, + [user], + ); + + const quickActions = useMemo(function getActions() { + return getQuickActions(); + }, []); + + async function startChatWithPrompt(prompt: string) { + if (!prompt?.trim()) return; + if (pageState.type === "creating") return; + + const trimmedPrompt = prompt.trim(); + setPageState({ type: "creating", prompt: trimmedPrompt }); + + try { + // Create session + const sessionResponse = await postV2CreateSession({ + body: JSON.stringify({}), + }); + + if (sessionResponse.status !== 200 || !sessionResponse.data?.id) { + throw new Error("Failed to create session"); + } + + const sessionId = sessionResponse.data.id; + + // Store initialPrompt in ref so it persists across re-renders + initialPromptRef.current.set(sessionId, trimmedPrompt); + + // Update URL and show Chat with initial prompt + // Chat will handle sending the message and streaming + window.history.replaceState(null, "", `/copilot?sessionId=${sessionId}`); + setPageState({ type: "chat", sessionId, initialPrompt: trimmedPrompt }); + } catch (error) { + console.error("[CopilotPage] Failed to start chat:", error); + setPageState({ type: "welcome" }); + } + } + + function handleQuickAction(action: string) { + startChatWithPrompt(action); + } + + function handleSessionNotFound() { + 
router.replace("/copilot"); + } + + if (!isFlagReady || isChatEnabled === false || !isLoggedIn) { + return null; + } + + // Show Chat when we have an active session + if (pageState.type === "chat") { + return ( +
+ +
+ ); + } + + // Show loading state while creating session and sending first message + if (pageState.type === "creating") { + return ( +
+ + + Starting your chat... + +
+ ); + } + + // Show Welcome screen + const isLoading = isUserLoading; + + return ( +
+
+ {isLoading ? ( +
+ + +
+ +
+
+ {Array.from({ length: 4 }).map((_, i) => ( + + ))} +
+
+ ) : ( + <> +
+ + Hey, {greetingName} + + + What do you want to automate? + + +
+ +
+
+
+ {quickActions.map((action) => ( + + ))} +
+ + )} +
+
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/error/page.tsx b/autogpt_platform/frontend/src/app/(platform)/error/page.tsx index b7858787cf..b26ca4559b 100644 --- a/autogpt_platform/frontend/src/app/(platform)/error/page.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/error/page.tsx @@ -1,6 +1,8 @@ "use client"; import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard"; +import { getHomepageRoute } from "@/lib/constants"; +import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag"; import { useSearchParams } from "next/navigation"; import { Suspense } from "react"; import { getErrorDetails } from "./helpers"; @@ -9,6 +11,8 @@ function ErrorPageContent() { const searchParams = useSearchParams(); const errorMessage = searchParams.get("message"); const errorDetails = getErrorDetails(errorMessage); + const isChatEnabled = useGetFlag(Flag.CHAT); + const homepageRoute = getHomepageRoute(isChatEnabled); function handleRetry() { // Auth-related errors should redirect to login @@ -25,8 +29,8 @@ function ErrorPageContent() { window.location.reload(); }, 2000); } else { - // For server/network errors, go to marketplace - window.location.href = "/marketplace"; + // For server/network errors, go to home + window.location.href = homepageRoute; } } diff --git a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/RunAgentModal.tsx b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/RunAgentModal.tsx index cd0c666be6..d5ba9142ee 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/RunAgentModal.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentModal/RunAgentModal.tsx @@ -180,7 +180,7 @@ export function 
RunAgentModal({ {/* Content */} {hasAnySetupFields ? ( -
+
{agent.description} @@ -40,6 +40,8 @@ export function ModalHeader({ agent }: ModalHeaderProps) { Tip +
+ For best results, run this agent{" "} {humanizeCronExpression( @@ -50,7 +52,7 @@ export function ModalHeader({ agent }: ModalHeaderProps) { ) : null} {agent.instructions ? ( -
+
Instructions diff --git a/autogpt_platform/frontend/src/app/(platform)/library/components/LibraryAgentCard/useLibraryAgentCard.ts b/autogpt_platform/frontend/src/app/(platform)/library/components/LibraryAgentCard/useLibraryAgentCard.ts index 4232847226..87e9e9e9bc 100644 --- a/autogpt_platform/frontend/src/app/(platform)/library/components/LibraryAgentCard/useLibraryAgentCard.ts +++ b/autogpt_platform/frontend/src/app/(platform)/library/components/LibraryAgentCard/useLibraryAgentCard.ts @@ -8,6 +8,8 @@ import { useGetV2GetUserProfile } from "@/app/api/__generated__/endpoints/store/ import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent"; import { okData } from "@/app/api/helpers"; import { useToast } from "@/components/molecules/Toast/use-toast"; +import { isLogoutInProgress } from "@/lib/autogpt-server-api/helpers"; +import { useSupabase } from "@/lib/supabase/hooks/useSupabase"; import { updateFavoriteInQueries } from "./helpers"; interface Props { @@ -23,10 +25,14 @@ export function useLibraryAgentCard({ agent }: Props) { const { toast } = useToast(); const queryClient = getQueryClient(); const { mutateAsync: updateLibraryAgent } = usePatchV2UpdateLibraryAgent(); + const { user, isLoggedIn } = useSupabase(); + const logoutInProgress = isLogoutInProgress(); const { data: profile } = useGetV2GetUserProfile({ query: { select: okData, + enabled: isLoggedIn && !!user && !logoutInProgress, + queryKey: ["/api/store/profile", user?.id], }, }); diff --git a/autogpt_platform/frontend/src/app/(platform)/login/useLoginPage.ts b/autogpt_platform/frontend/src/app/(platform)/login/useLoginPage.ts index 656e1febc2..9bde570548 100644 --- a/autogpt_platform/frontend/src/app/(platform)/login/useLoginPage.ts +++ b/autogpt_platform/frontend/src/app/(platform)/login/useLoginPage.ts @@ -1,6 +1,8 @@ import { useToast } from "@/components/molecules/Toast/use-toast"; +import { getHomepageRoute } from "@/lib/constants"; import { useSupabase } from 
"@/lib/supabase/hooks/useSupabase"; import { environment } from "@/services/environment"; +import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag"; import { loginFormSchema, LoginProvider } from "@/types/auth"; import { zodResolver } from "@hookform/resolvers/zod"; import { useRouter, useSearchParams } from "next/navigation"; @@ -20,15 +22,17 @@ export function useLoginPage() { const [isGoogleLoading, setIsGoogleLoading] = useState(false); const [showNotAllowedModal, setShowNotAllowedModal] = useState(false); const isCloudEnv = environment.isCloud(); + const isChatEnabled = useGetFlag(Flag.CHAT); + const homepageRoute = getHomepageRoute(isChatEnabled); // Get redirect destination from 'next' query parameter const nextUrl = searchParams.get("next"); useEffect(() => { if (isLoggedIn && !isLoggingIn) { - router.push(nextUrl || "/marketplace"); + router.push(nextUrl || homepageRoute); } - }, [isLoggedIn, isLoggingIn, nextUrl, router]); + }, [homepageRoute, isLoggedIn, isLoggingIn, nextUrl, router]); const form = useForm>({ resolver: zodResolver(loginFormSchema), @@ -98,7 +102,7 @@ export function useLoginPage() { } else if (result.onboarding) { router.replace("/onboarding"); } else { - router.replace("/marketplace"); + router.replace(homepageRoute); } } catch (error) { toast({ diff --git a/autogpt_platform/frontend/src/app/(platform)/profile/(user)/page.tsx b/autogpt_platform/frontend/src/app/(platform)/profile/(user)/page.tsx index 260fbc0b52..979b113f55 100644 --- a/autogpt_platform/frontend/src/app/(platform)/profile/(user)/page.tsx +++ b/autogpt_platform/frontend/src/app/(platform)/profile/(user)/page.tsx @@ -3,12 +3,14 @@ import { useGetV2GetUserProfile } from "@/app/api/__generated__/endpoints/store/store"; import { ProfileInfoForm } from "@/components/__legacy__/ProfileInfoForm"; import { ErrorCard } from "@/components/molecules/ErrorCard/ErrorCard"; +import { isLogoutInProgress } from "@/lib/autogpt-server-api/helpers"; import { ProfileDetails 
} from "@/lib/autogpt-server-api/types"; import { useSupabase } from "@/lib/supabase/hooks/useSupabase"; import { ProfileLoading } from "./ProfileLoading"; export default function UserProfilePage() { const { user } = useSupabase(); + const logoutInProgress = isLogoutInProgress(); const { data: profile, @@ -18,7 +20,7 @@ export default function UserProfilePage() { refetch, } = useGetV2GetUserProfile({ query: { - enabled: !!user, + enabled: !!user && !logoutInProgress, select: (res) => { if (res.status === 200) { return { diff --git a/autogpt_platform/frontend/src/app/(platform)/signup/actions.ts b/autogpt_platform/frontend/src/app/(platform)/signup/actions.ts index 68f7ae10ec..6d68782e7a 100644 --- a/autogpt_platform/frontend/src/app/(platform)/signup/actions.ts +++ b/autogpt_platform/frontend/src/app/(platform)/signup/actions.ts @@ -1,5 +1,6 @@ "use server"; +import { getHomepageRoute } from "@/lib/constants"; import { getServerSupabase } from "@/lib/supabase/server/getServerSupabase"; import { signupFormSchema } from "@/types/auth"; import * as Sentry from "@sentry/nextjs"; @@ -11,6 +12,7 @@ export async function signup( password: string, confirmPassword: string, agreeToTerms: boolean, + isChatEnabled: boolean, ) { try { const parsed = signupFormSchema.safeParse({ @@ -58,7 +60,9 @@ export async function signup( } const isOnboardingEnabled = await shouldShowOnboarding(); - const next = isOnboardingEnabled ? "/onboarding" : "/"; + const next = isOnboardingEnabled + ? 
"/onboarding" + : getHomepageRoute(isChatEnabled); return { success: true, next }; } catch (err) { diff --git a/autogpt_platform/frontend/src/app/(platform)/signup/useSignupPage.ts b/autogpt_platform/frontend/src/app/(platform)/signup/useSignupPage.ts index e6d7c68aef..5bd53ca846 100644 --- a/autogpt_platform/frontend/src/app/(platform)/signup/useSignupPage.ts +++ b/autogpt_platform/frontend/src/app/(platform)/signup/useSignupPage.ts @@ -1,6 +1,8 @@ import { useToast } from "@/components/molecules/Toast/use-toast"; +import { getHomepageRoute } from "@/lib/constants"; import { useSupabase } from "@/lib/supabase/hooks/useSupabase"; import { environment } from "@/services/environment"; +import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag"; import { LoginProvider, signupFormSchema } from "@/types/auth"; import { zodResolver } from "@hookform/resolvers/zod"; import { useRouter, useSearchParams } from "next/navigation"; @@ -20,15 +22,17 @@ export function useSignupPage() { const [isGoogleLoading, setIsGoogleLoading] = useState(false); const [showNotAllowedModal, setShowNotAllowedModal] = useState(false); const isCloudEnv = environment.isCloud(); + const isChatEnabled = useGetFlag(Flag.CHAT); + const homepageRoute = getHomepageRoute(isChatEnabled); // Get redirect destination from 'next' query parameter const nextUrl = searchParams.get("next"); useEffect(() => { if (isLoggedIn && !isSigningUp) { - router.push(nextUrl || "/marketplace"); + router.push(nextUrl || homepageRoute); } - }, [isLoggedIn, isSigningUp, nextUrl, router]); + }, [homepageRoute, isLoggedIn, isSigningUp, nextUrl, router]); const form = useForm>({ resolver: zodResolver(signupFormSchema), @@ -104,6 +108,7 @@ export function useSignupPage() { data.password, data.confirmPassword, data.agreeToTerms, + isChatEnabled === true, ); setIsLoading(false); @@ -129,7 +134,7 @@ export function useSignupPage() { } // Prefer the URL's next parameter, then result.next (for onboarding), then default - 
const redirectTo = nextUrl || result.next || "/"; + const redirectTo = nextUrl || result.next || homepageRoute; router.replace(redirectTo); } catch (error) { setIsLoading(false); diff --git a/autogpt_platform/frontend/src/app/api/mutators/custom-mutator.ts b/autogpt_platform/frontend/src/app/api/mutators/custom-mutator.ts index 4578ac03fe..3c9eda7785 100644 --- a/autogpt_platform/frontend/src/app/api/mutators/custom-mutator.ts +++ b/autogpt_platform/frontend/src/app/api/mutators/custom-mutator.ts @@ -4,12 +4,12 @@ import { getServerAuthToken, } from "@/lib/autogpt-server-api/helpers"; -import { transformDates } from "./date-transformer"; -import { environment } from "@/services/environment"; import { IMPERSONATION_HEADER_NAME, IMPERSONATION_STORAGE_KEY, } from "@/lib/constants"; +import { environment } from "@/services/environment"; +import { transformDates } from "./date-transformer"; const FRONTEND_BASE_URL = process.env.NEXT_PUBLIC_FRONTEND_BASE_URL || "http://localhost:3000"; diff --git a/autogpt_platform/frontend/src/app/api/openapi.json b/autogpt_platform/frontend/src/app/api/openapi.json index 5cd60fcb35..579bc3e454 100644 --- a/autogpt_platform/frontend/src/app/api/openapi.json +++ b/autogpt_platform/frontend/src/app/api/openapi.json @@ -1022,7 +1022,7 @@ "get": { "tags": ["v2", "chat", "chat"], "summary": "Get Session", - "description": "Retrieve the details of a specific chat session.\n\nLooks up a chat session by ID for the given user (if authenticated) and returns all session data including messages.\n\nArgs:\n session_id: The unique identifier for the desired chat session.\n user_id: The optional authenticated user ID, or None for anonymous access.\n\nReturns:\n SessionDetailResponse: Details for the requested session; raises NotFoundError if not found.", + "description": "Retrieve the details of a specific chat session.\n\nLooks up a chat session by ID for the given user (if authenticated) and returns all session data including messages.\n\nArgs:\n 
session_id: The unique identifier for the desired chat session.\n user_id: The optional authenticated user ID, or None for anonymous access.\n\nReturns:\n SessionDetailResponse: Details for the requested session, or None if not found.", "operationId": "getV2GetSession", "security": [{ "HTTPBearerJWT": [] }], "parameters": [ diff --git a/autogpt_platform/frontend/src/app/globals.css b/autogpt_platform/frontend/src/app/globals.css index 0625c26082..1f782f753b 100644 --- a/autogpt_platform/frontend/src/app/globals.css +++ b/autogpt_platform/frontend/src/app/globals.css @@ -141,52 +141,6 @@ } } -@keyframes shimmer { - 0% { - background-position: -200% 0; - } - 100% { - background-position: 200% 0; - } -} - -@keyframes l3 { - 25% { - background-position: - 0 0, - 100% 100%, - 100% calc(100% - 5px); - } - 50% { - background-position: - 0 100%, - 100% 100%, - 0 calc(100% - 5px); - } - 75% { - background-position: - 0 100%, - 100% 0, - 100% 5px; - } -} - -.loader { - width: 80px; - height: 70px; - border: 5px solid rgb(241 245 249); - padding: 0 8px; - box-sizing: border-box; - background: - linear-gradient(rgb(15 23 42) 0 0) 0 0/8px 20px, - linear-gradient(rgb(15 23 42) 0 0) 100% 0/8px 20px, - radial-gradient(farthest-side, rgb(15 23 42) 90%, #0000) 0 5px/8px 8px - content-box, - transparent; - background-repeat: no-repeat; - animation: l3 2s infinite linear; -} - input[type="number"]::-webkit-outer-spin-button, input[type="number"]::-webkit-inner-spin-button { -webkit-appearance: none; diff --git a/autogpt_platform/frontend/src/app/page.tsx b/autogpt_platform/frontend/src/app/page.tsx index b499a40d71..dbfab49469 100644 --- a/autogpt_platform/frontend/src/app/page.tsx +++ b/autogpt_platform/frontend/src/app/page.tsx @@ -1,5 +1,27 @@ -import { redirect } from "next/navigation"; +"use client"; + +import { getHomepageRoute } from "@/lib/constants"; +import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag"; +import { useRouter } from "next/navigation"; 
+import { useEffect } from "react"; export default function Page() { - redirect("/marketplace"); + const isChatEnabled = useGetFlag(Flag.CHAT); + const router = useRouter(); + const homepageRoute = getHomepageRoute(isChatEnabled); + const envEnabled = process.env.NEXT_PUBLIC_LAUNCHDARKLY_ENABLED === "true"; + const clientId = process.env.NEXT_PUBLIC_LAUNCHDARKLY_CLIENT_ID; + const isLaunchDarklyConfigured = envEnabled && Boolean(clientId); + const isFlagReady = + !isLaunchDarklyConfigured || typeof isChatEnabled === "boolean"; + + useEffect( + function redirectToHomepage() { + if (!isFlagReady) return; + router.replace(homepageRoute); + }, + [homepageRoute, isFlagReady, router], + ); + + return null; } diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/Chat.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/Chat.tsx new file mode 100644 index 0000000000..0f99246088 --- /dev/null +++ b/autogpt_platform/frontend/src/components/contextual/Chat/Chat.tsx @@ -0,0 +1,81 @@ +"use client"; + +import { Text } from "@/components/atoms/Text/Text"; +import { cn } from "@/lib/utils"; +import { useEffect, useRef } from "react"; +import { ChatContainer } from "./components/ChatContainer/ChatContainer"; +import { ChatErrorState } from "./components/ChatErrorState/ChatErrorState"; +import { ChatLoader } from "./components/ChatLoader/ChatLoader"; +import { useChat } from "./useChat"; + +export interface ChatProps { + className?: string; + urlSessionId?: string | null; + initialPrompt?: string; + onSessionNotFound?: () => void; +} + +export function Chat({ + className, + urlSessionId, + initialPrompt, + onSessionNotFound, +}: ChatProps) { + const hasHandledNotFoundRef = useRef(false); + const { + messages, + isLoading, + isCreating, + error, + isSessionNotFound, + sessionId, + createSession, + showLoader, + } = useChat({ urlSessionId }); + + useEffect( + function handleMissingSession() { + if (!onSessionNotFound) return; + if (!urlSessionId) return; + if 
(!isSessionNotFound || isLoading || isCreating) return; + if (hasHandledNotFoundRef.current) return; + hasHandledNotFoundRef.current = true; + onSessionNotFound(); + }, + [onSessionNotFound, urlSessionId, isSessionNotFound, isLoading, isCreating], + ); + + return ( +
+ {/* Main Content */} +
+ {/* Loading State */} + {showLoader && (isLoading || isCreating) && ( +
+
+ + + Loading your chats... + +
+
+ )} + + {/* Error State */} + {error && !isLoading && ( + + )} + + {/* Session Content */} + {sessionId && !isLoading && !error && ( + + )} +
+
+ ); +} diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/AIChatBubble/AIChatBubble.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/AIChatBubble/AIChatBubble.tsx new file mode 100644 index 0000000000..f5d56fcb15 --- /dev/null +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/AIChatBubble/AIChatBubble.tsx @@ -0,0 +1,15 @@ +import { cn } from "@/lib/utils"; +import { ReactNode } from "react"; + +export interface AIChatBubbleProps { + children: ReactNode; + className?: string; +} + +export function AIChatBubble({ children, className }: AIChatBubbleProps) { + return ( +
+ {children} +
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/AgentCarouselMessage/AgentCarouselMessage.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/AgentCarouselMessage/AgentCarouselMessage.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/AgentCarouselMessage/AgentCarouselMessage.tsx rename to autogpt_platform/frontend/src/components/contextual/Chat/components/AgentCarouselMessage/AgentCarouselMessage.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/AgentInputsSetup/AgentInputsSetup.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/AgentInputsSetup/AgentInputsSetup.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/AgentInputsSetup/AgentInputsSetup.tsx rename to autogpt_platform/frontend/src/components/contextual/Chat/components/AgentInputsSetup/AgentInputsSetup.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/AgentInputsSetup/useAgentInputsSetup.ts b/autogpt_platform/frontend/src/components/contextual/Chat/components/AgentInputsSetup/useAgentInputsSetup.ts similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/AgentInputsSetup/useAgentInputsSetup.ts rename to autogpt_platform/frontend/src/components/contextual/Chat/components/AgentInputsSetup/useAgentInputsSetup.ts diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/AuthPromptWidget/AuthPromptWidget.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/AuthPromptWidget/AuthPromptWidget.tsx similarity index 99% rename from autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/AuthPromptWidget/AuthPromptWidget.tsx rename to 
autogpt_platform/frontend/src/components/contextual/Chat/components/AuthPromptWidget/AuthPromptWidget.tsx index 33f02e660f..b2cf92ec56 100644 --- a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/AuthPromptWidget/AuthPromptWidget.tsx +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/AuthPromptWidget/AuthPromptWidget.tsx @@ -21,7 +21,7 @@ export function AuthPromptWidget({ message, sessionId, agentInfo, - returnUrl = "/chat", + returnUrl = "/copilot/chat", className, }: AuthPromptWidgetProps) { const router = useRouter(); diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/ChatContainer.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/ChatContainer.tsx new file mode 100644 index 0000000000..b86f1c922a --- /dev/null +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/ChatContainer.tsx @@ -0,0 +1,106 @@ +import type { SessionDetailResponse } from "@/app/api/__generated__/models/sessionDetailResponse"; +import { Button } from "@/components/atoms/Button/Button"; +import { Text } from "@/components/atoms/Text/Text"; +import { Dialog } from "@/components/molecules/Dialog/Dialog"; +import { useBreakpoint } from "@/lib/hooks/useBreakpoint"; +import { cn } from "@/lib/utils"; +import { ChatInput } from "../ChatInput/ChatInput"; +import { MessageList } from "../MessageList/MessageList"; +import { useChatContainer } from "./useChatContainer"; + +export interface ChatContainerProps { + sessionId: string | null; + initialMessages: SessionDetailResponse["messages"]; + initialPrompt?: string; + className?: string; +} + +export function ChatContainer({ + sessionId, + initialMessages, + initialPrompt, + className, +}: ChatContainerProps) { + const { + messages, + streamingChunks, + isStreaming, + stopStreaming, + isRegionBlockedModalOpen, + sendMessageWithContext, + handleRegionModalOpenChange, + 
handleRegionModalClose, + } = useChatContainer({ + sessionId, + initialMessages, + initialPrompt, + }); + + const breakpoint = useBreakpoint(); + const isMobile = + breakpoint === "base" || breakpoint === "sm" || breakpoint === "md"; + + return ( +
+ + +
+ + This model is not available in your region. Please connect via VPN + and try again. + +
+ +
+
+
+
+ {/* Messages - Scrollable */} +
+
+ +
+
+ + {/* Input - Fixed at bottom */} +
+
+ +
+
+ ); +} diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatContainer/createStreamEventDispatcher.ts b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/createStreamEventDispatcher.ts similarity index 55% rename from autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatContainer/createStreamEventDispatcher.ts rename to autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/createStreamEventDispatcher.ts index 844f126d49..791cf046d5 100644 --- a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatContainer/createStreamEventDispatcher.ts +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/createStreamEventDispatcher.ts @@ -1,6 +1,6 @@ import { toast } from "sonner"; import { StreamChunk } from "../../useChatStream"; -import type { HandlerDependencies } from "./useChatContainer.handlers"; +import type { HandlerDependencies } from "./handlers"; import { handleError, handleLoginNeeded, @@ -9,12 +9,30 @@ import { handleTextEnded, handleToolCallStart, handleToolResponse, -} from "./useChatContainer.handlers"; + isRegionBlockedError, +} from "./handlers"; export function createStreamEventDispatcher( deps: HandlerDependencies, ): (chunk: StreamChunk) => void { return function dispatchStreamEvent(chunk: StreamChunk): void { + if ( + chunk.type === "text_chunk" || + chunk.type === "tool_call_start" || + chunk.type === "tool_response" || + chunk.type === "login_needed" || + chunk.type === "need_login" || + chunk.type === "error" + ) { + if (!deps.hasResponseRef.current) { + console.info("[ChatStream] First response chunk:", { + type: chunk.type, + sessionId: deps.sessionId, + }); + } + deps.hasResponseRef.current = true; + } + switch (chunk.type) { case "text_chunk": handleTextChunk(chunk, deps); @@ -38,15 +56,23 @@ export function createStreamEventDispatcher( break; case "stream_end": 
+ console.info("[ChatStream] Stream ended:", { + sessionId: deps.sessionId, + hasResponse: deps.hasResponseRef.current, + chunkCount: deps.streamingChunksRef.current.length, + }); handleStreamEnd(chunk, deps); break; case "error": + const isRegionBlocked = isRegionBlockedError(chunk); handleError(chunk, deps); // Show toast at dispatcher level to avoid circular dependencies - toast.error("Chat Error", { - description: chunk.message || chunk.content || "An error occurred", - }); + if (!isRegionBlocked) { + toast.error("Chat Error", { + description: chunk.message || chunk.content || "An error occurred", + }); + } break; case "usage": diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatContainer/useChatContainer.handlers.ts b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/handlers.ts similarity index 66% rename from autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatContainer/useChatContainer.handlers.ts rename to autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/handlers.ts index 064b847064..96198a0386 100644 --- a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatContainer/useChatContainer.handlers.ts +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/handlers.ts @@ -7,15 +7,30 @@ import { parseToolResponse, } from "./helpers"; +function isToolCallMessage( + message: ChatMessageData, +): message is Extract { + return message.type === "tool_call"; +} + export interface HandlerDependencies { setHasTextChunks: Dispatch>; setStreamingChunks: Dispatch>; streamingChunksRef: MutableRefObject; + hasResponseRef: MutableRefObject; setMessages: Dispatch>; setIsStreamingInitiated: Dispatch>; + setIsRegionBlockedModalOpen: Dispatch>; sessionId: string; } +export function isRegionBlockedError(chunk: StreamChunk): boolean { + if (chunk.code === "MODEL_NOT_AVAILABLE_REGION") 
return true; + const message = chunk.message || chunk.content; + if (typeof message !== "string") return false; + return message.toLowerCase().includes("not available in your region"); +} + export function handleTextChunk(chunk: StreamChunk, deps: HandlerDependencies) { if (!chunk.content) return; deps.setHasTextChunks(true); @@ -30,16 +45,17 @@ export function handleTextEnded( _chunk: StreamChunk, deps: HandlerDependencies, ) { - console.log("[Text Ended] Saving streamed text as assistant message"); const completedText = deps.streamingChunksRef.current.join(""); if (completedText.trim()) { - const assistantMessage: ChatMessageData = { - type: "message", - role: "assistant", - content: completedText, - timestamp: new Date(), - }; - deps.setMessages((prev) => [...prev, assistantMessage]); + deps.setMessages((prev) => { + const assistantMessage: ChatMessageData = { + type: "message", + role: "assistant", + content: completedText, + timestamp: new Date(), + }; + return [...prev, assistantMessage]; + }); } deps.setStreamingChunks([]); deps.streamingChunksRef.current = []; @@ -50,30 +66,45 @@ export function handleToolCallStart( chunk: StreamChunk, deps: HandlerDependencies, ) { - const toolCallMessage: ChatMessageData = { + const toolCallMessage: Extract = { type: "tool_call", toolId: chunk.tool_id || `tool-${Date.now()}-${chunk.idx || 0}`, - toolName: chunk.tool_name || "Executing...", + toolName: chunk.tool_name || "Executing", arguments: chunk.arguments || {}, timestamp: new Date(), }; - deps.setMessages((prev) => [...prev, toolCallMessage]); - console.log("[Tool Call Start]", { - toolId: toolCallMessage.toolId, - toolName: toolCallMessage.toolName, - timestamp: new Date().toISOString(), - }); + + function updateToolCallMessages(prev: ChatMessageData[]) { + const existingIndex = prev.findIndex(function findToolCallIndex(msg) { + return isToolCallMessage(msg) && msg.toolId === toolCallMessage.toolId; + }); + if (existingIndex === -1) { + return [...prev, 
toolCallMessage]; + } + const nextMessages = [...prev]; + const existing = nextMessages[existingIndex]; + if (!isToolCallMessage(existing)) return prev; + const nextArguments = + toolCallMessage.arguments && + Object.keys(toolCallMessage.arguments).length > 0 + ? toolCallMessage.arguments + : existing.arguments; + nextMessages[existingIndex] = { + ...existing, + toolName: toolCallMessage.toolName || existing.toolName, + arguments: nextArguments, + timestamp: toolCallMessage.timestamp, + }; + return nextMessages; + } + + deps.setMessages(updateToolCallMessages); } export function handleToolResponse( chunk: StreamChunk, deps: HandlerDependencies, ) { - console.log("[Tool Response] Received:", { - toolId: chunk.tool_id, - toolName: chunk.tool_name, - timestamp: new Date().toISOString(), - }); let toolName = chunk.tool_name || "unknown"; if (!chunk.tool_name || chunk.tool_name === "unknown") { deps.setMessages((prev) => { @@ -127,22 +158,15 @@ export function handleToolResponse( const toolCallIndex = prev.findIndex( (msg) => msg.type === "tool_call" && msg.toolId === chunk.tool_id, ); + const hasResponse = prev.some( + (msg) => msg.type === "tool_response" && msg.toolId === chunk.tool_id, + ); + if (hasResponse) return prev; if (toolCallIndex !== -1) { const newMessages = [...prev]; - newMessages[toolCallIndex] = responseMessage; - console.log( - "[Tool Response] Replaced tool_call with matching tool_id:", - chunk.tool_id, - "at index:", - toolCallIndex, - ); + newMessages.splice(toolCallIndex + 1, 0, responseMessage); return newMessages; } - console.warn( - "[Tool Response] No tool_call found with tool_id:", - chunk.tool_id, - "appending instead", - ); return [...prev, responseMessage]; }); } @@ -167,55 +191,38 @@ export function handleStreamEnd( deps: HandlerDependencies, ) { const completedContent = deps.streamingChunksRef.current.join(""); - // Only save message if there are uncommitted chunks - // (text_ended already saved if there were tool calls) + if 
(!completedContent.trim() && !deps.hasResponseRef.current) { + deps.setMessages((prev) => [ + ...prev, + { + type: "message", + role: "assistant", + content: "No response received. Please try again.", + timestamp: new Date(), + }, + ]); + } if (completedContent.trim()) { - console.log( - "[Stream End] Saving remaining streamed text as assistant message", - ); const assistantMessage: ChatMessageData = { type: "message", role: "assistant", content: completedContent, timestamp: new Date(), }; - deps.setMessages((prev) => { - const updated = [...prev, assistantMessage]; - console.log("[Stream End] Final state:", { - localMessages: updated.map((m) => ({ - type: m.type, - ...(m.type === "message" && { - role: m.role, - contentLength: m.content.length, - }), - ...(m.type === "tool_call" && { - toolId: m.toolId, - toolName: m.toolName, - }), - ...(m.type === "tool_response" && { - toolId: m.toolId, - toolName: m.toolName, - success: m.success, - }), - })), - streamingChunks: deps.streamingChunksRef.current, - timestamp: new Date().toISOString(), - }); - return updated; - }); - } else { - console.log("[Stream End] No uncommitted chunks, message already saved"); + deps.setMessages((prev) => [...prev, assistantMessage]); } deps.setStreamingChunks([]); deps.streamingChunksRef.current = []; deps.setHasTextChunks(false); deps.setIsStreamingInitiated(false); - console.log("[Stream End] Stream complete, messages in local state"); } export function handleError(chunk: StreamChunk, deps: HandlerDependencies) { const errorMessage = chunk.message || chunk.content || "An error occurred"; console.error("Stream error:", errorMessage); + if (isRegionBlockedError(chunk)) { + deps.setIsRegionBlockedModalOpen(true); + } deps.setIsStreamingInitiated(false); deps.setHasTextChunks(false); deps.setStreamingChunks([]); diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatContainer/helpers.ts 
b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/helpers.ts similarity index 92% rename from autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatContainer/helpers.ts rename to autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/helpers.ts index ab7dbd275d..9d51003a93 100644 --- a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatContainer/helpers.ts +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/helpers.ts @@ -1,6 +1,33 @@ +import { SessionKey, sessionStorage } from "@/services/storage/session-storage"; import type { ToolResult } from "@/types/chat"; import type { ChatMessageData } from "../ChatMessage/useChatMessage"; +export function hasSentInitialPrompt(sessionId: string): boolean { + try { + const sent = JSON.parse( + sessionStorage.get(SessionKey.CHAT_SENT_INITIAL_PROMPTS) || "{}", + ); + return sent[sessionId] === true; + } catch { + return false; + } +} + +export function markInitialPromptSent(sessionId: string): void { + try { + const sent = JSON.parse( + sessionStorage.get(SessionKey.CHAT_SENT_INITIAL_PROMPTS) || "{}", + ); + sent[sessionId] = true; + sessionStorage.set( + SessionKey.CHAT_SENT_INITIAL_PROMPTS, + JSON.stringify(sent), + ); + } catch { + // Ignore storage errors + } +} + export function removePageContext(content: string): string { // Remove "Page URL: ..." 
pattern at start of line (case insensitive, handles various formats) let cleaned = content.replace(/^\s*Page URL:\s*[^\n\r]*/gim, ""); @@ -207,12 +234,22 @@ export function parseToolResponse( if (responseType === "setup_requirements") { return null; } + if (responseType === "understanding_updated") { + return { + type: "tool_response", + toolId, + toolName, + result: (parsedResult || result) as ToolResult, + success: true, + timestamp: timestamp || new Date(), + }; + } } return { type: "tool_response", toolId, toolName, - result, + result: parsedResult ? (parsedResult as ToolResult) : result, success: true, timestamp: timestamp || new Date(), }; diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatContainer/useChatContainer.ts b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/useChatContainer.ts similarity index 65% rename from autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatContainer/useChatContainer.ts rename to autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/useChatContainer.ts index 8e7dee7718..42dd04670d 100644 --- a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatContainer/useChatContainer.ts +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatContainer/useChatContainer.ts @@ -1,14 +1,17 @@ import type { SessionDetailResponse } from "@/app/api/__generated__/models/sessionDetailResponse"; -import { useCallback, useMemo, useRef, useState } from "react"; +import { useCallback, useEffect, useMemo, useRef, useState } from "react"; import { toast } from "sonner"; import { useChatStream } from "../../useChatStream"; +import { usePageContext } from "../../usePageContext"; import type { ChatMessageData } from "../ChatMessage/useChatMessage"; import { createStreamEventDispatcher } from "./createStreamEventDispatcher"; import { createUserMessage, filterAuthMessages, + 
hasSentInitialPrompt, isToolCallArray, isValidMessage, + markInitialPromptSent, parseToolResponse, removePageContext, } from "./helpers"; @@ -16,20 +19,45 @@ import { interface Args { sessionId: string | null; initialMessages: SessionDetailResponse["messages"]; + initialPrompt?: string; } -export function useChatContainer({ sessionId, initialMessages }: Args) { +export function useChatContainer({ + sessionId, + initialMessages, + initialPrompt, +}: Args) { const [messages, setMessages] = useState([]); const [streamingChunks, setStreamingChunks] = useState([]); const [hasTextChunks, setHasTextChunks] = useState(false); const [isStreamingInitiated, setIsStreamingInitiated] = useState(false); + const [isRegionBlockedModalOpen, setIsRegionBlockedModalOpen] = + useState(false); + const hasResponseRef = useRef(false); const streamingChunksRef = useRef([]); - const { error, sendMessage: sendStreamMessage } = useChatStream(); + const previousSessionIdRef = useRef(null); + const { + error, + sendMessage: sendStreamMessage, + stopStreaming, + } = useChatStream(); const isStreaming = isStreamingInitiated || hasTextChunks; + useEffect(() => { + if (sessionId !== previousSessionIdRef.current) { + stopStreaming(previousSessionIdRef.current ?? undefined, true); + previousSessionIdRef.current = sessionId; + setMessages([]); + setStreamingChunks([]); + streamingChunksRef.current = []; + setHasTextChunks(false); + setIsStreamingInitiated(false); + hasResponseRef.current = false; + } + }, [sessionId, stopStreaming]); + const allMessages = useMemo(() => { const processedInitialMessages: ChatMessageData[] = []; - // Map to track tool calls by their ID so we can look up tool names for tool responses const toolCallMap = new Map(); for (const msg of initialMessages) { @@ -45,13 +73,9 @@ export function useChatContainer({ sessionId, initialMessages }: Args) { ? 
new Date(msg.timestamp as string) : undefined; - // Remove page context from user messages when loading existing sessions if (role === "user") { content = removePageContext(content); - // Skip user messages that become empty after removing page context - if (!content.trim()) { - continue; - } + if (!content.trim()) continue; processedInitialMessages.push({ type: "message", role: "user", @@ -61,19 +85,15 @@ export function useChatContainer({ sessionId, initialMessages }: Args) { continue; } - // Handle assistant messages first (before tool messages) to build tool call map if (role === "assistant") { - // Strip tags from content content = content .replace(/[\s\S]*?<\/thinking>/gi, "") .trim(); - // If assistant has tool calls, create tool_call messages for each if (toolCalls && isToolCallArray(toolCalls) && toolCalls.length > 0) { for (const toolCall of toolCalls) { const toolName = toolCall.function.name; const toolId = toolCall.id; - // Store tool name for later lookup toolCallMap.set(toolId, toolName); try { @@ -96,7 +116,6 @@ export function useChatContainer({ sessionId, initialMessages }: Args) { }); } } - // Only add assistant message if there's content after stripping thinking tags if (content.trim()) { processedInitialMessages.push({ type: "message", @@ -106,7 +125,6 @@ export function useChatContainer({ sessionId, initialMessages }: Args) { }); } } else if (content.trim()) { - // Assistant message without tool calls, but with content processedInitialMessages.push({ type: "message", role: "assistant", @@ -117,7 +135,6 @@ export function useChatContainer({ sessionId, initialMessages }: Args) { continue; } - // Handle tool messages - look up tool name from tool call map if (role === "tool") { const toolCallId = (msg.tool_call_id as string) || ""; const toolName = toolCallMap.get(toolCallId) || "unknown"; @@ -133,7 +150,6 @@ export function useChatContainer({ sessionId, initialMessages }: Args) { continue; } - // Handle other message types (system, etc.) 
if (content.trim()) { processedInitialMessages.push({ type: "message", @@ -154,9 +170,10 @@ export function useChatContainer({ sessionId, initialMessages }: Args) { context?: { url: string; content: string }, ) { if (!sessionId) { - console.error("Cannot send message: no session ID"); + console.error("[useChatContainer] Cannot send message: no session ID"); return; } + setIsRegionBlockedModalOpen(false); if (isUserMessage) { const userMessage = createUserMessage(content); setMessages((prev) => [...filterAuthMessages(prev), userMessage]); @@ -167,14 +184,19 @@ export function useChatContainer({ sessionId, initialMessages }: Args) { streamingChunksRef.current = []; setHasTextChunks(false); setIsStreamingInitiated(true); + hasResponseRef.current = false; + const dispatcher = createStreamEventDispatcher({ setHasTextChunks, setStreamingChunks, streamingChunksRef, + hasResponseRef, setMessages, + setIsRegionBlockedModalOpen, sessionId, setIsStreamingInitiated, }); + try { await sendStreamMessage( sessionId, @@ -184,8 +206,12 @@ export function useChatContainer({ sessionId, initialMessages }: Args) { context, ); } catch (err) { - console.error("Failed to send message:", err); + console.error("[useChatContainer] Failed to send message:", err); setIsStreamingInitiated(false); + + // Don't show error toast for AbortError (expected during cleanup) + if (err instanceof Error && err.name === "AbortError") return; + const errorMessage = err instanceof Error ? 
err.message : "Failed to send message"; toast.error("Failed to send message", { @@ -196,11 +222,63 @@ export function useChatContainer({ sessionId, initialMessages }: Args) { [sessionId, sendStreamMessage], ); + const handleStopStreaming = useCallback(() => { + stopStreaming(); + setStreamingChunks([]); + streamingChunksRef.current = []; + setHasTextChunks(false); + setIsStreamingInitiated(false); + }, [stopStreaming]); + + const { capturePageContext } = usePageContext(); + + // Send initial prompt if provided (for new sessions from homepage) + useEffect( + function handleInitialPrompt() { + if (!initialPrompt || !sessionId) return; + if (initialMessages.length > 0) return; + if (hasSentInitialPrompt(sessionId)) return; + + markInitialPromptSent(sessionId); + const context = capturePageContext(); + sendMessage(initialPrompt, true, context); + }, + [ + initialPrompt, + sessionId, + initialMessages.length, + sendMessage, + capturePageContext, + ], + ); + + async function sendMessageWithContext( + content: string, + isUserMessage: boolean = true, + ) { + const context = capturePageContext(); + await sendMessage(content, isUserMessage, context); + } + + function handleRegionModalOpenChange(open: boolean) { + setIsRegionBlockedModalOpen(open); + } + + function handleRegionModalClose() { + setIsRegionBlockedModalOpen(false); + } + return { messages: allMessages, streamingChunks, isStreaming, error, + isRegionBlockedModalOpen, + setIsRegionBlockedModalOpen, + sendMessageWithContext, + handleRegionModalOpenChange, + handleRegionModalClose, sendMessage, + stopStreaming: handleStopStreaming, }; } diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatCredentialsSetup/ChatCredentialsSetup.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatCredentialsSetup/ChatCredentialsSetup.tsx similarity index 100% rename from 
autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatCredentialsSetup/ChatCredentialsSetup.tsx rename to autogpt_platform/frontend/src/components/contextual/Chat/components/ChatCredentialsSetup/ChatCredentialsSetup.tsx diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatCredentialsSetup/useChatCredentialsSetup.ts b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatCredentialsSetup/useChatCredentialsSetup.ts similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatCredentialsSetup/useChatCredentialsSetup.ts rename to autogpt_platform/frontend/src/components/contextual/Chat/components/ChatCredentialsSetup/useChatCredentialsSetup.ts diff --git a/autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatErrorState/ChatErrorState.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatErrorState/ChatErrorState.tsx similarity index 100% rename from autogpt_platform/frontend/src/app/(platform)/chat/components/Chat/components/ChatErrorState/ChatErrorState.tsx rename to autogpt_platform/frontend/src/components/contextual/Chat/components/ChatErrorState/ChatErrorState.tsx diff --git a/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/ChatInput.tsx b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/ChatInput.tsx new file mode 100644 index 0000000000..8cdecf0bf4 --- /dev/null +++ b/autogpt_platform/frontend/src/components/contextual/Chat/components/ChatInput/ChatInput.tsx @@ -0,0 +1,103 @@ +import { Button } from "@/components/atoms/Button/Button"; +import { cn } from "@/lib/utils"; +import { ArrowUpIcon, StopIcon } from "@phosphor-icons/react"; +import { useChatInput } from "./useChatInput"; + +export interface Props { + onSend: (message: string) => void; + disabled?: boolean; + isStreaming?: boolean; + onStop?: () => void; + 
placeholder?: string; + className?: string; +} + +export function ChatInput({ + onSend, + disabled = false, + isStreaming = false, + onStop, + placeholder = "Type your message...", + className, +}: Props) { + const inputId = "chat-input"; + const { value, setValue, handleKeyDown, handleSend, hasMultipleLines } = + useChatInput({ + onSend, + disabled: disabled || isStreaming, + maxRows: 4, + inputId, + }); + + function handleSubmit(e: React.FormEvent) { + e.preventDefault(); + handleSend(); + } + + function handleChange(e: React.ChangeEvent) { + setValue(e.target.value); + } + + return ( +
+
+
+