From c3a126e70523cdd0c507ffaf174d6696af8195f0 Mon Sep 17 00:00:00 2001
From: abhi1992002
Date: Fri, 30 Jan 2026 14:57:19 +0530
Subject: [PATCH] feat(chat): implement message ID reuse for tool call
 continuations

- Added `_continuation_message_id` parameter to `stream_chat_completion` to allow reuse of message IDs for tool call follow-ups.
- Modified message yielding logic to prevent duplicate messages when reusing IDs.
- Ensured that the message start is only yielded for the initial call, improving message handling during continuations.

This change keeps the chat completion flow from emitting duplicate message
starts, so the AI SDK client sees a single message across tool call
continuations and retries.
---
 .../backend/backend/api/features/chat/service.py | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/autogpt_platform/backend/backend/api/features/chat/service.py b/autogpt_platform/backend/backend/api/features/chat/service.py
index 20216162b5..25ae7b8ec3 100644
--- a/autogpt_platform/backend/backend/api/features/chat/service.py
+++ b/autogpt_platform/backend/backend/api/features/chat/service.py
@@ -330,6 +330,7 @@ async def stream_chat_completion(
     retry_count: int = 0,
     session: ChatSession | None = None,
     context: dict[str, str] | None = None,  # {url: str, content: str}
+    _continuation_message_id: str | None = None,  # Internal: reuse message ID for tool call continuations
 ) -> AsyncGenerator[StreamBaseResponse, None]:
     """Main entry point for streaming chat completions with database handling.
 
@@ -458,11 +459,15 @@ async def stream_chat_completion(
     # Generate unique IDs for AI SDK protocol
     import uuid as uuid_module
 
-    message_id = str(uuid_module.uuid4())
+    # Reuse message ID for continuations (tool call follow-ups) to avoid duplicate messages
+    is_continuation = _continuation_message_id is not None
+    message_id = _continuation_message_id or str(uuid_module.uuid4())
     text_block_id = str(uuid_module.uuid4())
 
-    # Yield message start
-    yield StreamStart(messageId=message_id)
+    # Only yield message start for the initial call, not for continuations
+    # This prevents the AI SDK from creating duplicate message objects
+    if not is_continuation:
+        yield StreamStart(messageId=message_id)
 
     try:
         async for chunk in _stream_chat_chunks(
@@ -690,6 +695,7 @@ async def stream_chat_completion(
             retry_count=retry_count + 1,
             session=session,
             context=context,
+            _continuation_message_id=message_id,  # Reuse message ID since start was already sent
         ):
             yield chunk
         return  # Exit after retry to avoid double-saving in finally block
@@ -759,6 +765,7 @@ async def stream_chat_completion(
             session=session,  # Pass session object to avoid Redis refetch
             context=context,
             tool_call_response=str(tool_response_messages),
+            _continuation_message_id=message_id,  # Reuse message ID to avoid duplicates
         ):
             yield chunk
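
For readers skimming the diff, below is a minimal, self-contained sketch (not the real service code) of the continuation pattern this patch introduces: the first call generates the message ID and emits StreamStart, and a follow-up call reuses that ID via _continuation_message_id, so the client never sees a second message start. Only stream_chat_completion, StreamStart, messageId, and _continuation_message_id are taken from the patch; everything else here is a hypothetical stand-in.

    # sketch.py -- simplified illustration of message ID reuse, under the
    # assumptions stated above; not the actual backend implementation.
    import asyncio
    import uuid
    from collections.abc import AsyncGenerator
    from dataclasses import dataclass


    @dataclass
    class StreamStart:
        messageId: str


    async def stream_chat_completion(
        prompt: str,
        _continuation_message_id: str | None = None,  # internal: set only on continuations
    ) -> AsyncGenerator[object, None]:
        is_continuation = _continuation_message_id is not None
        message_id = _continuation_message_id or str(uuid.uuid4())

        # StreamStart is emitted exactly once per logical message, so the client
        # does not create a duplicate message object for the continuation.
        if not is_continuation:
            yield StreamStart(messageId=message_id)

        yield f"text chunk for {prompt!r} (message {message_id})"

        # Stand-in for the tool call / retry path: continue the same message by
        # passing the existing ID back into the recursive call.
        if not is_continuation:
            async for chunk in stream_chat_completion(
                prompt, _continuation_message_id=message_id
            ):
                yield chunk


    async def main() -> None:
        # Prints one StreamStart followed by two chunks sharing one message ID.
        async for event in stream_chat_completion("hello"):
            print(event)


    asyncio.run(main())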