diff --git a/autogpt_platform/backend/backend/copilot/db.py b/autogpt_platform/backend/backend/copilot/db.py
index b6e8e73e29..4e939502e1 100644
--- a/autogpt_platform/backend/backend/copilot/db.py
+++ b/autogpt_platform/backend/backend/copilot/db.py
@@ -163,10 +163,42 @@ async def get_chat_messages_paginated(
         # very start of the conversation (sequence 0).
         if boundary_msgs[0].sequence > 0:
             has_more = True
+    else:
+        # Forward mode: the DB returned rows in ASC order.
+        # Tool-call tail boundary fix: if the last message in this page is a
+        # tool message, the NEXT forward page would start after it and begin
+        # mid-tool-group — the owning assistant message is on this page but
+        # the following tool results are on the next page.
+        # Trim the current page so it ends on the owning assistant message,
+        # which keeps tool groups intact across page boundaries.
+        if results and results[-1].role == "tool":
+            # Walk backward through results to find the last non-tool message.
+            trim_idx = len(results) - 1
+            while trim_idx >= 0 and results[trim_idx].role == "tool":
+                trim_idx -= 1
+
+            if trim_idx >= 0:
+                # Trim results so the page ends at the owning assistant.
+                # Mark has_more=True so the client knows to fetch the rest.
+                results = results[: trim_idx + 1]
+                has_more = True
+            else:
+                # Entire page is tool messages with no visible owner — log and
+                # keep as-is so the caller is not stuck with an empty page.
+                logger.warning(
+                    "Forward tail boundary: entire page is tool messages "
+                    "for session=%s, no owning assistant found (%d msgs)",
+                    session_id,
+                    len(results),
+                )
 
     messages = [ChatMessage.from_db(m) for m in results]
     oldest_sequence = messages[0].sequence if messages else None
-    newest_sequence = messages[-1].sequence if messages else None
+    # newest_sequence is only meaningful in forward mode; in backward mode it
+    # points to the last message of the page (not the session's newest
+    # message), which is not a valid forward cursor. Return None in backward
+    # mode so clients don't accidentally use it as one.
+    newest_sequence = messages[-1].sequence if (messages and forward) else None
 
     return PaginatedMessages(
         messages=messages,
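For orientation, a minimal sketch of the forward-paging contract this hunk tightens. It assumes an `after_sequence` keyword argument (implied by `test_after_sequence_returns_messages_in_order` below but not shown in this diff) and the `PaginatedMessages` fields used above; `fetch_all_forward` and the import path are illustrative, not shipped code:

```python
# Sketch only — fetch_all_forward is hypothetical, and the import path is
# assumed from this diff's file layout.
from backend.copilot.db import get_chat_messages_paginated


async def fetch_all_forward(session_id: str, page_size: int = 50) -> list:
    """Drain a session front-to-back, using newest_sequence as the cursor."""
    messages: list = []
    cursor: int | None = None
    while True:
        if cursor is None:
            # First page: start at the very beginning of the session.
            page = await get_chat_messages_paginated(
                session_id, limit=page_size, from_start=True
            )
        else:
            # Later pages: resume after the last sequence we saw. When the
            # tail-boundary trim fired, newest_sequence points at the owning
            # assistant, so this page starts with the complete tool group.
            page = await get_chat_messages_paginated(
                session_id, limit=page_size, after_sequence=cursor
            )
        if page is None or not page.messages:
            break
        messages.extend(page.messages)
        cursor = page.newest_sequence
        if not page.has_more or cursor is None:
            break
    return messages
```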
diff --git a/autogpt_platform/backend/backend/copilot/db_test.py b/autogpt_platform/backend/backend/copilot/db_test.py
index 8b1dbf43c5..2c7045e751 100644
--- a/autogpt_platform/backend/backend/copilot/db_test.py
+++ b/autogpt_platform/backend/backend/copilot/db_test.py
@@ -271,10 +271,10 @@ async def test_after_sequence_returns_messages_in_order(
 
 
 @pytest.mark.asyncio
-async def test_newest_sequence_populated_for_backward_mode(
+async def test_newest_sequence_none_for_backward_mode(
     mock_db: tuple[AsyncMock, AsyncMock],
 ):
-    """newest_sequence is populated for backward-paginated results."""
+    """newest_sequence is None in backward mode — it is not a valid forward cursor."""
     find_first, _ = mock_db
     find_first.return_value = _make_session(
         messages=[_make_msg(5), _make_msg(4), _make_msg(3)],
@@ -283,7 +283,7 @@ async def test_newest_sequence_populated_for_backward_mode(
 
     page = await get_chat_messages_paginated(SESSION_ID, limit=50)
 
     assert page is not None
-    assert page.newest_sequence == 5
+    assert page.newest_sequence is None
     assert page.oldest_sequence == 3
 
 
@@ -302,6 +302,56 @@ async def test_forward_mode_no_boundary_expansion(
     assert find_many.call_count == 0
 
 
+@pytest.mark.asyncio
+async def test_forward_tail_boundary_trims_trailing_tool_messages(
+    mock_db: tuple[AsyncMock, AsyncMock],
+):
+    """Forward pages that end with tool messages are trimmed to the owning
+    assistant so the next after_sequence page doesn't start mid-tool-group."""
+    find_first, _ = mock_db
+    # DB returns 4 messages ASC: assistant at 0, tool at 1, tool at 2, tool at 3.
+    find_first.return_value = _make_session(
+        messages=[
+            _make_msg(0, role="assistant"),
+            _make_msg(1, role="tool"),
+            _make_msg(2, role="tool"),
+            _make_msg(3, role="tool"),
+        ],
+    )
+
+    page = await get_chat_messages_paginated(SESSION_ID, limit=10, from_start=True)
+
+    assert page is not None
+    # The page should be trimmed to end at the assistant message.
+    assert [m.sequence for m in page.messages] == [0]
+    assert page.newest_sequence == 0
+    # has_more must be True so the client fetches the tool messages on the next page.
+    assert page.has_more is True
+
+
+@pytest.mark.asyncio
+async def test_forward_tail_boundary_no_trim_when_last_not_tool(
+    mock_db: tuple[AsyncMock, AsyncMock],
+):
+    """Forward pages that end with a non-tool message are not trimmed."""
+    find_first, _ = mock_db
+    find_first.return_value = _make_session(
+        messages=[
+            _make_msg(0, role="user"),
+            _make_msg(1, role="assistant"),
+            _make_msg(2, role="tool"),
+            _make_msg(3, role="assistant"),
+        ],
+    )
+
+    page = await get_chat_messages_paginated(SESSION_ID, limit=10, from_start=True)
+
+    assert page is not None
+    assert [m.sequence for m in page.messages] == [0, 1, 2, 3]
+    assert page.newest_sequence == 3
+    assert page.has_more is False
+
+
 @pytest.mark.asyncio
 async def test_user_id_filter_applied_to_session_where(
     mock_db: tuple[AsyncMock, AsyncMock],
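The two new backend tests pin the trim down as a pure transformation; restated here as a standalone sketch (illustrative only — the shipped logic lives inline in `get_chat_messages_paginated`, and `Msg` is a stand-in for the DB row type):

```python
from dataclasses import dataclass


@dataclass
class Msg:
    # Stand-in for the DB message row; only role and sequence matter here.
    role: str
    sequence: int


def trim_trailing_tool_run(page: list[Msg]) -> tuple[list[Msg], bool]:
    """Drop a trailing run of tool messages so an ASC page ends on its
    owning assistant message. Returns (page, trimmed)."""
    trim_idx = len(page) - 1
    while trim_idx >= 0 and page[trim_idx].role == "tool":
        trim_idx -= 1
    if trim_idx == len(page) - 1:
        return page, False  # already ends on a non-tool message: no trim
    if trim_idx < 0:
        return page, False  # whole page is tool messages; the caller logs this
    return page[: trim_idx + 1], True


# Mirrors the two tests above:
#   [assistant@0, tool@1, tool@2, tool@3]      -> ([assistant@0], True)
#   [user@0, assistant@1, tool@2, assistant@3] -> (unchanged, False)
```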
diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/__tests__/useLoadMoreMessages.test.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/__tests__/useLoadMoreMessages.test.ts
index c8d8f0d074..80ed40ece7 100644
--- a/autogpt_platform/frontend/src/app/(platform)/copilot/__tests__/useLoadMoreMessages.test.ts
+++ b/autogpt_platform/frontend/src/app/(platform)/copilot/__tests__/useLoadMoreMessages.test.ts
@@ -358,6 +358,60 @@ describe("useLoadMoreMessages", () => {
     expect(result.current.hasMore).toBe(false);
   });
+
+  it("forward truncation keeps first MAX_OLDER_MESSAGES items (not last)", async () => {
+    // 1990 messages already paged; load 20 more forward — total 2010 > 2000.
+    // Forward truncation must keep slice(0, 2000), not slice(-2000),
+    // to preserve the beginning of the conversation.
+    const forwardNearLimitArgs = {
+      ...BASE_ARGS,
+      forwardPaginated: true,
+      initialNewestSequence: 49,
+      initialOldestSequence: 0,
+      initialHasMore: true,
+    };
+
+    const { result } = renderHook((props) => useLoadMoreMessages(props), {
+      initialProps: forwardNearLimitArgs,
+    });
+
+    // First load: 1990 messages — advances newestSequence to 2039.
+    mockGetV2GetSession.mockResolvedValueOnce(
+      makeSuccessResponse({
+        messages: Array.from({ length: 1990 }, (_, i) => ({
+          role: "assistant",
+          content: `msg ${i + 50}`,
+          sequence: i + 50,
+        })),
+        has_more_messages: true,
+        newest_sequence: 2039,
+      }),
+    );
+
+    await act(async () => {
+      await result.current.loadMore();
+    });
+
+    // Second load: 20 more messages push the total to 2010 > 2000 — hasMore=false.
+    mockGetV2GetSession.mockResolvedValueOnce(
+      makeSuccessResponse({
+        messages: Array.from({ length: 20 }, (_, i) => ({
+          role: "assistant",
+          content: `msg ${i + 2040}`,
+          sequence: i + 2040,
+        })),
+        has_more_messages: false,
+        newest_sequence: 2059,
+      }),
+    );
+
+    await act(async () => {
+      await result.current.loadMore();
+    });
+
+    // After truncation, hasMore is forced false (total ≥ MAX_OLDER_MESSAGES).
+    expect(result.current.hasMore).toBe(false);
+  });
 });
 
 describe("loadMore — null cursor guard", () => {
diff --git a/autogpt_platform/frontend/src/app/(platform)/copilot/useLoadMoreMessages.ts b/autogpt_platform/frontend/src/app/(platform)/copilot/useLoadMoreMessages.ts
index 340cecfa06..49a13ef8d9 100644
--- a/autogpt_platform/frontend/src/app/(platform)/copilot/useLoadMoreMessages.ts
+++ b/autogpt_platform/frontend/src/app/(platform)/copilot/useLoadMoreMessages.ts
@@ -158,7 +158,15 @@ export function useLoadMoreMessages({
         ? [...prev, ...newRaw]
         : [...newRaw, ...prev];
       if (merged.length > MAX_OLDER_MESSAGES) {
-        return merged.slice(merged.length - MAX_OLDER_MESSAGES);
+        // Backward: discard the oldest (front) items — the user has scrolled
+        // far back and we shed the furthest history.
+        // Forward: discard the newest (tail) items — we only ever fetch
+        // forward, so the tail is the most recently appended page; shedding
+        // it means the sentinel stalls, which is safer than discarding the
+        // beginning of the conversation the user is here to read.
+        return forwardPaginated
+          ? merged.slice(0, MAX_OLDER_MESSAGES)
+          : merged.slice(merged.length - MAX_OLDER_MESSAGES);
       }
       return merged;
     });
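The hook's slice-direction rule, restated in the same sketch style (Python, to match the examples above). `MAX_OLDER_MESSAGES = 2000` is inferred from the test's `2010 > 2000` comments rather than read from the source, and `merge_and_cap` is a hypothetical mirror of the setter, not the shipped TypeScript:

```python
MAX_OLDER_MESSAGES = 2000  # assumption, inferred from the test comments


def merge_and_cap(prev: list, new_raw: list, forward_paginated: bool) -> list:
    """Mirror of the hook's setter: append forward pages, prepend backward
    pages, then shed from the end opposite the one being protected."""
    merged = prev + new_raw if forward_paginated else new_raw + prev
    if len(merged) <= MAX_OLDER_MESSAGES:
        return merged
    if forward_paginated:
        # Keep the front: forward pagination exists to show the start of the
        # conversation; the shed tail can be re-fetched later.
        return merged[:MAX_OLDER_MESSAGES]
    # Keep the tail: backward pagination protects the recent history the user
    # started from, shedding the oldest loaded items at the front.
    return merged[-MAX_OLDER_MESSAGES:]
```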