fix(backend/copilot): fix initial load missing messages + forward pagination for completed sessions

Completed copilot sessions with many messages were showing an empty view
because the backend returned only the newest 50 (all tool calls, no user
messages) and the frontend silently dropped messages with empty content.

Backend changes:
- get_chat_messages_paginated: add from_start (ASC) and after_sequence
  (forward cursor) modes alongside the existing before_sequence (DESC)
  backward mode
- PaginatedMessages: expose newest_sequence for forward-pagination cursors
- routes.py: detect completed sessions on initial load (no active stream)
  and use from_start=True; expose newest_sequence + forward_paginated in
  SessionDetailResponse; accept after_sequence query param
- openapi.json: add after_sequence param + newest_sequence / forward_paginated
  fields to SessionDetailResponse schema

Frontend changes:
- convertChatSessionToUiMessages: never drop user messages with empty content
- useLoadMoreMessages: support forward pagination via after_sequence cursor;
  append pages to end rather than prepending for completed sessions
- ChatMessagesContainer: move LoadMoreSentinel to bottom for forward pagination
- useChatSession / useCopilotPage / ChatContainer: wire up newestSequence and
  forwardPaginated props end-to-end

Tests: add 9 new unit tests for from_start and after_sequence pagination modes
This commit is contained in:
Zamil Majdy
2026-04-15 19:26:19 +07:00
parent 0284614df0
commit e8c356a728
11 changed files with 8141 additions and 1860 deletions

View File

@@ -185,6 +185,8 @@ class SessionDetailResponse(BaseModel):
active_stream: ActiveStreamInfo | None = None # Present if stream is still active
has_more_messages: bool = False
oldest_sequence: int | None = None
newest_sequence: int | None = None
forward_paginated: bool = False
total_prompt_tokens: int = 0
total_completion_tokens: int = 0
metadata: ChatSessionMetadata = ChatSessionMetadata()
@@ -451,50 +453,79 @@ async def get_session(
user_id: Annotated[str, Security(auth.get_user_id)],
limit: int = Query(default=50, ge=1, le=200),
before_sequence: int | None = Query(default=None, ge=0),
after_sequence: int | None = Query(default=None, ge=0),
) -> SessionDetailResponse:
"""
Retrieve the details of a specific chat session.
Supports cursor-based pagination via ``limit`` and ``before_sequence``.
When no pagination params are provided, returns the most recent messages.
Supports cursor-based pagination via ``limit``, ``before_sequence``, and
``after_sequence``.
On the initial load (no cursor provided) of a completed session, messages
are returned in forward order starting from sequence 0 so the user always
sees their initial prompt. Active sessions use the legacy newest-first
order so streaming context is preserved.
Args:
session_id: The unique identifier for the desired chat session.
user_id: The authenticated user's ID.
limit: Maximum number of messages to return (1-200, default 50).
before_sequence: Return messages with sequence < this value (cursor).
before_sequence: Return messages with sequence < this value (backward
pagination cursor, used by active-session load-more).
after_sequence: Return messages with sequence > this value (forward
pagination cursor, used by completed-session load-more).
Returns:
SessionDetailResponse: Details for the requested session, including
active_stream info and pagination metadata.
"""
is_initial_load = before_sequence is None and after_sequence is None
# Check active stream before the DB query on initial loads so we can
# choose the correct pagination direction (forward for completed sessions,
# newest-first for active ones).
active_session = None
last_message_id = None
if is_initial_load:
active_session, last_message_id = await stream_registry.get_active_session(
session_id, user_id
)
# Completed sessions on initial load start from sequence 0 so the user's
# initial prompt is always visible. Active sessions keep the legacy
# newest-first behavior to preserve streaming context.
from_start = is_initial_load and active_session is None
page = await get_chat_messages_paginated(
session_id, limit, before_sequence, user_id=user_id
session_id,
limit,
before_sequence=before_sequence,
after_sequence=after_sequence,
from_start=from_start,
user_id=user_id,
)
if page is None:
raise NotFoundError(f"Session {session_id} not found.")
messages = [
_strip_injected_context(message.model_dump()) for message in page.messages
]
# Only check active stream on initial load (not on "load more" requests)
logger.info(
f"[GET_SESSION] session={session_id}, active={active_session is not None}, "
f"from_start={from_start}, forward_paginated={from_start or after_sequence is not None}, "
f"msg_count={len(messages)}, last_role={messages[-1].get('role') if messages else 'none'}"
)
active_stream_info = None
if before_sequence is None:
active_session, last_message_id = await stream_registry.get_active_session(
session_id, user_id
if active_session and last_message_id is not None:
active_stream_info = ActiveStreamInfo(
turn_id=active_session.turn_id,
last_message_id=last_message_id,
)
logger.info(
f"[GET_SESSION] session={session_id}, active_session={active_session is not None}, "
f"msg_count={len(messages)}, last_role={messages[-1].get('role') if messages else 'none'}"
)
if active_session:
active_stream_info = ActiveStreamInfo(
turn_id=active_session.turn_id,
last_message_id=last_message_id,
)
# Skip session metadata on "load more" — frontend only needs messages
if before_sequence is not None:
if not is_initial_load:
return SessionDetailResponse(
id=page.session.session_id,
created_at=page.session.started_at.isoformat(),
@@ -504,6 +535,8 @@ async def get_session(
active_stream=None,
has_more_messages=page.has_more,
oldest_sequence=page.oldest_sequence,
newest_sequence=page.newest_sequence,
forward_paginated=after_sequence is not None,
total_prompt_tokens=0,
total_completion_tokens=0,
)
@@ -520,6 +553,8 @@ async def get_session(
active_stream=active_stream_info,
has_more_messages=page.has_more,
oldest_sequence=page.oldest_sequence,
newest_sequence=page.newest_sequence,
forward_paginated=from_start,
total_prompt_tokens=total_prompt,
total_completion_tokens=total_completion,
metadata=page.session.metadata,

View File

@@ -37,6 +37,7 @@ class PaginatedMessages(BaseModel):
messages: list[ChatMessage]
has_more: bool
oldest_sequence: int | None
newest_sequence: int | None
session: ChatSessionInfo
@@ -61,32 +62,43 @@ async def get_chat_messages_paginated(
session_id: str,
limit: int = 50,
before_sequence: int | None = None,
after_sequence: int | None = None,
from_start: bool = False,
user_id: str | None = None,
) -> PaginatedMessages | None:
"""Get paginated messages for a session, newest first.
"""Get paginated messages for a session.
Verifies session existence (and ownership when ``user_id`` is provided)
in parallel with the message query. Returns ``None`` when the session
is not found or does not belong to the user.
Three modes:
Args:
session_id: The chat session ID.
limit: Max messages to return.
before_sequence: Cursor — return messages with sequence < this value.
user_id: If provided, filters via ``Session.userId`` so only the
session owner's messages are returned (acts as an ownership guard).
- ``before_sequence`` set: backward pagination (DESC), returns messages
with sequence < ``before_sequence``. Used for active sessions or manual
backward navigation.
- ``from_start=True`` or ``after_sequence`` set: forward pagination (ASC).
Returns messages from sequence 0 (``from_start``) or after
``after_sequence``. Used on initial load of completed sessions and for
loading subsequent forward pages.
- Both cursors ``None`` and ``from_start=False``: newest-first (DESC
without filter). Used for active sessions on initial load.
Verifies session existence (and ownership when ``user_id`` is provided).
Returns ``None`` when the session is not found or does not belong to the
user.
"""
# Build session-existence / ownership check
session_where: ChatSessionWhereInput = {"id": session_id}
if user_id is not None:
session_where["userId"] = user_id
forward = from_start or after_sequence is not None
# Build message include — fetch paginated messages in the same query
msg_include: dict[str, Any] = {
"order_by": {"sequence": "desc"},
"order_by": {"sequence": "asc" if forward else "desc"},
"take": limit + 1,
}
if before_sequence is not None:
if after_sequence is not None:
msg_include["where"] = {"sequence": {"gt": after_sequence}}
elif before_sequence is not None:
msg_include["where"] = {"sequence": {"lt": before_sequence}}
# Single query: session existence/ownership + paginated messages
@@ -104,57 +116,60 @@ async def get_chat_messages_paginated(
has_more = len(results) > limit
results = results[:limit]
# Reverse to ascending order
results.reverse()
if not forward:
# Backward mode: DB returned DESC; reverse to ascending order.
results.reverse()
# Tool-call boundary fix: if the oldest message is a tool message,
# expand backward to include the preceding assistant message that
# owns the tool_calls, so convertChatSessionMessagesToUiMessages
# can pair them correctly.
_BOUNDARY_SCAN_LIMIT = 10
if results and results[0].role == "tool":
boundary_where: dict[str, Any] = {
"sessionId": session_id,
"sequence": {"lt": results[0].sequence},
}
if user_id is not None:
boundary_where["Session"] = {"is": {"userId": user_id}}
extra = await PrismaChatMessage.prisma().find_many(
where=boundary_where,
order={"sequence": "desc"},
take=_BOUNDARY_SCAN_LIMIT,
)
# Find the first non-tool message (should be the assistant)
boundary_msgs = []
found_owner = False
for msg in extra:
boundary_msgs.append(msg)
if msg.role != "tool":
found_owner = True
break
boundary_msgs.reverse()
if not found_owner:
logger.warning(
"Boundary expansion did not find owning assistant message "
"for session=%s before sequence=%s (%d msgs scanned)",
session_id,
results[0].sequence,
len(extra),
# Tool-call boundary fix: if the oldest message is a tool message,
# expand backward to include the preceding assistant message that
# owns the tool_calls, so convertChatSessionMessagesToUiMessages
# can pair them correctly.
_BOUNDARY_SCAN_LIMIT = 10
if results and results[0].role == "tool":
boundary_where: dict[str, Any] = {
"sessionId": session_id,
"sequence": {"lt": results[0].sequence},
}
if user_id is not None:
boundary_where["Session"] = {"is": {"userId": user_id}}
extra = await PrismaChatMessage.prisma().find_many(
where=boundary_where,
order={"sequence": "desc"},
take=_BOUNDARY_SCAN_LIMIT,
)
if boundary_msgs:
results = boundary_msgs + results
# Only mark has_more if the expanded boundary isn't the
# very start of the conversation (sequence 0).
if boundary_msgs[0].sequence > 0:
has_more = True
# Find the first non-tool message (should be the assistant)
boundary_msgs = []
found_owner = False
for msg in extra:
boundary_msgs.append(msg)
if msg.role != "tool":
found_owner = True
break
boundary_msgs.reverse()
if not found_owner:
logger.warning(
"Boundary expansion did not find owning assistant message "
"for session=%s before sequence=%s (%d msgs scanned)",
session_id,
results[0].sequence,
len(extra),
)
if boundary_msgs:
results = boundary_msgs + results
# Only mark has_more if the expanded boundary isn't the
# very start of the conversation (sequence 0).
if boundary_msgs[0].sequence > 0:
has_more = True
messages = [ChatMessage.from_db(m) for m in results]
oldest_sequence = messages[0].sequence if messages else None
newest_sequence = messages[-1].sequence if messages else None
return PaginatedMessages(
messages=messages,
has_more=has_more,
oldest_sequence=oldest_sequence,
newest_sequence=newest_sequence,
session=session_info,
)

View File

@@ -175,6 +175,133 @@ async def test_no_where_on_messages_without_before_sequence(
assert "where" not in include["Messages"]
# ---------- Forward pagination (from_start / after_sequence) ----------
@pytest.mark.asyncio
async def test_from_start_uses_asc_order_no_where(
    mock_db: tuple[AsyncMock, AsyncMock],
):
    """With from_start=True the message include must sort ASC and carry no filter."""
    find_first, _ = mock_db
    find_first.return_value = _make_session(
        messages=[_make_msg(i) for i in range(3)],
    )

    await get_chat_messages_paginated(SESSION_ID, limit=50, from_start=True)

    # Inspect the query the helper issued to the mocked Prisma client.
    call = find_first.call_args
    include = call.kwargs.get("include") or call[1].get("include")
    messages_include = include["Messages"]
    assert messages_include["order_by"] == {"sequence": "asc"}
    assert "where" not in messages_include
@pytest.mark.asyncio
async def test_from_start_returns_messages_ascending(
    mock_db: tuple[AsyncMock, AsyncMock],
):
    """from_start=True yields messages ordered by ascending sequence."""
    find_first, _ = mock_db
    find_first.return_value = _make_session(
        messages=[_make_msg(i) for i in range(3)],
    )

    page = await get_chat_messages_paginated(SESSION_ID, limit=50, from_start=True)

    assert page is not None
    sequences = [message.sequence for message in page.messages]
    assert sequences == [0, 1, 2]
    # Cursor bookkeeping: oldest/newest reflect the page edges; nothing left over.
    assert page.oldest_sequence == 0
    assert page.newest_sequence == 2
    assert page.has_more is False
@pytest.mark.asyncio
async def test_from_start_has_more_when_results_exceed_limit(
    mock_db: tuple[AsyncMock, AsyncMock],
):
    """from_start=True flags has_more when the DB hands back limit+1 rows."""
    find_first, _ = mock_db
    # Three rows against limit=2 simulates the sentinel "extra" row.
    find_first.return_value = _make_session(
        messages=[_make_msg(i) for i in range(3)],
    )

    page = await get_chat_messages_paginated(SESSION_ID, limit=2, from_start=True)

    assert page is not None
    assert page.has_more is True
    # Only the first `limit` messages survive; the cursor points at the last kept.
    assert [message.sequence for message in page.messages] == [0, 1]
    assert page.newest_sequence == 1
@pytest.mark.asyncio
async def test_after_sequence_uses_gt_filter_asc_order(
    mock_db: tuple[AsyncMock, AsyncMock],
):
    """after_sequence must translate into a `sequence > N` filter with ASC order."""
    find_first, _ = mock_db
    find_first.return_value = _make_session(
        messages=[_make_msg(11), _make_msg(12)],
    )

    await get_chat_messages_paginated(SESSION_ID, limit=50, after_sequence=10)

    call = find_first.call_args
    include = call.kwargs.get("include") or call[1].get("include")
    messages_include = include["Messages"]
    assert messages_include["order_by"] == {"sequence": "asc"}
    assert messages_include["where"] == {"sequence": {"gt": 10}}
@pytest.mark.asyncio
async def test_after_sequence_returns_messages_in_order(
    mock_db: tuple[AsyncMock, AsyncMock],
):
    """after_sequence pages return only rows past the cursor, ascending."""
    find_first, _ = mock_db
    find_first.return_value = _make_session(
        messages=[_make_msg(seq) for seq in (11, 12, 13)],
    )

    page = await get_chat_messages_paginated(SESSION_ID, limit=50, after_sequence=10)

    assert page is not None
    assert [message.sequence for message in page.messages] == [11, 12, 13]
    # Forward cursors: oldest is the page start, newest feeds the next request.
    assert page.oldest_sequence == 11
    assert page.newest_sequence == 13
    assert page.has_more is False
@pytest.mark.asyncio
async def test_newest_sequence_populated_for_backward_mode(
    mock_db: tuple[AsyncMock, AsyncMock],
):
    """Backward (DESC) pagination still reports newest_sequence after the reverse."""
    find_first, _ = mock_db
    # DB returns DESC order in backward mode; the helper reverses to ascending.
    find_first.return_value = _make_session(
        messages=[_make_msg(seq) for seq in (5, 4, 3)],
    )

    page = await get_chat_messages_paginated(SESSION_ID, limit=50)

    assert page is not None
    assert page.newest_sequence == 5
    assert page.oldest_sequence == 3
@pytest.mark.asyncio
async def test_forward_mode_no_boundary_expansion(
    mock_db: tuple[AsyncMock, AsyncMock],
):
    """Forward pagination must skip the tool-call boundary expansion query."""
    find_first, find_many = mock_db
    # A tool message at the head would trigger expansion in backward mode.
    find_first.return_value = _make_session(
        messages=[_make_msg(0, role="tool"), _make_msg(1, role="tool")],
    )

    await get_chat_messages_paginated(SESSION_ID, limit=50, from_start=True)

    # No extra find_many round-trip may be issued in forward mode.
    find_many.assert_not_called()
@pytest.mark.asyncio
async def test_user_id_filter_applied_to_session_where(
mock_db: tuple[AsyncMock, AsyncMock],

View File

@@ -93,6 +93,7 @@ export function CopilotPage() {
hasMoreMessages,
isLoadingMore,
loadMore,
forwardPaginated,
// Mobile drawer
isMobile,
isDrawerOpen,
@@ -212,6 +213,7 @@ export function CopilotPage() {
hasMoreMessages={hasMoreMessages}
isLoadingMore={isLoadingMore}
onLoadMore={loadMore}
forwardPaginated={forwardPaginated}
droppedFiles={droppedFiles}
onDroppedFilesConsumed={handleDroppedFilesConsumed}
historicalDurations={historicalDurations}

View File

@@ -30,6 +30,7 @@ export interface ChatContainerProps {
hasMoreMessages?: boolean;
isLoadingMore?: boolean;
onLoadMore?: () => void;
forwardPaginated?: boolean;
/** Files dropped onto the chat window. */
droppedFiles?: File[];
/** Called after droppedFiles have been consumed by ChatInput. */
@@ -54,6 +55,7 @@ export const ChatContainer = ({
hasMoreMessages,
isLoadingMore,
onLoadMore,
forwardPaginated,
droppedFiles,
onDroppedFilesConsumed,
historicalDurations,
@@ -108,6 +110,7 @@ export const ChatContainer = ({
hasMoreMessages={hasMoreMessages}
isLoadingMore={isLoadingMore}
onLoadMore={onLoadMore}
forwardPaginated={forwardPaginated}
onRetry={handleRetry}
historicalDurations={historicalDurations}
/>

View File

@@ -42,6 +42,10 @@ interface Props {
hasMoreMessages?: boolean;
isLoadingMore?: boolean;
onLoadMore?: () => void;
/** When true the load-more sentinel is placed at the bottom (forward
* pagination for completed sessions). When false it is at the top
* (backward pagination for active sessions). */
forwardPaginated?: boolean;
onRetry?: () => void;
historicalDurations?: Map<string, number>;
}
@@ -205,6 +209,7 @@ export function ChatMessagesContainer({
hasMoreMessages,
isLoadingMore,
onLoadMore,
forwardPaginated,
onRetry,
historicalDurations,
}: Props) {
@@ -283,7 +288,7 @@ export function ChatMessagesContainer({
}
>
<ConversationContent className="flex min-h-full flex-1 flex-col gap-6 px-3 py-6">
{hasMoreMessages && onLoadMore && (
{hasMoreMessages && onLoadMore && !forwardPaginated && (
<LoadMoreSentinel
hasMore={hasMoreMessages}
isLoading={!!isLoadingMore}
@@ -442,6 +447,14 @@ export function ChatMessagesContainer({
</pre>
</details>
)}
{hasMoreMessages && onLoadMore && forwardPaginated && (
<LoadMoreSentinel
hasMore={hasMoreMessages}
isLoading={!!isLoadingMore}
messageCount={messages.length}
onLoadMore={onLoadMore}
/>
)}
</ConversationContent>
<ConversationScrollButton />
</Conversation>

View File

@@ -253,6 +253,11 @@ export function convertChatSessionMessagesToUiMessages(
}
}
// User messages must always be rendered, even with empty content, so the
// initial prompt is visible when reloading a session.
if (parts.length === 0 && msg.role === "user") {
parts.push({ type: "text", text: "", state: "done" });
}
if (parts.length === 0) return;
// Merge consecutive assistant messages into a single UIMessage

View File

@@ -85,6 +85,16 @@ export function useChatSession({ dryRun = false }: UseChatSessionOptions = {}) {
return sessionQuery.data.data.oldest_sequence ?? null;
}, [sessionQuery.data]);
const newestSequence = useMemo(() => {
if (sessionQuery.data?.status !== 200) return null;
return sessionQuery.data.data.newest_sequence ?? null;
}, [sessionQuery.data]);
const forwardPaginated = useMemo(() => {
if (sessionQuery.data?.status !== 200) return false;
return !!sessionQuery.data.data.forward_paginated;
}, [sessionQuery.data]);
// Memoize so the effect in useCopilotPage doesn't infinite-loop on a new
// array reference every render. Re-derives only when query data changes.
// When the session is complete (no active stream), mark dangling tool
@@ -172,6 +182,8 @@ export function useChatSession({ dryRun = false }: UseChatSessionOptions = {}) {
hasActiveStream,
hasMoreMessages,
oldestSequence,
newestSequence,
forwardPaginated,
isLoadingSession: sessionQuery.isLoading,
isSessionError: sessionQuery.isError,
createSession,

View File

@@ -56,6 +56,8 @@ export function useCopilotPage() {
hasActiveStream,
hasMoreMessages,
oldestSequence,
newestSequence,
forwardPaginated,
isLoadingSession,
isSessionError,
createSession,
@@ -83,18 +85,26 @@ export function useCopilotPage() {
copilotModel: isModeToggleEnabled ? copilotLlmModel : undefined,
});
const { olderMessages, hasMore, isLoadingMore, loadMore } =
const { pagedMessages, hasMore, isLoadingMore, loadMore } =
useLoadMoreMessages({
sessionId,
initialOldestSequence: oldestSequence,
initialNewestSequence: newestSequence,
initialHasMore: hasMoreMessages,
forwardPaginated,
initialPageRawMessages: rawSessionMessages,
});
// Combine older (paginated) messages with current page messages,
// merging consecutive assistant UIMessages at the page boundary so
// reasoning + response parts stay in a single bubble.
const messages = concatWithAssistantMerge(olderMessages, currentMessages);
// Combine paginated messages with current page messages, merging consecutive
// assistant UIMessages at the page boundary so reasoning + response parts
// stay in a single bubble.
// Forward pagination (completed sessions): current page is the beginning,
// paged messages are newer pages appended after.
// Backward pagination (active sessions): paged messages are older history
// prepended before the current page.
const messages = forwardPaginated
? concatWithAssistantMerge(currentMessages, pagedMessages)
: concatWithAssistantMerge(pagedMessages, currentMessages);
useCopilotNotifications(sessionId);
@@ -396,6 +406,7 @@ export function useCopilotPage() {
hasMoreMessages: hasMore,
isLoadingMore,
loadMore,
forwardPaginated,
// Mobile drawer
isMobile,
isDrawerOpen,

View File

@@ -9,7 +9,11 @@ import {
interface UseLoadMoreMessagesArgs {
sessionId: string | null;
initialOldestSequence: number | null;
initialNewestSequence: number | null;
initialHasMore: boolean;
/** True when the initial page was loaded from sequence 0 forward (completed
* sessions). False when loaded newest-first (active sessions). */
forwardPaginated: boolean;
/** Raw messages from the initial page, used for cross-page tool output matching. */
initialPageRawMessages: unknown[];
}
@@ -20,16 +24,21 @@ const MAX_OLDER_MESSAGES = 2000;
export function useLoadMoreMessages({
sessionId,
initialOldestSequence,
initialNewestSequence,
initialHasMore,
forwardPaginated,
initialPageRawMessages,
}: UseLoadMoreMessagesArgs) {
// Store accumulated raw messages from all older pages (in ascending order).
// Accumulated raw messages from all extra pages (ascending order).
// Re-converting them all together ensures tool outputs are matched across
// inter-page boundaries.
const [olderRawMessages, setOlderRawMessages] = useState<unknown[]>([]);
const [pagedRawMessages, setPagedRawMessages] = useState<unknown[]>([]);
const [oldestSequence, setOldestSequence] = useState<number | null>(
initialOldestSequence,
);
const [newestSequence, setNewestSequence] = useState<number | null>(
initialNewestSequence,
);
const [hasMore, setHasMore] = useState(initialHasMore);
const [isLoadingMore, setIsLoadingMore] = useState(false);
const isLoadingMoreRef = useRef(false);
@@ -47,8 +56,9 @@ export function useLoadMoreMessages({
// Session changed — full reset
prevSessionIdRef.current = sessionId;
prevInitialOldestRef.current = initialOldestSequence;
setOlderRawMessages([]);
setPagedRawMessages([]);
setOldestSequence(initialOldestSequence);
setNewestSequence(initialNewestSequence);
setHasMore(initialHasMore);
setIsLoadingMore(false);
isLoadingMoreRef.current = false;
@@ -56,13 +66,14 @@ export function useLoadMoreMessages({
epochRef.current += 1;
} else if (
prevInitialOldestRef.current !== initialOldestSequence &&
olderRawMessages.length > 0
pagedRawMessages.length > 0
) {
// Same session but initial window shifted (e.g. new messages arrived) —
// clear paged state to avoid gaps/duplicates
prevInitialOldestRef.current = initialOldestSequence;
setOlderRawMessages([]);
setPagedRawMessages([]);
setOldestSequence(initialOldestSequence);
setNewestSequence(initialNewestSequence);
setHasMore(initialHasMore);
setIsLoadingMore(false);
isLoadingMoreRef.current = false;
@@ -72,45 +83,44 @@ export function useLoadMoreMessages({
// Update from parent when initial data changes (e.g. refetch)
prevInitialOldestRef.current = initialOldestSequence;
setOldestSequence(initialOldestSequence);
setNewestSequence(initialNewestSequence);
setHasMore(initialHasMore);
}
}, [sessionId, initialOldestSequence, initialHasMore]);
}, [sessionId, initialOldestSequence, initialNewestSequence, initialHasMore]);
// Convert all accumulated raw messages in one pass so tool outputs
// are matched across inter-page boundaries. Initial page tool outputs
// are included via extraToolOutputs to handle the boundary between
// the last older page and the initial/streaming page.
const olderMessages: UIMessage<unknown, UIDataTypes, UITools>[] =
// are matched across inter-page boundaries.
// For backward pagination: initial page tool outputs are included via
// extraToolOutputs to handle the boundary between the last older page and
// the initial/streaming page.
const pagedMessages: UIMessage<unknown, UIDataTypes, UITools>[] =
useMemo(() => {
if (!sessionId || olderRawMessages.length === 0) return [];
if (!sessionId || pagedRawMessages.length === 0) return [];
const extraToolOutputs =
initialPageRawMessages.length > 0
!forwardPaginated && initialPageRawMessages.length > 0
? extractToolOutputsFromRaw(initialPageRawMessages)
: undefined;
return convertChatSessionMessagesToUiMessages(
sessionId,
olderRawMessages,
pagedRawMessages,
{ isComplete: true, extraToolOutputs },
).messages;
}, [sessionId, olderRawMessages, initialPageRawMessages]);
}, [sessionId, pagedRawMessages, initialPageRawMessages, forwardPaginated]);
async function loadMore() {
if (
!sessionId ||
!hasMore ||
isLoadingMoreRef.current ||
oldestSequence === null
)
return;
if (!sessionId || !hasMore || isLoadingMoreRef.current) return;
const cursor = forwardPaginated ? newestSequence : oldestSequence;
if (cursor === null) return;
const requestEpoch = epochRef.current;
isLoadingMoreRef.current = true;
setIsLoadingMore(true);
try {
const response = await getV2GetSession(sessionId, {
limit: 50,
before_sequence: oldestSequence,
});
const params = forwardPaginated
? { limit: 50, after_sequence: cursor }
: { limit: 50, before_sequence: cursor };
const response = await getV2GetSession(sessionId, params);
// Discard response if session/pagination was reset while awaiting
if (epochRef.current !== requestEpoch) return;
@@ -129,15 +139,24 @@ export function useLoadMoreMessages({
consecutiveErrorsRef.current = 0;
const newRaw = (response.data.messages ?? []) as unknown[];
setOlderRawMessages((prev) => {
const merged = [...newRaw, ...prev];
setPagedRawMessages((prev) => {
// Forward: append to end. Backward: prepend to start.
const merged = forwardPaginated
? [...prev, ...newRaw]
: [...newRaw, ...prev];
if (merged.length > MAX_OLDER_MESSAGES) {
return merged.slice(merged.length - MAX_OLDER_MESSAGES);
}
return merged;
});
setOldestSequence(response.data.oldest_sequence ?? null);
if (newRaw.length + olderRawMessages.length >= MAX_OLDER_MESSAGES) {
if (forwardPaginated) {
setNewestSequence(response.data.newest_sequence ?? null);
} else {
setOldestSequence(response.data.oldest_sequence ?? null);
}
if (newRaw.length + pagedRawMessages.length >= MAX_OLDER_MESSAGES) {
setHasMore(false);
} else {
setHasMore(!!response.data.has_more_messages);
@@ -157,5 +176,5 @@ export function useLoadMoreMessages({
}
}
return { olderMessages, hasMore, isLoadingMore, loadMore };
return { pagedMessages, hasMore, isLoadingMore, loadMore };
}

File diff suppressed because it is too large Load Diff