test(backend/copilot): remove tests for deleted transcript functions

- Remove TestReadTranscriptFile class (read_transcript_file deleted)
- Remove TestMergeWithPreviousTranscript class (merge_with_previous_transcript deleted)
- Remove TestTryUploadTranscript class (_try_upload_transcript deleted)
- All remaining tests pass (23 tests in transcript_test.py)
This commit is contained in:
Zamil Majdy
2026-03-06 18:58:42 +07:00
parent a1e0caa983
commit 203bf2ca32
2 changed files with 0 additions and 249 deletions

View File

@@ -145,84 +145,3 @@ class TestPrepareFileAttachments:
assert "Read tool" not in result.hint
assert len(result.image_blocks) == 1
class TestTryUploadTranscript:
    """Tests for _try_upload_transcript to prevent regression of double-upload bug.

    Background: Previously transcripts were uploaded twice per turn (once in
    the success path, once in the finally block), causing new data to be
    overwritten with stale data. The fix was to remove the success path
    upload and only upload in the finally block.

    Note: Full integration test of stream_chat_completion_sdk upload behavior
    requires extensive mocking of SDK, locks, sessions, and sandbox
    infrastructure. This is deferred to follow-up work. The code structure
    ensures single upload by having only one call site in the finally block
    at service.py:1563-1570.
    """

    @pytest.mark.asyncio
    async def test_upload_succeeds_with_valid_transcript(self):
        """_try_upload_transcript should return True when upload succeeds."""
        from backend.copilot.sdk.service import _try_upload_transcript

        transcript_line = (
            '{"type":"assistant","message":{"role":"assistant","content":"test"}}\n'
        )
        with patch(
            "backend.copilot.sdk.service.upload_transcript", new_callable=AsyncMock
        ) as upload_mock:
            # upload_transcript yields None when the upload goes through.
            upload_mock.return_value = None
            outcome = await _try_upload_transcript(
                user_id="test-user",
                session_id="test-session",
                raw_content=transcript_line,
                message_count=1,
                log_prefix="[TEST]",
            )

        assert outcome is True
        assert upload_mock.call_count == 1

    @pytest.mark.asyncio
    async def test_upload_returns_false_on_timeout(self):
        """_try_upload_transcript should return False when upload times out."""
        import asyncio

        from backend.copilot.sdk.service import _try_upload_transcript

        async def never_finishes(*args, **kwargs):
            # Sleep well past the 30s upload timeout so the wait trips.
            await asyncio.sleep(100)

        with patch(
            "backend.copilot.sdk.service.upload_transcript",
            side_effect=never_finishes,
        ):
            outcome = await _try_upload_transcript(
                user_id="test-user",
                session_id="test-session",
                raw_content='{"type":"assistant"}\n',
                message_count=1,
                log_prefix="[TEST]",
            )

        assert outcome is False

    @pytest.mark.asyncio
    async def test_upload_returns_false_on_exception(self):
        """_try_upload_transcript should return False and log when upload raises exception."""
        from backend.copilot.sdk.service import _try_upload_transcript

        with patch(
            "backend.copilot.sdk.service.upload_transcript", new_callable=AsyncMock
        ) as upload_mock:
            upload_mock.side_effect = Exception("Upload failed")
            outcome = await _try_upload_transcript(
                user_id="test-user",
                session_id="test-session",
                raw_content='{"type":"assistant"}\n',
                message_count=1,
                log_prefix="[TEST]",
            )

        assert outcome is False

View File

@@ -5,8 +5,6 @@ import os
from .transcript import (
STRIPPABLE_TYPES,
merge_with_previous_transcript,
read_transcript_file,
strip_progress_entries,
validate_transcript,
write_transcript_to_tempfile,
@@ -39,58 +37,6 @@ PROGRESS_ENTRY = {
VALID_TRANSCRIPT = _make_jsonl(METADATA_LINE, FILE_HISTORY, USER_MSG, ASST_MSG)
# --- read_transcript_file ---
class TestReadTranscriptFile:
    """Validation behavior of read_transcript_file over files on disk."""

    def test_returns_content_for_valid_file(self, tmp_path):
        transcript_file = tmp_path / "session.jsonl"
        transcript_file.write_text(VALID_TRANSCRIPT)

        loaded = read_transcript_file(str(transcript_file))

        assert loaded is not None
        assert "user" in loaded

    def test_returns_none_for_missing_file(self):
        assert read_transcript_file("/nonexistent/path.jsonl") is None

    def test_returns_none_for_empty_path(self):
        assert read_transcript_file("") is None

    def test_returns_none_for_empty_file(self, tmp_path):
        transcript_file = tmp_path / "empty.jsonl"
        transcript_file.write_text("")

        assert read_transcript_file(str(transcript_file)) is None

    def test_returns_none_for_metadata_only(self, tmp_path):
        # Metadata + file-history alone is not a usable transcript.
        transcript_file = tmp_path / "meta.jsonl"
        transcript_file.write_text(_make_jsonl(METADATA_LINE, FILE_HISTORY))

        assert read_transcript_file(str(transcript_file)) is None

    def test_returns_none_for_invalid_json(self, tmp_path):
        transcript_file = tmp_path / "bad.jsonl"
        transcript_file.write_text("not json\n{}\n{}\n")

        assert read_transcript_file(str(transcript_file)) is None

    def test_returns_content_for_resume_transcript(self, tmp_path):
        """A --resume transcript with only assistant entries (no user) is valid."""
        transcript_file = tmp_path / "resume.jsonl"
        transcript_file.write_text(_make_jsonl(METADATA_LINE, FILE_HISTORY, ASST_MSG))

        loaded = read_transcript_file(str(transcript_file))

        assert loaded is not None
        assert "assistant" in loaded

    def test_no_size_limit(self, tmp_path):
        """Large files are accepted — bucket storage has no size limit."""
        oversized_entry = {"type": "user", "uuid": "u9", "data": "x" * 1_000_000}
        transcript_file = tmp_path / "big.jsonl"
        transcript_file.write_text(
            _make_jsonl(METADATA_LINE, FILE_HISTORY, oversized_entry, ASST_MSG)
        )

        assert read_transcript_file(str(transcript_file)) is not None
# --- write_transcript_to_tempfile ---
@@ -325,117 +271,3 @@ class TestStripProgressEntries:
lines = result.strip().split("\n")
asst_entry = json.loads(lines[-1])
assert asst_entry["parentUuid"] == "u1" # reparented
class TestMergeWithPreviousTranscript:
    """UUID-based hydration behavior of merge_with_previous_transcript."""

    def test_no_previous_content(self):
        """Without previous content, returns new content unchanged."""
        fresh = _make_jsonl(USER_MSG, ASST_MSG)
        assert merge_with_previous_transcript(fresh, None) == fresh

    def test_empty_previous_content(self):
        merged = merge_with_previous_transcript(_make_jsonl(USER_MSG), "")
        assert merged.strip() == json.dumps(USER_MSG)

    def test_replaces_synthetic_with_real(self):
        """Synthetic assistant entries are replaced with real ones from previous."""
        hydrated_a1 = {
            "type": "assistant",
            "uuid": "a1",
            "message": {
                "role": "assistant",
                "model": "claude-opus-4-6",
                "content": "real answer",
            },
        }
        placeholder_a1 = {
            "type": "assistant",
            "uuid": "a1",
            "message": {
                "role": "assistant",
                "model": "<synthetic>",
                "content": [{"type": "text", "text": "No response requested."}],
            },
        }
        followup_user = {
            "type": "user",
            "uuid": "u2",
            "message": {"role": "user", "content": "follow up"},
        }
        followup_asst = {
            "type": "assistant",
            "uuid": "a2",
            "message": {
                "role": "assistant",
                "model": "claude-opus-4-6",
                "content": "real answer 2",
            },
        }
        previous = _make_jsonl(USER_MSG, hydrated_a1)
        fresh = _make_jsonl(USER_MSG, placeholder_a1, followup_user, followup_asst)

        merged = merge_with_previous_transcript(fresh, previous)
        entries = [json.loads(line) for line in merged.strip().split("\n")]

        # The synthetic entry should be swapped for the hydrated one.
        first_turn = next(e for e in entries if e.get("uuid") == "a1")
        assert first_turn["message"]["model"] == "claude-opus-4-6"
        assert first_turn["message"]["content"] == "real answer"
        # The new real entry should survive untouched.
        second_turn = next(e for e in entries if e.get("uuid") == "a2")
        assert second_turn["message"]["model"] == "claude-opus-4-6"

    def test_previous_wins_for_matching_uuids(self):
        """When same UUID exists in both transcripts, previous version is used."""
        incoming = {
            "type": "assistant",
            "uuid": "a1",
            "message": {
                "role": "assistant",
                "model": "claude-opus-4-6",
                "content": "new content",
            },
        }
        stored = {
            "type": "assistant",
            "uuid": "a1",
            "message": {
                "role": "assistant",
                "model": "claude-opus-4-6",
                "content": "previous content",
            },
        }
        merged = merge_with_previous_transcript(
            _make_jsonl(USER_MSG, incoming), _make_jsonl(USER_MSG, stored)
        )
        entries = [json.loads(line) for line in merged.strip().split("\n")]

        winner = next(e for e in entries if e.get("uuid") == "a1")
        # Previous always wins for matching UUIDs (hydrates real content).
        assert winner["message"]["content"] == "previous content"

    def test_no_matching_uuids(self):
        """When previous has no matching UUIDs, new content is returned as-is."""
        placeholder = {
            "type": "assistant",
            "uuid": "a-new",
            "message": {"model": "<synthetic>", "content": "No response requested."},
        }
        unrelated = {
            "type": "assistant",
            "uuid": "a-old",
            "message": {"model": "claude-opus-4-6", "content": "old"},
        }
        merged = merge_with_previous_transcript(
            _make_jsonl(USER_MSG, placeholder), _make_jsonl(USER_MSG, unrelated)
        )
        entries = [json.loads(line) for line in merged.strip().split("\n")]

        untouched = next(e for e in entries if e.get("uuid") == "a-new")
        # Not replaced because UUID doesn't match.
        assert untouched["message"]["model"] == "<synthetic>"