mirror of
https://github.com/Significant-Gravitas/AutoGPT.git
synced 2026-04-08 03:00:28 -04:00
During Tally data extraction, the system now also generates personalized quick-action prompts as part of the existing LLM extraction call (configurable model, defaults to GPT-4o-mini, `temperature=0.0`). The prompt asks the LLM for 5 candidates, then the code validates (filters prompts >20 words) and keeps the top 3. These prompts are stored in the existing `CoPilotUnderstanding.data` JSON field (at the top level, not under `business`) and served to the frontend via a new API endpoint. The copilot chat page uses them instead of hardcoded defaults when available. ### Changes 🏗️ **Backend – Data models** (`understanding.py`): - Added `suggested_prompts` field to `BusinessUnderstandingInput` (optional) and `BusinessUnderstanding` (default empty list) - Updated `from_db()` to deserialize `suggested_prompts` from top-level of the data JSON - Updated `merge_business_understanding_data()` with overwrite strategy for prompts (full replace, not append) - `format_understanding_for_prompt()` intentionally does **not** include `suggested_prompts` — they are UI-only **Backend – Prompt generation** (`tally.py`): - Extended `_EXTRACTION_PROMPT` to request 5 suggested prompts alongside the existing business understanding fields — all extracted in a single LLM call (`temperature=0.0`) - Post-extraction validation filters out prompts exceeding 20 words and slices to the top 3 - Model is now configurable via `tally_extraction_llm_model` setting (defaults to `openai/gpt-4o-mini`) **Backend – API endpoint** (`routes.py`): - Added `GET /api/chat/suggested-prompts` (auth required) - Returns `{prompts: string[]}` from the user's cached business understanding (48h Redis TTL) - Returns empty array if no understanding or no prompts exist **Frontend** (`EmptySession/`): - `helpers.ts`: Extracted defaults to `DEFAULT_QUICK_ACTIONS`, `getQuickActions()` now accepts optional custom prompts and falls back to defaults - `EmptySession.tsx`: Calls `useGetV2GetSuggestedPrompts` hook (`staleTime: 
Infinity`) and passes results to `getQuickActions()` with hardcoded fallback - Fixed `useEffect` resize handler that previously used `window.innerWidth` as a dependency (re-ran every render); now uses a proper resize event listener - Added skeleton loading state while prompts are being fetched **Generated** (`__generated__/`): - Regenerated Orval API client with new endpoint types and hooks ### Checklist 📋 #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Backend format + lint + pyright pass - [x] Frontend format + lint pass - [x] All existing tally tests pass (28/28) - [x] All chat route tests pass (9/9) - [x] All invited_user tests pass (7/7) - [x] E2E: New user with tally data sees custom prompts on copilot page - [x] E2E: User without tally data sees hardcoded default prompts - [x] E2E: Clicking a custom prompt sends it as a chat message
103 lines · 3.3 KiB · Python
"""Tests for business understanding merge and format logic."""
|
|
|
|
from datetime import datetime, timezone
|
|
from typing import Any
|
|
|
|
from backend.data.understanding import (
|
|
BusinessUnderstanding,
|
|
BusinessUnderstandingInput,
|
|
format_understanding_for_prompt,
|
|
merge_business_understanding_data,
|
|
)
|
|
|
|
|
|
def _make_input(**kwargs: Any) -> BusinessUnderstandingInput:
    """Build a BusinessUnderstandingInput carrying only the given fields."""
    payload: dict[str, Any] = dict(kwargs)
    return BusinessUnderstandingInput.model_validate(payload)
|
# ─── merge_business_understanding_data: suggested_prompts ─────────────
|
|
|
|
|
|
def test_merge_suggested_prompts_overwrites_existing():
    """New suggested_prompts should fully replace existing ones (not append)."""
    stored = {
        "name": "Alice",
        "business": {"industry": "Tech", "version": 1},
        "suggested_prompts": ["Old prompt 1", "Old prompt 2"],
    }
    replacement = _make_input(
        suggested_prompts=["New prompt A", "New prompt B", "New prompt C"],
    )

    merged = merge_business_understanding_data(stored, replacement)

    # Overwrite semantics: none of the old prompts survive.
    assert merged["suggested_prompts"] == [
        "New prompt A",
        "New prompt B",
        "New prompt C",
    ]
def test_merge_suggested_prompts_none_preserves_existing():
    """When input has suggested_prompts=None, existing prompts are preserved."""
    stored = {
        "name": "Alice",
        "business": {"industry": "Tech", "version": 1},
        "suggested_prompts": ["Keep me"],
    }

    # Input touches an unrelated field only; prompts are omitted (None).
    merged = merge_business_understanding_data(stored, _make_input(industry="Finance"))

    assert merged["business"]["industry"] == "Finance"
    assert merged["suggested_prompts"] == ["Keep me"]
def test_merge_suggested_prompts_added_to_empty_data():
    """Suggested prompts are set at top level even when starting from empty data."""
    blank: dict[str, Any] = {}

    merged = merge_business_understanding_data(
        blank, _make_input(suggested_prompts=["Prompt 1"])
    )

    assert merged["suggested_prompts"] == ["Prompt 1"]
def test_merge_suggested_prompts_empty_list_overwrites():
    """An explicit empty list should overwrite existing prompts."""
    stored: dict[str, Any] = {
        "suggested_prompts": ["Old prompt"],
        "business": {"version": 1},
    }
    cleared = _make_input(suggested_prompts=[])

    merged = merge_business_understanding_data(stored, cleared)

    # Empty list is a deliberate value, distinct from None: it must clear.
    assert merged["suggested_prompts"] == []
# ─── format_understanding_for_prompt: excludes suggested_prompts ──────
|
|
|
|
|
|
def test_format_understanding_excludes_suggested_prompts():
    """suggested_prompts is UI-only and must NOT appear in the system prompt."""
    prompts = ["Automate reports", "Set up alerts", "Track KPIs"]
    understanding = BusinessUnderstanding(
        id="test-id",
        user_id="user-1",
        created_at=datetime.now(tz=timezone.utc),
        updated_at=datetime.now(tz=timezone.utc),
        user_name="Alice",
        industry="Technology",
        suggested_prompts=prompts,
    )

    rendered = format_understanding_for_prompt(understanding)

    # Profile fields are rendered for the LLM...
    assert "Alice" in rendered
    assert "Technology" in rendered
    # ...but neither the field name nor any prompt text leaks in.
    assert "suggested_prompts" not in rendered
    for prompt in prompts:
        assert prompt not in rendered