feat(platform): replace suggestion pills with themed prompt categories (#12452)

## Summary



https://github.com/user-attachments/assets/13da6d36-5f35-429b-a6cf-e18316bb8709



Replaces the flat list of suggestion pills in the CoPilot empty session
with themed prompt categories (Learn, Create, Automate, Organize), each
shown as a popover with contextual prompts.

- **Backend**: Changes `suggested_prompts` from a flat `list[str]` to a
themed `dict[str, list[str]]` keyed by category. Updates Tally
extraction LLM prompt to generate prompts per theme, and the
`/suggested-prompts` API to return grouped themes. Legacy `list[str]`
rows are preserved under a `"General"` key for backward compatibility.
- **Frontend**: Replaces inline pill buttons with a `SuggestionThemes`
popover component. Each theme button (with icon) opens a dropdown of 5
relevant prompts. Falls back to hardcoded defaults when the API has no
personalized prompts. Normalizes partial API responses by padding
missing themes with defaults. Legacy `"General"` prompts are
distributed round-robin across themes so existing users keep their
personalized suggestions (see the sketch after this list).
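
To make the migration behavior concrete, here is a minimal usage sketch of
`getSuggestionThemes` (the relative import path is an assumption; the function
lives in the new `helpers.ts`):

```ts
import { getSuggestionThemes } from "./helpers"; // path assumed relative to EmptySession/

// A legacy user's prompts arrive from the API under a single "General" theme
const legacy = [
  {
    name: "General",
    prompts: [
      "Automate weekly reports",
      "Set up invoice processing",
      "Create a customer onboarding flow",
    ],
  },
];

const themes = getSuggestionThemes(legacy);
// Round-robin by index: prompt 0 goes to Learn, prompt 1 to Create,
// prompt 2 to Automate; Organize receives none. Each theme is then
// topped up with its hardcoded defaults and capped at 5 prompts.
console.log(themes[0].prompts[0]); // "Automate weekly reports"
console.log(themes[3].prompts.length); // 5 (all defaults)
```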

### Changes 🏗️

- `backend/data/understanding.py`: `suggested_prompts` field changed
from `list[str]` to `dict[str, list[str]]`; legacy list rows preserved
under `"General"` key; list items validated as strings
- `backend/data/tally.py`: LLM prompt updated to generate themed
prompts; validation now per-theme with blank-string rejection
- `backend/api/features/chat/routes.py`: New `SuggestedTheme` model;
endpoint returns `themes[]` (example payload after this list)
- `frontend/copilot/components/EmptySession/EmptySession.tsx`: Uses
generated API types directly (no cast)
- `frontend/copilot/components/EmptySession/helpers.ts`:
`DEFAULT_THEMES` replaces `DEFAULT_QUICK_ACTIONS`; `getSuggestionThemes`
normalizes partial API responses and distributes legacy `"General"`
prompts across themes
- `frontend/copilot/components/EmptySession/components/SuggestionThemes/`:
New popover component with theme icons and loading states
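
For reference, a sketch of the grouped payload the endpoint now returns,
typed against the orval-generated model (the import path is an assumption
based on the generated file):

```ts
// Import path is assumed; the model itself is generated by orval
import type { SuggestedPromptsResponse } from "@/app/api/__generated__/models/suggestedPromptsResponse";

// GET /suggested-prompts now returns themes grouped by name
// (previously { prompts: string[] })
const example: SuggestedPromptsResponse = {
  themes: [
    { name: "Learn", prompts: ["What integrations are available?"] },
    { name: "Automate", prompts: ["Summarize my emails every morning"] },
  ],
};

// Empty case when the user has no business understanding yet
const empty: SuggestedPromptsResponse = { themes: [] };
```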

### Checklist 📋

#### For code changes:
- [x] I have clearly listed my changes in the PR description
- [x] I have made a test plan
- [x] I have tested my changes according to the test plan:
  - [x] Verify themed suggestion buttons render on CoPilot empty session
  - [x] Click each theme button and confirm popover opens with prompts
  - [x] Click a prompt and confirm it sends the message
  - [x] Verify fallback to default themes when API returns no custom prompts
  - [x] Verify legacy users' personalized prompts are preserved and visible


🤖 Generated with [Claude Code](https://claude.com/claude-code)

---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
Commit a5f9c43a41 (parent 1240f38f75)
Ubbe, 2026-03-19 18:46:12 +08:00, committed by GitHub
11 changed files with 428 additions and 172 deletions

`backend/api/features/chat/routes.py`:

@@ -898,12 +898,19 @@ async def session_assign_user(
# ========== Suggested Prompts ==========
class SuggestedPromptsResponse(BaseModel):
"""Response model for user-specific suggested prompts."""
class SuggestedTheme(BaseModel):
"""A themed group of suggested prompts."""
name: str
prompts: list[str]
class SuggestedPromptsResponse(BaseModel):
"""Response model for user-specific suggested prompts grouped by theme."""
themes: list[SuggestedTheme]
@router.get(
"/suggested-prompts",
dependencies=[Security(auth.requires_user)],
@@ -912,17 +919,21 @@ async def get_suggested_prompts(
user_id: Annotated[str, Security(auth.get_user_id)],
) -> SuggestedPromptsResponse:
"""
Get LLM-generated suggested prompts for the authenticated user.
Get LLM-generated suggested prompts grouped by theme.
Returns personalized quick-action prompts based on the user's
business understanding. Returns an empty list if no custom prompts
are available.
business understanding. Returns empty themes list if no custom
prompts are available.
"""
understanding = await get_business_understanding(user_id)
if understanding is None:
return SuggestedPromptsResponse(prompts=[])
if understanding is None or not understanding.suggested_prompts:
return SuggestedPromptsResponse(themes=[])
return SuggestedPromptsResponse(prompts=understanding.suggested_prompts)
themes = [
SuggestedTheme(name=name, prompts=prompts)
for name, prompts in understanding.suggested_prompts.items()
]
return SuggestedPromptsResponse(themes=themes)
# ========== Configuration ==========

Tests for `backend/api/features/chat/routes.py`:

@@ -418,44 +418,51 @@ def _mock_get_business_understanding(
)
def test_suggested_prompts_returns_prompts(
def test_suggested_prompts_returns_themes(
mocker: pytest_mock.MockerFixture,
test_user_id: str,
) -> None:
"""User with understanding and prompts gets them back."""
"""User with themed prompts gets them back as themes list."""
mock_understanding = MagicMock()
mock_understanding.suggested_prompts = ["Do X", "Do Y", "Do Z"]
mock_understanding.suggested_prompts = {
"Learn": ["L1", "L2"],
"Create": ["C1"],
}
_mock_get_business_understanding(mocker, return_value=mock_understanding)
response = client.get("/suggested-prompts")
assert response.status_code == 200
assert response.json() == {"prompts": ["Do X", "Do Y", "Do Z"]}
data = response.json()
assert "themes" in data
themes_by_name = {t["name"]: t["prompts"] for t in data["themes"]}
assert themes_by_name["Learn"] == ["L1", "L2"]
assert themes_by_name["Create"] == ["C1"]
def test_suggested_prompts_no_understanding(
mocker: pytest_mock.MockerFixture,
test_user_id: str,
) -> None:
"""User with no understanding gets empty list."""
"""User with no understanding gets empty themes list."""
_mock_get_business_understanding(mocker, return_value=None)
response = client.get("/suggested-prompts")
assert response.status_code == 200
assert response.json() == {"prompts": []}
assert response.json() == {"themes": []}
def test_suggested_prompts_empty_prompts(
mocker: pytest_mock.MockerFixture,
test_user_id: str,
) -> None:
"""User with understanding but no prompts gets empty list."""
"""User with understanding but empty prompts gets empty themes list."""
mock_understanding = MagicMock()
mock_understanding.suggested_prompts = []
mock_understanding.suggested_prompts = {}
_mock_get_business_understanding(mocker, return_value=mock_understanding)
response = client.get("/suggested-prompts")
assert response.status_code == 200
assert response.json() == {"prompts": []}
assert response.json() == {"themes": []}

`backend/data/tally.py`:

@@ -40,6 +40,9 @@ _MAX_PAGES = 100
# LLM extraction timeout (seconds)
_LLM_TIMEOUT = 30
SUGGESTION_THEMES = ["Learn", "Create", "Automate", "Organize"]
PROMPTS_PER_THEME = 5
def mask_email(email: str) -> str:
"""Mask an email for safe logging: 'alice@example.com' -> 'a***e@example.com'."""
@@ -331,9 +334,11 @@ Fields:
- current_software (list of strings): software/tools currently used
- existing_automation (list of strings): existing automations
- additional_notes (string): any additional context
- suggested_prompts (list of 5 strings): short action prompts (each under 20 words) that would help \
this person get started with automating their work. Should be specific to their industry, role, and \
pain points; actionable and conversational in tone; focused on automation opportunities.
- suggested_prompts (object with keys "Learn", "Create", "Automate", "Organize"): for each key, \
provide a list of 5 short action prompts (each under 20 words) that would help this person. \
"Learn" = questions about AutoGPT features; "Create" = content/document generation tasks; \
"Automate" = recurring workflow automation ideas; "Organize" = structuring/prioritizing tasks. \
Should be specific to their industry, role, and pain points; actionable and conversational in tone.
Form data:
"""
@@ -381,23 +386,26 @@ async def extract_business_understanding_from_tally(
# Filter out null values before constructing
cleaned = {k: v for k, v in data.items() if v is not None}
# Validate suggested_prompts: filter >20 words, keep top 3
raw_prompts = cleaned.get("suggested_prompts", [])
if isinstance(raw_prompts, list):
valid = [
p.strip()
for p in raw_prompts
if isinstance(p, str) and len(p.strip().split()) <= 20
]
# This will keep up to 3 suggestions
short_prompts = valid[:3] if valid else None
if short_prompts:
cleaned["suggested_prompts"] = short_prompts
# Validate suggested_prompts: themed dict, filter >20 words, cap at 5 per theme
raw_prompts = cleaned.get("suggested_prompts", {})
if isinstance(raw_prompts, dict):
themed: dict[str, list[str]] = {}
for theme in SUGGESTION_THEMES:
theme_prompts = raw_prompts.get(theme, [])
if not isinstance(theme_prompts, list):
continue
valid = [
s
for p in theme_prompts
if isinstance(p, str) and (s := p.strip()) and len(s.split()) <= 20
]
if valid:
themed[theme] = valid[:PROMPTS_PER_THEME]
if themed:
cleaned["suggested_prompts"] = themed
else:
# We don't want to add a None-valued suggested_prompts field
cleaned.pop("suggested_prompts", None)
else:
# suggested_prompts must be a list - removing it as it's not one here
cleaned.pop("suggested_prompts", None)
return BusinessUnderstandingInput(**cleaned)

Tests for `backend/data/tally.py`:

@@ -284,7 +284,7 @@ async def test_populate_understanding_full_flow():
],
}
mock_input = MagicMock()
mock_input.suggested_prompts = ["Prompt 1", "Prompt 2", "Prompt 3"]
mock_input.suggested_prompts = {"Learn": ["P1"], "Create": ["P2"]}
with (
patch(
@@ -398,22 +398,25 @@ def test_extraction_prompt_no_format_placeholders():
@pytest.mark.asyncio
async def test_extract_business_understanding_from_tally_success():
"""Happy path: LLM returns valid JSON that maps to BusinessUnderstandingInput."""
async def test_extract_business_understanding_themed_prompts():
"""Happy path: LLM returns themed prompts as dict."""
mock_choice = MagicMock()
mock_choice.message.content = json.dumps(
{
"user_name": "Alice",
"business_name": "Acme Corp",
"industry": "Technology",
"pain_points": ["manual reporting"],
"suggested_prompts": [
"Automate weekly reports",
"Set up invoice processing",
"Create a customer onboarding flow",
"Track project deadlines automatically",
"Send follow-up emails after meetings",
],
"suggested_prompts": {
"Learn": ["Learn 1", "Learn 2", "Learn 3", "Learn 4", "Learn 5"],
"Create": [
"Create 1",
"Create 2",
"Create 3",
"Create 4",
"Create 5",
],
"Automate": ["Auto 1", "Auto 2", "Auto 3", "Auto 4", "Auto 5"],
"Organize": ["Org 1", "Org 2", "Org 3", "Org 4", "Org 5"],
},
}
)
mock_response = MagicMock()
@@ -426,33 +429,24 @@ async def test_extract_business_understanding_from_tally_success():
result = await extract_business_understanding_from_tally("Q: Name?\nA: Alice")
assert result.user_name == "Alice"
assert result.business_name == "Acme Corp"
assert result.industry == "Technology"
assert result.pain_points == ["manual reporting"]
# suggested_prompts validated and sliced to top 3
assert result.suggested_prompts == [
"Automate weekly reports",
"Set up invoice processing",
"Create a customer onboarding flow",
]
assert result.suggested_prompts is not None
assert len(result.suggested_prompts) == 4
assert len(result.suggested_prompts["Learn"]) == 5
@pytest.mark.asyncio
async def test_extract_business_understanding_from_tally_filters_long_prompts():
"""Prompts exceeding 20 words are excluded and only top 3 are kept."""
async def test_extract_themed_prompts_filters_long_and_unknown_keys():
"""Long prompts are filtered, unknown keys are dropped, each theme capped at 5."""
long_prompt = " ".join(["word"] * 21)
mock_choice = MagicMock()
mock_choice.message.content = json.dumps(
{
"user_name": "Alice",
"suggested_prompts": [
long_prompt,
"Short prompt one",
long_prompt,
"Short prompt two",
"Short prompt three",
"Short prompt four",
],
"suggested_prompts": {
"Learn": [long_prompt, "Valid learn 1", "Valid learn 2"],
"UnknownTheme": ["Should be dropped"],
"Automate": ["A1", "A2", "A3", "A4", "A5", "A6"],
},
}
)
mock_response = MagicMock()
@@ -464,11 +458,13 @@ async def test_extract_business_understanding_from_tally_filters_long_prompts():
with patch("backend.data.tally.AsyncOpenAI", return_value=mock_client):
result = await extract_business_understanding_from_tally("Q: Name?\nA: Alice")
assert result.suggested_prompts == [
"Short prompt one",
"Short prompt two",
"Short prompt three",
]
assert result.suggested_prompts is not None
# Unknown key dropped
assert "UnknownTheme" not in result.suggested_prompts
# Long prompt filtered
assert result.suggested_prompts["Learn"] == ["Valid learn 1", "Valid learn 2"]
# Capped at 5
assert result.suggested_prompts["Automate"] == ["A1", "A2", "A3", "A4", "A5"]
@pytest.mark.asyncio

`backend/data/understanding.py`:

@@ -31,6 +31,25 @@ def _json_to_list(value: Any) -> list[str]:
return []
def _json_to_themed_prompts(value: Any) -> dict[str, list[str]]:
"""Convert Json field to themed prompts dict.
Handles both the new ``dict[str, list[str]]`` format and the legacy
``list[str]`` format. Legacy rows are placed under a ``"General"`` key so
existing personalised prompts remain readable until a backfill regenerates
them into the proper themed shape.
"""
if isinstance(value, dict):
return {
k: [i for i in v if isinstance(i, str)]
for k, v in value.items()
if isinstance(k, str) and isinstance(v, list)
}
if isinstance(value, list) and value:
return {"General": [str(p) for p in value if isinstance(p, str)]}
return {}
class BusinessUnderstandingInput(pydantic.BaseModel):
"""Input model for updating business understanding - all fields optional for incremental updates."""
@@ -87,8 +106,8 @@ class BusinessUnderstandingInput(pydantic.BaseModel):
)
# Suggested prompts (UI-only, not included in system prompt)
suggested_prompts: Optional[list[str]] = pydantic.Field(
None, description="LLM-generated suggested prompts based on business context"
suggested_prompts: Optional[dict[str, list[str]]] = pydantic.Field(
None, description="LLM-generated suggested prompts grouped by theme"
)
@@ -128,7 +147,7 @@ class BusinessUnderstanding(pydantic.BaseModel):
additional_notes: Optional[str] = None
# Suggested prompts (UI-only, not included in system prompt)
suggested_prompts: list[str] = pydantic.Field(default_factory=list)
suggested_prompts: dict[str, list[str]] = pydantic.Field(default_factory=dict)
@classmethod
def from_db(cls, db_record: CoPilotUnderstanding) -> "BusinessUnderstanding":
@@ -157,7 +176,7 @@ class BusinessUnderstanding(pydantic.BaseModel):
current_software=_json_to_list(business.get("current_software")),
existing_automation=_json_to_list(business.get("existing_automation")),
additional_notes=business.get("additional_notes"),
suggested_prompts=_json_to_list(data.get("suggested_prompts")),
suggested_prompts=_json_to_themed_prompts(data.get("suggested_prompts")),
)

Tests for `backend/data/understanding.py`:

@@ -2,10 +2,12 @@
from datetime import datetime, timezone
from typing import Any
from unittest.mock import MagicMock
from backend.data.understanding import (
BusinessUnderstanding,
BusinessUnderstandingInput,
_json_to_themed_prompts,
format_understanding_for_prompt,
merge_business_understanding_data,
)
@@ -16,72 +18,113 @@ def _make_input(**kwargs: Any) -> BusinessUnderstandingInput:
return BusinessUnderstandingInput.model_validate(kwargs)
# ─── merge_business_understanding_data: suggested_prompts ─────────────
# ─── merge_business_understanding_data: themed prompts ─────────────────
def test_merge_suggested_prompts_overwrites_existing():
"""New suggested_prompts should fully replace existing ones (not append)."""
def test_merge_themed_prompts_overwrites_existing():
"""New themed prompts should fully replace existing ones (not merge)."""
existing = {
"name": "Alice",
"business": {"industry": "Tech", "version": 1},
"suggested_prompts": ["Old prompt 1", "Old prompt 2"],
"suggested_prompts": {
"Learn": ["Old learn prompt"],
"Create": ["Old create prompt"],
},
}
input_data = _make_input(
suggested_prompts=["New prompt A", "New prompt B", "New prompt C"],
)
new_prompts = {
"Automate": ["Schedule daily reports", "Set up email alerts"],
"Organize": ["Sort inbox by priority"],
}
input_data = _make_input(suggested_prompts=new_prompts)
result = merge_business_understanding_data(existing, input_data)
assert result["suggested_prompts"] == [
"New prompt A",
"New prompt B",
"New prompt C",
]
assert result["suggested_prompts"] == new_prompts
def test_merge_suggested_prompts_none_preserves_existing():
"""When input has suggested_prompts=None, existing prompts are preserved."""
def test_merge_themed_prompts_none_preserves_existing():
"""When input has suggested_prompts=None, existing themed prompts are preserved."""
existing_prompts = {
"Learn": ["How to automate?"],
"Create": ["Build a chatbot"],
}
existing = {
"name": "Alice",
"business": {"industry": "Tech", "version": 1},
"suggested_prompts": ["Keep me"],
"suggested_prompts": existing_prompts,
}
input_data = _make_input(industry="Finance")
result = merge_business_understanding_data(existing, input_data)
assert result["suggested_prompts"] == ["Keep me"]
assert result["suggested_prompts"] == existing_prompts
assert result["business"]["industry"] == "Finance"
def test_merge_suggested_prompts_added_to_empty_data():
"""Suggested prompts are set at top level even when starting from empty data."""
existing: dict[str, Any] = {}
input_data = _make_input(suggested_prompts=["Prompt 1"])
result = merge_business_understanding_data(existing, input_data)
assert result["suggested_prompts"] == ["Prompt 1"]
# ─── from_db: themed prompts deserialization ───────────────────────────
def test_merge_suggested_prompts_empty_list_overwrites():
"""An explicit empty list should overwrite existing prompts."""
existing: dict[str, Any] = {
"suggested_prompts": ["Old prompt"],
"business": {"version": 1},
def test_from_db_themed_prompts():
"""from_db correctly deserializes a themed dict for suggested_prompts."""
themed = {
"Learn": ["What can I automate?"],
"Create": ["Build a workflow"],
}
db_record = MagicMock()
db_record.id = "test-id"
db_record.userId = "user-1"
db_record.createdAt = datetime.now(tz=timezone.utc)
db_record.updatedAt = datetime.now(tz=timezone.utc)
db_record.data = {
"name": "Alice",
"business": {"industry": "Tech", "version": 1},
"suggested_prompts": themed,
}
input_data = _make_input(suggested_prompts=[])
result = merge_business_understanding_data(existing, input_data)
result = BusinessUnderstanding.from_db(db_record)
assert result["suggested_prompts"] == []
assert result.suggested_prompts == themed
# ─── format_understanding_for_prompt: excludes suggested_prompts ──────
def test_from_db_legacy_list_prompts_preserved_under_general():
"""from_db preserves legacy list[str] prompts under a 'General' key."""
db_record = MagicMock()
db_record.id = "test-id"
db_record.userId = "user-1"
db_record.createdAt = datetime.now(tz=timezone.utc)
db_record.updatedAt = datetime.now(tz=timezone.utc)
db_record.data = {
"name": "Alice",
"business": {"industry": "Tech", "version": 1},
"suggested_prompts": ["Old prompt 1", "Old prompt 2"],
}
result = BusinessUnderstanding.from_db(db_record)
assert result.suggested_prompts == {"General": ["Old prompt 1", "Old prompt 2"]}
def test_format_understanding_excludes_suggested_prompts():
"""suggested_prompts is UI-only and must NOT appear in the system prompt."""
# ─── _json_to_themed_prompts helper ───────────────────────────────────
def test_json_to_themed_prompts_with_dict():
value = {"Learn": ["a", "b"], "Create": ["c"]}
assert _json_to_themed_prompts(value) == {"Learn": ["a", "b"], "Create": ["c"]}
def test_json_to_themed_prompts_with_list_returns_general():
assert _json_to_themed_prompts(["a", "b"]) == {"General": ["a", "b"]}
def test_json_to_themed_prompts_with_none_returns_empty():
assert _json_to_themed_prompts(None) == {}
# ─── format_understanding_for_prompt: excludes themed prompts ──────────
def test_format_understanding_excludes_themed_prompts():
"""Themed suggested_prompts are UI-only and must NOT appear in the system prompt."""
understanding = BusinessUnderstanding(
id="test-id",
user_id="user-1",
@@ -89,7 +132,10 @@ def test_format_understanding_excludes_suggested_prompts():
updated_at=datetime.now(tz=timezone.utc),
user_name="Alice",
industry="Technology",
suggested_prompts=["Automate reports", "Set up alerts", "Track KPIs"],
suggested_prompts={
"Learn": ["Automate reports"],
"Create": ["Set up alerts", "Track KPIs"],
},
)
formatted = format_understanding_for_prompt(understanding)

`frontend/copilot/components/EmptySession/EmptySession.tsx`:

@@ -2,18 +2,17 @@
import { useGetV2GetSuggestedPrompts } from "@/app/api/__generated__/endpoints/chat/chat";
import { ChatInput } from "@/app/(platform)/copilot/components/ChatInput/ChatInput";
import { Button } from "@/components/atoms/Button/Button";
import { Skeleton } from "@/components/atoms/Skeleton/Skeleton";
import { Text } from "@/components/atoms/Text/Text";
import { useSupabase } from "@/lib/supabase/hooks/useSupabase";
import { SpinnerGapIcon } from "@phosphor-icons/react";
import { motion } from "framer-motion";
import { useEffect, useState } from "react";
import {
getGreetingName,
getInputPlaceholder,
getQuickActions,
getSuggestionThemes,
} from "./helpers";
import { SuggestionThemes } from "./components/SuggestionThemes/SuggestionThemes";
interface Props {
inputLayoutId: string;
@@ -40,19 +39,16 @@ export function EmptySession({
useGetV2GetSuggestedPrompts({
query: { staleTime: Infinity },
});
const customPrompts =
const themes = getSuggestionThemes(
suggestedPromptsResponse?.status === 200
? suggestedPromptsResponse.data.prompts
: undefined;
const quickActions = getQuickActions(customPrompts);
const [loadingAction, setLoadingAction] = useState<string | null>(null);
? suggestedPromptsResponse.data.themes
: undefined,
);
const [inputPlaceholder, setInputPlaceholder] = useState(
getInputPlaceholder(),
);
// Use matchMedia instead of resize event — fires only when crossing
// the 500px and 1081px breakpoints defined in getInputPlaceholder(),
// rather than dozens of times per second during a window drag.
useEffect(() => {
function update() {
setInputPlaceholder(getInputPlaceholder(window.innerWidth));
@@ -68,17 +64,6 @@ export function EmptySession({
};
}, []);
async function handleQuickActionClick(action: string) {
if (isCreatingSession || loadingAction) return;
setLoadingAction(action);
try {
await onSend(action);
} finally {
setLoadingAction(null);
}
}
return (
<div className="flex h-full flex-1 items-center justify-center overflow-y-auto bg-[#f8f8f9] px-0 py-5 md:px-6 md:py-10">
<motion.div
@@ -115,34 +100,19 @@ export function EmptySession({
</div>
</div>
<div className="flex flex-wrap items-center justify-center gap-3 overflow-x-auto [-ms-overflow-style:none] [scrollbar-width:none] [&::-webkit-scrollbar]:hidden">
{isLoadingPrompts
? Array.from({ length: 3 }, (_, i) => (
<Skeleton key={i} className="h-10 w-64 shrink-0 rounded-full" />
))
: quickActions.map((action) => (
<Button
key={action}
type="button"
variant="outline"
size="small"
onClick={() => void handleQuickActionClick(action)}
disabled={isCreatingSession || loadingAction !== null}
aria-busy={loadingAction === action}
leftIcon={
loadingAction === action ? (
<SpinnerGapIcon
className="h-4 w-4 animate-spin"
weight="bold"
/>
) : null
}
className="h-auto shrink-0 border-zinc-300 px-3 py-2 text-[.9rem] text-zinc-600"
>
{action}
</Button>
))}
</div>
{isLoadingPrompts ? (
<div className="flex flex-wrap items-center justify-center gap-3">
{Array.from({ length: 4 }, (_, i) => (
<Skeleton key={i} className="h-10 w-28 shrink-0 rounded-full" />
))}
</div>
) : (
<SuggestionThemes
themes={themes}
onSend={onSend}
disabled={isCreatingSession}
/>
)}
</motion.div>
</div>
);

`frontend/copilot/components/EmptySession/components/SuggestionThemes/SuggestionThemes.tsx`:

@@ -0,0 +1,100 @@
"use client";
import {
Popover,
PopoverContent,
PopoverTrigger,
} from "@/components/molecules/Popover/Popover";
import { Button } from "@/components/atoms/Button/Button";
import {
BookOpenIcon,
PaintBrushIcon,
LightningIcon,
ListChecksIcon,
SpinnerGapIcon,
} from "@phosphor-icons/react";
import { useState } from "react";
import type { SuggestionTheme } from "../../helpers";
const THEME_ICONS: Record<string, typeof BookOpenIcon> = {
Learn: BookOpenIcon,
Create: PaintBrushIcon,
Automate: LightningIcon,
Organize: ListChecksIcon,
};
interface Props {
themes: SuggestionTheme[];
onSend: (prompt: string) => void | Promise<void>;
disabled?: boolean;
}
export function SuggestionThemes({ themes, onSend, disabled }: Props) {
const [openTheme, setOpenTheme] = useState<string | null>(null);
const [loadingPrompt, setLoadingPrompt] = useState<string | null>(null);
async function handlePromptClick(theme: string, prompt: string) {
if (disabled || loadingPrompt) return;
setLoadingPrompt(`${theme}:${prompt}`);
try {
await onSend(prompt);
} finally {
setLoadingPrompt(null);
setOpenTheme(null);
}
}
return (
<div className="flex flex-wrap items-center justify-center gap-3">
{themes.map((theme) => {
const Icon = THEME_ICONS[theme.name];
return (
<Popover
key={theme.name}
open={openTheme === theme.name}
onOpenChange={(open) => setOpenTheme(open ? theme.name : null)}
>
<PopoverTrigger asChild>
<Button
type="button"
variant="outline"
size="small"
disabled={disabled || loadingPrompt !== null}
className="shrink-0 gap-2 border-zinc-300 px-3 py-2 text-[.9rem] text-zinc-600"
>
{Icon && <Icon size={16} weight="regular" />}
{theme.name}
</Button>
</PopoverTrigger>
<PopoverContent align="center" className="w-80 p-2">
<ul className="grid gap-0.5">
{theme.prompts.map((prompt) => (
<li key={prompt}>
<button
type="button"
disabled={loadingPrompt !== null}
onClick={() => void handlePromptClick(theme.name, prompt)}
className="w-full rounded-md px-3 py-2 text-left text-sm text-zinc-700 transition-colors hover:bg-zinc-100 disabled:opacity-50"
>
{loadingPrompt === `${theme.name}:${prompt}` ? (
<span className="flex items-center gap-2">
<SpinnerGapIcon
className="h-4 w-4 animate-spin"
weight="bold"
/>
{prompt}
</span>
) : (
prompt
)}
</button>
</li>
))}
</ul>
</PopoverContent>
</Popover>
);
})}
</div>
);
}

`frontend/copilot/components/EmptySession/helpers.ts`:

@@ -12,17 +12,87 @@ export function getInputPlaceholder(width?: number) {
return "What's your role and what eats up most of your day? e.g. 'I'm a recruiter and I hate...'";
}
const DEFAULT_QUICK_ACTIONS = [
"I don't know where to start, just ask me stuff",
"I do the same thing every week and it's killing me",
"Help me find where I'm wasting my time",
export interface SuggestionTheme {
name: string;
prompts: string[];
}
export const DEFAULT_THEMES: SuggestionTheme[] = [
{
name: "Learn",
prompts: [
"What can AutoGPT do for me?",
"Show me how agents work",
"What integrations are available?",
"How do I schedule an agent?",
"What are the most popular agents?",
],
},
{
name: "Create",
prompts: [
"Draft a weekly status report",
"Generate social media posts for my business",
"Create a competitive analysis summary",
"Write onboarding emails for new hires",
"Build a content calendar for next month",
],
},
{
name: "Automate",
prompts: [
"Monitor relevant websites for changes",
"Send me a daily news digest on my industry",
"Auto-reply to common customer questions",
"Track price changes on products I sell",
"Summarize my emails every morning",
],
},
{
name: "Organize",
prompts: [
"Sort my bookmarks into categories",
"Create a project timeline from my notes",
"Prioritize my task list by urgency",
"Build a decision matrix for vendor selection",
"Organize my meeting notes into action items",
],
},
];
export function getQuickActions(customPrompts?: string[]) {
if (customPrompts && customPrompts.length > 0) {
return customPrompts;
export function getSuggestionThemes(
apiThemes?: SuggestionTheme[],
): SuggestionTheme[] {
if (!apiThemes?.length) {
return DEFAULT_THEMES;
}
return DEFAULT_QUICK_ACTIONS;
const promptsByTheme = new Map(
apiThemes.map((theme) => [theme.name, theme.prompts] as const),
);
// Legacy users have prompts under "General" — distribute them across themes
const generalPrompts = (promptsByTheme.get("General") ?? []).filter(
(p) => p.trim().length > 0,
);
return DEFAULT_THEMES.map((theme, idx) => {
const personalized = (promptsByTheme.get(theme.name) ?? []).filter(
(p) => p.trim().length > 0,
);
// Spread legacy "General" prompts round-robin across themes
const legacySlice = generalPrompts.filter(
(_, i) => i % DEFAULT_THEMES.length === idx,
);
return {
name: theme.name,
prompts: Array.from(
new Set([...personalized, ...legacySlice, ...theme.prompts]),
).slice(0, theme.prompts.length),
};
});
}
export function getGreetingName(user?: User | null) {

Generated API model `suggestedPromptsResponse.ts` (orval):

@@ -0,0 +1,15 @@
/**
* Generated by orval v7.13.0 🍺
* Do not edit manually.
* AutoGPT Agent Server
* This server is used to execute agents that are created by the AutoGPT system.
* OpenAPI spec version: 0.1
*/
import type { SuggestedTheme } from "./suggestedTheme";
/**
* Response model for user-specific suggested prompts grouped by theme.
*/
export interface SuggestedPromptsResponse {
themes: SuggestedTheme[];
}

Generated OpenAPI spec:

@@ -1396,7 +1396,7 @@
"get": {
"tags": ["v2", "chat", "chat"],
"summary": "Get Suggested Prompts",
"description": "Get LLM-generated suggested prompts for the authenticated user.\n\nReturns personalized quick-action prompts based on the user's\nbusiness understanding. Returns an empty list if no custom prompts\nare available.",
"description": "Get LLM-generated suggested prompts grouped by theme.\n\nReturns personalized quick-action prompts based on the user's\nbusiness understanding. Returns empty themes list if no custom\nprompts are available.",
"operationId": "getV2GetSuggestedPrompts",
"responses": {
"200": {
@@ -13126,6 +13126,20 @@
},
"SuggestedPromptsResponse": {
"properties": {
"themes": {
"items": { "$ref": "#/components/schemas/SuggestedTheme" },
"type": "array",
"title": "Themes"
}
},
"type": "object",
"required": ["themes"],
"title": "SuggestedPromptsResponse",
"description": "Response model for user-specific suggested prompts grouped by theme."
},
"SuggestedTheme": {
"properties": {
"name": { "type": "string", "title": "Name" },
"prompts": {
"items": { "type": "string" },
"type": "array",
@@ -13133,9 +13147,9 @@
}
},
"type": "object",
"required": ["prompts"],
"title": "SuggestedPromptsResponse",
"description": "Response model for user-specific suggested prompts."
"required": ["name", "prompts"],
"title": "SuggestedTheme",
"description": "A themed group of suggested prompts."
},
"SuggestionsResponse": {
"properties": {