mirror of
https://github.com/Significant-Gravitas/AutoGPT.git
synced 2026-04-08 03:00:28 -04:00
Uncouple Copilot task execution from the REST API server. This should help performance and scalability, and allows task execution to continue regardless of the state of the user's connection. - Resolves #12023 ### Changes 🏗️ - Add `backend.copilot.executor`->`CoPilotExecutor` (setup similar to `backend.executor`->`ExecutionManager`). This executor service uses RabbitMQ-based task distribution, and sticks with the existing Redis Streams setup for task output. It uses a cluster lock mechanism to ensure a task is only executed by one pod, and the `DatabaseManager` for pooled DB access. - Add `backend.data.db_accessors` for automatic choice of direct/proxied DB access Chat requests now flow: API → RabbitMQ → CoPilot Executor → Redis Streams → SSE Client. This enables horizontal scaling of chat processing and isolates long-running LLM operations from the API service. - Move non-API Copilot stuff into `backend.copilot` (from `backend.api.features.chat`) - Updated import paths for all usages - Move `backend.executor.database` to `backend.data.db_manager` and add methods for copilot executor - Updated import paths for all usages - Make `backend.copilot.db` RPC-compatible (-> DB ops return ~~Prisma~~ Pydantic models) - Make `backend.data.workspace` RPC-compatible - Make `backend.data.graphs.get_store_listed_graphs` RPC-compatible DX: - Add `copilot_executor` service to Docker setup Config: - Add `Config.num_copilot_workers` (default 5) and `Config.copilot_executor_port` (default 8008) - Remove unused `Config.agent_server_port` > [!WARNING] > **This change adds a new microservice to the system, with entrypoint `backend.copilot.executor`.** > The `docker compose` setup has been updated, but if you run the Platform on something else, you'll have to update your deployment config to include this new service. > > When running locally, the `CoPilotExecutor` uses port 8008 by default. 
### Checklist 📋 #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] Copilot works - [x] Processes messages when triggered - [x] Can use its tools #### For configuration changes: - [x] `.env.default` is updated or already compatible with my changes - [x] `docker-compose.yml` is updated or already compatible with my changes - [x] I have included a list of my configuration changes in the PR description (under **Changes**) --------- Co-authored-by: Zamil Majdy <zamil.majdy@agpt.co>
120 lines
4.4 KiB
Python
120 lines
4.4 KiB
Python
"""Tool for capturing user business understanding incrementally."""
|
|
|
|
import logging
|
|
from typing import Any
|
|
|
|
from backend.copilot.model import ChatSession
|
|
from backend.data.db_accessors import understanding_db
|
|
from backend.data.understanding import BusinessUnderstandingInput
|
|
|
|
from .base import BaseTool
|
|
from .models import ErrorResponse, ToolResponseBase, UnderstandingUpdatedResponse
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
|
class AddUnderstandingTool(BaseTool):
    """Chat tool that incrementally records the user's business context."""

    @property
    def name(self) -> str:
        return "add_understanding"

    @property
    def description(self) -> str:
        return """Capture and store information about the user's business context,
workflows, pain points, and automation goals. Call this tool whenever the user
shares information about their business. Each call incrementally adds to the
existing understanding - you don't need to provide all fields at once.

Use this to build a comprehensive profile that helps recommend better agents
and automations for the user's specific needs."""

    @property
    def parameters(self) -> dict[str, Any]:
        # Derive the tool's parameter spec from the Pydantic model so the two
        # definitions can never drift apart.
        model_schema = BusinessUnderstandingInput.model_json_schema()
        props: dict[str, Any] = {}
        for field_name, spec in model_schema.get("properties", {}).items():
            entry: dict[str, Any] = {"description": spec.get("description", "")}
            if "anyOf" in spec:
                # Optional[...] fields surface as an anyOf union; pick the
                # first non-null variant as the concrete type.
                variant = next(
                    (opt for opt in spec["anyOf"] if opt.get("type") != "null"),
                    None,
                )
                if variant is not None:
                    entry["type"] = variant.get("type", "string")
                    if "items" in variant:
                        entry["items"] = variant["items"]
            else:
                entry["type"] = spec.get("type", "string")
                if "items" in spec:
                    entry["items"] = spec["items"]
            props[field_name] = entry
        # Every field is optional: callers may supply any subset per call.
        return {"type": "object", "properties": props, "required": []}

    @property
    def requires_auth(self) -> bool:
        """True: the tool persists user-scoped data, so a user must be signed in."""
        return True

    async def _execute(
        self,
        user_id: str | None,
        session: ChatSession,
        **kwargs,
    ) -> ToolResponseBase:
        """
        Capture and store business understanding incrementally.

        Merge semantics per call:
        - String fields are overwritten if provided
        - List fields are appended (with deduplication)
        """
        sid = session.session_id

        # Guard: writing user-scoped data requires an authenticated user.
        if not user_id:
            return ErrorResponse(
                message="Authentication required to save business understanding.",
                session_id=sid,
            )

        # Guard: reject calls where no argument carries a value.
        if all(value is None for value in kwargs.values()):
            return ErrorResponse(
                message="Please provide at least one field to update.",
                session_id=sid,
            )

        # Keep only arguments that map onto fields of the input model.
        known = set(BusinessUnderstandingInput.model_fields.keys())
        payload = BusinessUnderstandingInput(
            **{name: value for name, value in kwargs.items() if name in known}
        )

        # Record which model fields actually received a value this call.
        updated_fields = [
            name
            for name, value in kwargs.items()
            if name in known and value is not None
        ]

        # Merge the new data into whatever understanding is already stored.
        understanding = await understanding_db().upsert_business_understanding(
            user_id, payload
        )

        # Summarize the stored record, dropping bookkeeping columns and
        # empty/unset values.
        snapshot = understanding.model_dump(
            exclude={"id", "user_id", "created_at", "updated_at"}
        )
        current = {
            key: value
            for key, value in snapshot.items()
            if value is not None and value != [] and value != ""
        }

        return UnderstandingUpdatedResponse(
            message=f"Updated understanding with: {', '.join(updated_fields)}. "
            "I now have a better picture of your business context.",
            session_id=sid,
            updated_fields=updated_fields,
            current_understanding=current,
        )