Mirror of https://github.com/Significant-Gravitas/AutoGPT.git, synced 2026-04-08 03:00:28 -04:00.
## Summary Reduce CoPilot per-turn token overhead by systematically trimming tool descriptions, parameter schemas, and system prompt content. All 35 MCP tool schemas are passed on every SDK call — this PR reduces their size. ### Strategy 1. **Tool descriptions**: Trimmed verbose multi-sentence explanations to concise single-sentence summaries while preserving meaning 2. **Parameter schemas**: Shortened parameter descriptions to essential info, removed some `default` values (handled in code) 3. **System prompt**: Condensed `_SHARED_TOOL_NOTES` and storage supplement template in `prompting.py` 4. **Cross-tool references**: Removed duplicate workflow hints (e.g. "call find_block before run_block" appeared in BOTH tools — kept only in the dependent tool). Critical cross-tool references retained (e.g. `continue_run_block` in `run_block`, `fix_agent_graph` in `validate_agent`, `get_doc_page` in `search_docs`, `web_fetch` preference in `browser_navigate`) ### Token Impact | Metric | Before | After | Reduction | |--------|--------|-------|-----------| | System Prompt | ~865 tokens | ~497 tokens | 43% | | Tool Schemas | ~9,744 tokens | ~6,470 tokens | 34% | | **Grand Total** | **~10,609 tokens** | **~6,967 tokens** | **34%** | Saves **~3,642 tokens per conversation turn**. ### Key Decisions - **Mostly description changes**: Tool logic, parameters, and types unchanged. However, some schema-level `default` fields were removed (e.g. `save` in `customize_agent`) — these are machine-readable metadata, not just prose, and may affect LLM behavior. - **Quality preserved**: All descriptions still convey what the tool does and essential usage patterns - **Cross-references trimmed carefully**: Kept prerequisite hints in the dependent tool (run_block mentions find_block) but removed the reverse (find_block no longer mentions run_block). Critical cross-tool guidance retained where removal would degrade model behavior. 
- **`run_time` description fixed**: Added missing supported values (today, last 30 days, ISO datetime) per review feedback ### Future Optimization The SDK passes all 35 tools on every call. The MCP protocol's `list_tools()` handler supports dynamic tool registration — a follow-up PR could implement lazy tool loading (register core tools + a discovery meta-tool) to further reduce per-turn token cost. ### Changes - Trimmed descriptions across 25 tool files - Condensed `_SHARED_TOOL_NOTES` and `_build_storage_supplement` in `prompting.py` - Fixed `run_time` schema description in `agent_output.py` ### Checklist #### For code changes: - [x] I have clearly listed my changes in the PR description - [x] I have made a test plan - [x] I have tested my changes according to the test plan: - [x] All 273 copilot tests pass locally - [x] All 35 tools load and produce valid schemas - [x] Before/after token dumps compared - [x] Formatting passes (`poetry run format`) - [x] CI green
144 lines
4.2 KiB
Python
144 lines
4.2 KiB
Python
"""Web fetch tool — safely retrieve public web page content."""
|
|
|
|
import logging
|
|
from typing import Any
|
|
|
|
import aiohttp
|
|
import html2text
|
|
|
|
from backend.copilot.model import ChatSession
|
|
from backend.util.request import Requests
|
|
|
|
from .base import BaseTool
|
|
from .models import ErrorResponse, ToolResponseBase, WebFetchResponse
|
|
|
|
# Module-level logger for this tool.
logger = logging.getLogger(__name__)

# Limits
_MAX_CONTENT_BYTES = 102_400  # 100 KB download cap
_REQUEST_TIMEOUT = aiohttp.ClientTimeout(total=15)  # total wall-clock budget per request (seconds)
|
|
|
|
# Content types we'll read as text
|
|
_TEXT_CONTENT_TYPES = {
|
|
"text/html",
|
|
"text/plain",
|
|
"text/xml",
|
|
"text/csv",
|
|
"text/markdown",
|
|
"application/json",
|
|
"application/xml",
|
|
"application/xhtml+xml",
|
|
"application/rss+xml",
|
|
"application/atom+xml",
|
|
# RFC 7807 — JSON problem details; used by many REST APIs for error responses
|
|
"application/problem+json",
|
|
"application/problem+xml",
|
|
"application/ld+json",
|
|
}
|
|
|
|
|
|
def _is_text_content(content_type: str) -> bool:
|
|
base = content_type.split(";")[0].strip().lower()
|
|
return base in _TEXT_CONTENT_TYPES or base.startswith("text/")
|
|
|
|
|
|
def _html_to_text(html: str) -> str:
    """Convert an HTML document to readable markdown-style text.

    Hyperlinks are preserved, image markup is dropped, and line
    wrapping is disabled so paragraphs stay on single lines.
    """
    converter = html2text.HTML2Text()
    converter.ignore_links = False  # keep link targets in the output
    converter.ignore_images = True  # image tags are noise for the model
    converter.body_width = 0  # 0 disables hard line-wrapping
    return converter.handle(html)
|
|
|
|
|
|
class WebFetchTool(BaseTool):
    """Safely fetch content from a public URL using SSRF-protected HTTP."""

    @property
    def name(self) -> str:
        return "web_fetch"

    @property
    def description(self) -> str:
        return "Fetch a public web page. Public URLs only — internal addresses blocked. Returns readable text from HTML by default."

    @property
    def parameters(self) -> dict[str, Any]:
        # JSON Schema describing the tool's arguments.
        return {
            "type": "object",
            "properties": {
                "url": {
                    "type": "string",
                    "description": "Public HTTP/HTTPS URL.",
                },
                "extract_text": {
                    "type": "boolean",
                    "description": "Extract text from HTML (default: true).",
                    "default": True,
                },
            },
            "required": ["url"],
        }

    @property
    def requires_auth(self) -> bool:
        # Fetching public pages requires no user credentials.
        return False

    async def _execute(
        self,
        user_id: str | None,
        session: ChatSession,
        **kwargs: Any,
    ) -> ToolResponseBase:
        """Fetch ``url`` and return its textual content.

        Returns an ErrorResponse when the URL is missing, blocked by SSRF
        protection, unreachable, or serves a non-text content type;
        otherwise a WebFetchResponse with up to 100 KB of decoded content.
        """
        url: str = (kwargs.get("url") or "").strip()
        extract_text: bool = kwargs.get("extract_text", True)
        session_id = session.session_id if session else None

        if not url:
            return ErrorResponse(
                message="Please provide a URL to fetch.",
                error="missing_url",
                session_id=session_id,
            )

        try:
            # raise_for_status=False: non-2xx pages are still returned so the
            # model can see error bodies; single attempt keeps latency bounded.
            client = Requests(raise_for_status=False, retry_max_attempts=1)
            response = await client.get(url, timeout=_REQUEST_TIMEOUT)
        except ValueError as e:
            # validate_url raises ValueError for SSRF / blocked IPs
            return ErrorResponse(
                message=f"URL blocked: {e}",
                error="url_blocked",
                session_id=session_id,
            )
        except Exception as e:
            logger.warning(f"[web_fetch] Request failed for {url}: {e}")
            return ErrorResponse(
                message=f"Failed to fetch URL: {e}",
                error="fetch_failed",
                session_id=session_id,
            )

        content_type = response.headers.get("content-type", "")
        if not _is_text_content(content_type):
            return ErrorResponse(
                message=f"Non-text content type: {content_type.split(';')[0]}",
                error="unsupported_content_type",
                session_id=session_id,
            )

        # Cap the download at _MAX_CONTENT_BYTES and record whether we cut it
        # off; decode leniently since the cap may split a multi-byte UTF-8
        # sequence at the boundary.
        truncated = len(response.content) > _MAX_CONTENT_BYTES
        raw = response.content[:_MAX_CONTENT_BYTES]
        text = raw.decode("utf-8", errors="replace")

        if extract_text and "html" in content_type.lower():
            text = _html_to_text(text)

        return WebFetchResponse(
            message=f"Fetched {url}",
            url=response.url,
            status_code=response.status,
            content_type=content_type.split(";")[0].strip(),
            content=text,
            # Fix: previously hard-coded to False even when the body was
            # sliced to the 100 KB cap, misreporting capped pages as complete.
            truncated=truncated,
            session_id=session_id,
        )
|