mirror of https://github.com/Significant-Gravitas/AutoGPT.git
synced 2026-02-05 04:15:08 -05:00
Compare commits
3 Commits
otto/secrt...fix/sentry
| Author | SHA1 | Date |
|---|---|---|
|  | d58df37238 |  |
|  | 4f908d5cb3 |  |
|  | 9c41512944 |  |
@@ -1,123 +0,0 @@
"""Save binary block outputs to workspace, return references instead of base64."""

import base64
import binascii
import hashlib
import logging
import uuid
from typing import Any

from backend.util.workspace import WorkspaceManager

logger = logging.getLogger(__name__)

BINARY_FIELDS = {"png", "jpeg", "pdf"}  # Base64 encoded
TEXT_FIELDS = {"svg"}  # Large text, save raw
SAVEABLE_FIELDS = BINARY_FIELDS | TEXT_FIELDS
SIZE_THRESHOLD = 1024  # Only process content > 1KB (string length, not decoded size)


async def process_binary_outputs(
    outputs: dict[str, list[Any]],
    workspace_manager: WorkspaceManager,
    block_name: str,
) -> dict[str, list[Any]]:
    """
    Replace binary data in block outputs with workspace:// references.

    Deduplicates identical content within a single call (e.g., same PDF
    appearing in both main_result and results).
    """
    cache: dict[str, str] = {}  # content_hash -> workspace_ref

    processed: dict[str, list[Any]] = {}
    for name, items in outputs.items():
        processed_items: list[Any] = []
        for item in items:
            processed_items.append(
                await _process_item(item, workspace_manager, block_name, cache)
            )
        processed[name] = processed_items
    return processed


async def _process_item(
    item: Any, wm: WorkspaceManager, block: str, cache: dict
) -> Any:
    """Recursively process an item, handling dicts and lists."""
    if isinstance(item, dict):
        return await _process_dict(item, wm, block, cache)
    if isinstance(item, list):
        processed: list[Any] = []
        for i in item:
            processed.append(await _process_item(i, wm, block, cache))
        return processed
    return item


async def _process_dict(
    data: dict, wm: WorkspaceManager, block: str, cache: dict
) -> dict:
    """Process a dict, saving binary fields and recursing into nested structures."""
    result: dict[str, Any] = {}

    for key, value in data.items():
        if (
            key in SAVEABLE_FIELDS
            and isinstance(value, str)
            and len(value) > SIZE_THRESHOLD
        ):
            content_hash = hashlib.sha256(value.encode()).hexdigest()

            if content_hash in cache:
                result[key] = cache[content_hash]
            elif ref := await _save(value, key, wm, block):
                cache[content_hash] = ref
                result[key] = ref
            else:
                result[key] = value  # Save failed, keep original

        elif isinstance(value, dict):
            result[key] = await _process_dict(value, wm, block, cache)
        elif isinstance(value, list):
            processed: list[Any] = []
            for i in value:
                processed.append(await _process_item(i, wm, block, cache))
            result[key] = processed
        else:
            result[key] = value

    return result


async def _save(value: str, field: str, wm: WorkspaceManager, block: str) -> str | None:
    """Save content to workspace, return workspace:// reference or None on failure."""
    try:
        if field in BINARY_FIELDS:
            content = _decode_base64(value)
            if content is None:
                return None
        else:
            content = value.encode("utf-8")

        ext = {"jpeg": "jpg"}.get(field, field)
        filename = f"{block.lower().replace(' ', '_')[:20]}_{field}_{uuid.uuid4().hex[:12]}.{ext}"

        file = await wm.write_file(content=content, filename=filename)
        return f"workspace://{file.id}"

    except Exception as e:
        logger.error(f"Failed to save {field} to workspace for block '{block}': {e}")
        return None


def _decode_base64(value: str) -> bytes | None:
    """Decode base64, handling data URI format. Returns None on failure."""
    try:
        if value.startswith("data:"):
            value = value.split(",", 1)[1] if "," in value else value
        # Normalize padding and use strict validation to prevent corrupted data
        padded = value + "=" * (-len(value) % 4)
        return base64.b64decode(padded, validate=True)
    except (binascii.Error, ValueError):
        return None
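Editor's aside (not part of the diff): the padding arithmetic in `_decode_base64` above can be checked with a few lines of standalone Python; the sample string below is hypothetical.

```python
import base64

# (-len(s)) % 4 gives the number of "=" characters needed to pad the string
# to a multiple of 4, which strict (validate=True) decoding requires.
s = "aGVsbG8"                      # base64 of b"hello" with the padding stripped
padded = s + "=" * (-len(s) % 4)   # -> "aGVsbG8=" (one "=" added)
assert base64.b64decode(padded, validate=True) == b"hello"
```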
@@ -8,16 +8,12 @@ from typing import Any
from pydantic_core import PydanticUndefined

from backend.api.features.chat.model import ChatSession
from backend.api.features.chat.tools.binary_output_processor import (
    process_binary_outputs,
)
from backend.data.block import get_block
from backend.data.execution import ExecutionContext
from backend.data.model import CredentialsMetaInput
from backend.data.workspace import get_or_create_workspace
from backend.integrations.creds_manager import IntegrationCredentialsManager
from backend.util.exceptions import BlockError
from backend.util.workspace import WorkspaceManager

from .base import BaseTool
from .models import (
@@ -325,19 +321,11 @@ class RunBlockTool(BaseTool):
            ):
                outputs[output_name].append(output_data)

        # Save binary outputs to workspace to prevent context bloat
        workspace_manager = WorkspaceManager(
            user_id, workspace.id, session.session_id
        )
        processed_outputs = await process_binary_outputs(
            dict(outputs), workspace_manager, block.name
        )

        return BlockOutputResponse(
            message=f"Block '{block.name}' executed successfully",
            block_id=block_id,
            block_name=block.name,
            outputs=processed_outputs,
            outputs=dict(outputs),
            success=True,
            session_id=session_id,
        )
@@ -1,92 +0,0 @@
import base64
from unittest.mock import AsyncMock, MagicMock

import pytest

from backend.api.features.chat.tools.binary_output_processor import (
    _decode_base64,
    process_binary_outputs,
)


@pytest.fixture
def workspace_manager():
    wm = AsyncMock()
    wm.write_file = AsyncMock(return_value=MagicMock(id="file-123"))
    return wm


class TestDecodeBase64:
    def test_raw_base64(self):
        assert _decode_base64(base64.b64encode(b"test").decode()) == b"test"

    def test_data_uri(self):
        encoded = base64.b64encode(b"test").decode()
        assert _decode_base64(f"data:image/png;base64,{encoded}") == b"test"

    def test_invalid_returns_none(self):
        assert _decode_base64("not base64!!!") is None


class TestProcessBinaryOutputs:
    @pytest.mark.asyncio
    async def test_saves_large_binary(self, workspace_manager):
        content = base64.b64encode(b"x" * 2000).decode()
        outputs = {"result": [{"png": content, "text": "ok"}]}

        result = await process_binary_outputs(outputs, workspace_manager, "Test")

        assert result["result"][0]["png"] == "workspace://file-123"
        assert result["result"][0]["text"] == "ok"

    @pytest.mark.asyncio
    async def test_skips_small_content(self, workspace_manager):
        content = base64.b64encode(b"tiny").decode()
        outputs = {"result": [{"png": content}]}

        result = await process_binary_outputs(outputs, workspace_manager, "Test")

        assert result["result"][0]["png"] == content
        workspace_manager.write_file.assert_not_called()

    @pytest.mark.asyncio
    async def test_deduplicates_identical_content(self, workspace_manager):
        content = base64.b64encode(b"x" * 2000).decode()
        outputs = {"a": [{"pdf": content}], "b": [{"pdf": content}]}

        result = await process_binary_outputs(outputs, workspace_manager, "Test")

        assert result["a"][0]["pdf"] == result["b"][0]["pdf"] == "workspace://file-123"
        assert workspace_manager.write_file.call_count == 1

    @pytest.mark.asyncio
    async def test_failure_preserves_original(self, workspace_manager):
        workspace_manager.write_file.side_effect = Exception("Storage error")
        content = base64.b64encode(b"x" * 2000).decode()

        result = await process_binary_outputs(
            {"r": [{"png": content}]}, workspace_manager, "Test"
        )

        assert result["r"][0]["png"] == content

    @pytest.mark.asyncio
    async def test_handles_nested_structures(self, workspace_manager):
        content = base64.b64encode(b"x" * 2000).decode()
        outputs = {"result": [{"outer": {"inner": {"png": content}}}]}

        result = await process_binary_outputs(outputs, workspace_manager, "Test")

        assert result["result"][0]["outer"]["inner"]["png"] == "workspace://file-123"

    @pytest.mark.asyncio
    async def test_handles_lists_in_output(self, workspace_manager):
        content = base64.b64encode(b"x" * 2000).decode()
        outputs = {"result": [{"images": [{"png": content}, {"png": content}]}]}

        result = await process_binary_outputs(outputs, workspace_manager, "Test")

        assert result["result"][0]["images"][0]["png"] == "workspace://file-123"
        assert result["result"][0]["images"][1]["png"] == "workspace://file-123"
        # Deduplication should still work
        assert workspace_manager.write_file.call_count == 1
@@ -162,8 +162,16 @@ class LinearClient:
                "searchTerm": team_name,
            }

            team_id = await self.query(query, variables)
            return team_id["teams"]["nodes"][0]["id"]
            result = await self.query(query, variables)
            nodes = result["teams"]["nodes"]

            if not nodes:
                raise LinearAPIException(
                    f"Team '{team_name}' not found. Check the team name or key and try again.",
                    status_code=404,
                )

            return nodes[0]["id"]
        except LinearAPIException as e:
            raise e
@@ -240,17 +248,44 @@ class LinearClient:
        except LinearAPIException as e:
            raise e

    async def try_search_issues(self, term: str) -> list[Issue]:
    async def try_search_issues(
        self,
        term: str,
        max_results: int = 10,
        team_id: str | None = None,
    ) -> list[Issue]:
        try:
            query = """
                query SearchIssues($term: String!, $includeComments: Boolean!) {
                    searchIssues(term: $term, includeComments: $includeComments) {
                query SearchIssues(
                    $term: String!,
                    $first: Int,
                    $teamId: String
                ) {
                    searchIssues(
                        term: $term,
                        first: $first,
                        teamId: $teamId
                    ) {
                        nodes {
                            id
                            identifier
                            title
                            description
                            priority
                            createdAt
                            state {
                                id
                                name
                                type
                            }
                            project {
                                id
                                name
                            }
                            assignee {
                                id
                                name
                            }
                        }
                    }
                }
@@ -258,7 +293,8 @@ class LinearClient:
            variables: dict[str, Any] = {
                "term": term,
                "includeComments": True,
                "first": max_results,
                "teamId": team_id,
            }

            issues = await self.query(query, variables)
@@ -17,7 +17,7 @@ from ._config import (
    LinearScope,
    linear,
)
from .models import CreateIssueResponse, Issue
from .models import CreateIssueResponse, Issue, State


class LinearCreateIssueBlock(Block):
@@ -135,9 +135,20 @@ class LinearSearchIssuesBlock(Block):
            description="Linear credentials with read permissions",
            required_scopes={LinearScope.READ},
        )
        max_results: int = SchemaField(
            description="Maximum number of results to return",
            default=10,
            ge=1,
            le=100,
        )
        team_name: str | None = SchemaField(
            description="Optional team name to filter results (e.g., 'Internal', 'Open Source')",
            default=None,
        )

    class Output(BlockSchemaOutput):
        issues: list[Issue] = SchemaField(description="List of issues")
        error: str = SchemaField(description="Error message if the search failed")

    def __init__(self):
        super().__init__(
@@ -145,8 +156,11 @@ class LinearSearchIssuesBlock(Block):
            description="Searches for issues on Linear",
            input_schema=self.Input,
            output_schema=self.Output,
            categories={BlockCategory.PRODUCTIVITY, BlockCategory.ISSUE_TRACKING},
            test_input={
                "term": "Test issue",
                "max_results": 10,
                "team_name": None,
                "credentials": TEST_CREDENTIALS_INPUT_OAUTH,
            },
            test_credentials=TEST_CREDENTIALS_OAUTH,
@@ -156,10 +170,14 @@ class LinearSearchIssuesBlock(Block):
            [
                Issue(
                    id="abc123",
                    identifier="abc123",
                    identifier="TST-123",
                    title="Test issue",
                    description="Test description",
                    priority=1,
                    state=State(
                        id="state1", name="In Progress", type="started"
                    ),
                    createdAt="2026-01-15T10:00:00.000Z",
                )
            ],
        )
@@ -168,10 +186,12 @@ class LinearSearchIssuesBlock(Block):
            "search_issues": lambda *args, **kwargs: [
                Issue(
                    id="abc123",
                    identifier="abc123",
                    identifier="TST-123",
                    title="Test issue",
                    description="Test description",
                    priority=1,
                    state=State(id="state1", name="In Progress", type="started"),
                    createdAt="2026-01-15T10:00:00.000Z",
                )
            ]
        },
@@ -181,10 +201,22 @@ class LinearSearchIssuesBlock(Block):
    async def search_issues(
        credentials: OAuth2Credentials | APIKeyCredentials,
        term: str,
        max_results: int = 10,
        team_name: str | None = None,
    ) -> list[Issue]:
        client = LinearClient(credentials=credentials)
        response: list[Issue] = await client.try_search_issues(term=term)
        return response

        # Resolve team name to ID if provided
        # Raises LinearAPIException with descriptive message if team not found
        team_id: str | None = None
        if team_name:
            team_id = await client.try_get_team_by_name(team_name=team_name)

        return await client.try_search_issues(
            term=term,
            max_results=max_results,
            team_id=team_id,
        )

    async def run(
        self,
@@ -196,7 +228,10 @@ class LinearSearchIssuesBlock(Block):
        """Execute the issue search"""
        try:
            issues = await self.search_issues(
                credentials=credentials, term=input_data.term
                credentials=credentials,
                term=input_data.term,
                max_results=input_data.max_results,
                team_name=input_data.team_name,
            )
            yield "issues", issues
        except LinearAPIException as e:
@@ -36,12 +36,21 @@ class Project(BaseModel):
    content: str | None = None


class State(BaseModel):
    id: str
    name: str
    type: str | None = (
        None  # Workflow state type (e.g., "triage", "backlog", "started", "completed", "canceled")
    )


class Issue(BaseModel):
    id: str
    identifier: str
    title: str
    description: str | None
    priority: int
    state: State | None = None
    project: Project | None = None
    createdAt: str | None = None
    comments: list[Comment] | None = None
@@ -6,6 +6,8 @@ from pydantic import SecretStr
from sentry_sdk.integrations import DidNotEnable
from sentry_sdk.integrations.anthropic import AnthropicIntegration
from sentry_sdk.integrations.asyncio import AsyncioIntegration
from sentry_sdk.integrations.fastapi import FastApiIntegration
from sentry_sdk.integrations.httpx import HttpxIntegration
from sentry_sdk.integrations.launchdarkly import LaunchDarklyIntegration
from sentry_sdk.integrations.logging import LoggingIntegration
@@ -37,6 +39,8 @@ def sentry_init():
        _experiments={"enable_logs": True},
        integrations=[
            AsyncioIntegration(),
            FastApiIntegration(),  # Traces FastAPI requests with detailed spans
            HttpxIntegration(),  # Traces outgoing HTTP calls (OpenAI, external APIs)
            LoggingIntegration(sentry_logs_level=logging.INFO),
            AnthropicIntegration(
                include_prompts=False,
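Editor's sketch (not part of the diff): a minimal, standalone `sentry_sdk.init` call wiring up the integrations shown above, assuming `fastapi` and `httpx` are installed; the empty DSN and the omission of the project's other settings (sampling, release, environment) are assumptions.

```python
import logging

import sentry_sdk
from sentry_sdk.integrations.asyncio import AsyncioIntegration
from sentry_sdk.integrations.fastapi import FastApiIntegration
from sentry_sdk.integrations.httpx import HttpxIntegration
from sentry_sdk.integrations.logging import LoggingIntegration

sentry_sdk.init(
    dsn="",  # empty DSN disables sending; the real DSN comes from settings
    integrations=[
        AsyncioIntegration(),
        FastApiIntegration(),  # traces incoming FastAPI requests
        HttpxIntegration(),  # traces outgoing HTTP calls
        LoggingIntegration(sentry_logs_level=logging.INFO),
    ],
    _experiments={"enable_logs": True},
)
```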
@@ -62,7 +62,6 @@ Below is a comprehensive list of all available blocks, categorized by their prim
| [Get Store Agent Details](block-integrations/system/store_operations.md#get-store-agent-details) | Get detailed information about an agent from the store |
| [Get Weather Information](block-integrations/basic.md#get-weather-information) | Retrieves weather information for a specified location using OpenWeatherMap API |
| [Human In The Loop](block-integrations/basic.md#human-in-the-loop) | Pause execution and wait for human approval or modification of data |
| [Linear Search Issues](block-integrations/linear/issues.md#linear-search-issues) | Searches for issues on Linear |
| [List Is Empty](block-integrations/basic.md#list-is-empty) | Checks if a list is empty |
| [List Library Agents](block-integrations/system/library_operations.md#list-library-agents) | List all agents in your personal library |
| [Note](block-integrations/basic.md#note) | A visual annotation block that displays a sticky note in the workflow editor for documentation and organization purposes |
@@ -571,6 +570,7 @@ Below is a comprehensive list of all available blocks, categorized by their prim
| [Linear Create Comment](block-integrations/linear/comment.md#linear-create-comment) | Creates a new comment on a Linear issue |
| [Linear Create Issue](block-integrations/linear/issues.md#linear-create-issue) | Creates a new issue on Linear |
| [Linear Get Project Issues](block-integrations/linear/issues.md#linear-get-project-issues) | Gets issues from a Linear project filtered by status and assignee |
| [Linear Search Issues](block-integrations/linear/issues.md#linear-search-issues) | Searches for issues on Linear |
| [Linear Search Projects](block-integrations/linear/projects.md#linear-search-projects) | Searches for projects on Linear |

## Hardware
@@ -90,9 +90,9 @@ Searches for issues on Linear

### How it works
<!-- MANUAL: how_it_works -->
This block searches for issues in Linear using a text query. It searches across issue titles, descriptions, and other fields to find matching issues.
This block searches for issues in Linear using a text query. It searches across issue titles, descriptions, and other fields to find matching issues. You can limit the number of results returned using the `max_results` parameter (default: 10, max: 100) to control token consumption and response size.

Returns a list of issues matching the search term.
Optionally filter results by team name to narrow searches to specific workspaces. If a team name is provided, the block resolves it to a team ID before searching. Returns matching issues with their state, creation date, project, and assignee information. If the search or team resolution fails, an error message is returned.
<!-- END MANUAL -->
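Editor's sketch (not from the docs): calling the block's underlying `search_issues` helper directly with the inputs described above. The import path, the assumption that the helper is exposed as a static method (its signature in the diff takes no `self`), and the availability of ready-made Linear credentials are all assumptions.

```python
from backend.blocks.linear.issues import LinearSearchIssuesBlock  # path assumed


async def find_team_issues(credentials) -> list:
    # `credentials` must be Linear OAuth2 or API-key credentials with READ scope.
    return await LinearSearchIssuesBlock.search_issues(
        credentials=credentials,
        term="payment bug",     # free-text search term
        max_results=25,         # 1-100, default 10
        team_name="Internal",   # optional; resolved to a team ID before searching
    )
```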

### Inputs
@@ -100,12 +100,14 @@ Returns a list of issues matching the search term.
| Input | Description | Type | Required |
|-------|-------------|------|----------|
| term | Term to search for issues | str | Yes |
| max_results | Maximum number of results to return | int | No |
| team_name | Optional team name to filter results (e.g., 'Internal', 'Open Source') | str | No |

### Outputs

| Output | Description | Type |
|--------|-------------|------|
| error | Error message if the operation failed | str |
| error | Error message if the search failed | str |
| issues | List of issues | List[Issue] |

### Possible use case