Mirror of https://github.com/Significant-Gravitas/AutoGPT.git, synced 2026-02-11 15:25:16 -05:00
Compare commits
16 Commits
fix/copilo... ... swiftyos/s...
| Author | SHA1 | Date |
|---|---|---|
|  | abc49d5409 |  |
|  | 277f435c39 |  |
|  | 35825a618d |  |
|  | 420fe877b8 |  |
|  | 062fe1aa70 |  |
|  | 2cd0d4fe0f |  |
|  | 1ecae8c87e |  |
|  | 659338f90c |  |
|  | 4df5b7bde7 |  |
|  | a03fe3494f |  |
|  | 65f60596fe |  |
|  | 017a00af46 |  |
|  | 52650eed1d |  |
|  | 6f9b1a8337 |  |
|  | 01ada8b85d |  |
|  | 2facfccbea |  |
@@ -22,7 +22,7 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        uses: actions/checkout@v6
        with:
          ref: ${{ github.event.workflow_run.head_branch }}
          fetch-depth: 0
.github/workflows/claude-dependabot.yml (2) vendored
@@ -30,7 +30,7 @@ jobs:
      actions: read # Required for CI access
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        uses: actions/checkout@v6
        with:
          fetch-depth: 1
.github/workflows/claude.yml (2) vendored
@@ -40,7 +40,7 @@ jobs:
      actions: read # Required for CI access
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        uses: actions/checkout@v6
        with:
          fetch-depth: 1
.github/workflows/codeql.yml (2) vendored
@@ -58,7 +58,7 @@ jobs:
    # your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        uses: actions/checkout@v6

      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL
.github/workflows/copilot-setup-steps.yml (2) vendored
@@ -27,7 +27,7 @@ jobs:
    # If you do not check out your code, Copilot will do this for you.
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        uses: actions/checkout@v6
        with:
          fetch-depth: 0
          submodules: true
.github/workflows/docs-block-sync.yml (2) vendored
@@ -23,7 +23,7 @@ jobs:
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        uses: actions/checkout@v6
        with:
          fetch-depth: 1
.github/workflows/docs-claude-review.yml (2) vendored
@@ -23,7 +23,7 @@ jobs:
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        uses: actions/checkout@v6
        with:
          fetch-depth: 0
.github/workflows/docs-enhance.yml (2) vendored
@@ -28,7 +28,7 @@ jobs:
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        uses: actions/checkout@v6
        with:
          fetch-depth: 1

@@ -25,7 +25,7 @@ jobs:
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        uses: actions/checkout@v6
        with:
          ref: ${{ github.event.inputs.git_ref || github.ref_name }}
@@ -52,7 +52,7 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Trigger deploy workflow
        uses: peter-evans/repository-dispatch@v3
        uses: peter-evans/repository-dispatch@v4
        with:
          token: ${{ secrets.DEPLOY_TOKEN }}
          repository: Significant-Gravitas/AutoGPT_cloud_infrastructure

@@ -17,7 +17,7 @@ jobs:
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        uses: actions/checkout@v6
        with:
          ref: ${{ github.ref_name || 'master' }}

@@ -45,7 +45,7 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Trigger deploy workflow
        uses: peter-evans/repository-dispatch@v3
        uses: peter-evans/repository-dispatch@v4
        with:
          token: ${{ secrets.DEPLOY_TOKEN }}
          repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
.github/workflows/platform-backend-ci.yml (2) vendored
@@ -68,7 +68,7 @@ jobs:
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        uses: actions/checkout@v6
        with:
          fetch-depth: 0
          submodules: true

@@ -82,7 +82,7 @@ jobs:
      - name: Dispatch Deploy Event
        if: steps.check_status.outputs.should_deploy == 'true'
        uses: peter-evans/repository-dispatch@v3
        uses: peter-evans/repository-dispatch@v4
        with:
          token: ${{ secrets.DISPATCH_TOKEN }}
          repository: Significant-Gravitas/AutoGPT_cloud_infrastructure

@@ -110,7 +110,7 @@ jobs:
      - name: Dispatch Undeploy Event (from comment)
        if: steps.check_status.outputs.should_undeploy == 'true'
        uses: peter-evans/repository-dispatch@v3
        uses: peter-evans/repository-dispatch@v4
        with:
          token: ${{ secrets.DISPATCH_TOKEN }}
          repository: Significant-Gravitas/AutoGPT_cloud_infrastructure

@@ -168,7 +168,7 @@ jobs:
          github.event_name == 'pull_request' &&
          github.event.action == 'closed' &&
          steps.check_pr_close.outputs.should_undeploy == 'true'
        uses: peter-evans/repository-dispatch@v3
        uses: peter-evans/repository-dispatch@v4
        with:
          token: ${{ secrets.DISPATCH_TOKEN }}
          repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
.github/workflows/platform-frontend-ci.yml (10) vendored
@@ -31,7 +31,7 @@ jobs:
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        uses: actions/checkout@v6

      - name: Check for component changes
        uses: dorny/paths-filter@v3

@@ -71,7 +71,7 @@ jobs:
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        uses: actions/checkout@v6

      - name: Set up Node.js
        uses: actions/setup-node@v6

@@ -107,7 +107,7 @@ jobs:
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        uses: actions/checkout@v6
        with:
          fetch-depth: 0

@@ -148,7 +148,7 @@ jobs:
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        uses: actions/checkout@v6
        with:
          submodules: recursive

@@ -277,7 +277,7 @@ jobs:
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        uses: actions/checkout@v6
        with:
          submodules: recursive
.github/workflows/platform-fullstack-ci.yml (4) vendored
@@ -29,7 +29,7 @@ jobs:
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        uses: actions/checkout@v6

      - name: Set up Node.js
        uses: actions/setup-node@v6

@@ -63,7 +63,7 @@ jobs:
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        uses: actions/checkout@v6
        with:
          submodules: recursive
.github/workflows/repo-workflow-checker.yml (2) vendored
@@ -11,7 +11,7 @@ jobs:
    steps:
      # - name: Wait some time for all actions to start
      #   run: sleep 30
      - uses: actions/checkout@v4
      - uses: actions/checkout@v6
      # with:
      #   fetch-depth: 0
      - name: Set up Python
@@ -45,6 +45,11 @@ AutoGPT Platform is a monorepo containing:
- Backend/Frontend services use YAML anchors for consistent configuration
- Supabase services (`db/docker/docker-compose.yml`) follow the same pattern

### Branching Strategy

- **`dev`** is the main development branch. All PRs should target `dev`.
- **`master`** is the production branch. Only used for production releases.

### Creating Pull Requests

- Create the PR against the `dev` branch of the repository.
@@ -93,6 +93,12 @@ class ChatConfig(BaseSettings):
        description="Name of the prompt in Langfuse to fetch",
    )

    # Extended thinking configuration for Claude models
    thinking_enabled: bool = Field(
        default=True,
        description="Enable adaptive thinking for Claude models via OpenRouter",
    )

    @field_validator("api_key", mode="before")
    @classmethod
    def get_api_key(cls, v):
@@ -1066,6 +1066,10 @@ async def _stream_chat_chunks(
            :128
        ]  # OpenRouter limit

    # Enable adaptive thinking for Anthropic models via OpenRouter
    if config.thinking_enabled and "anthropic" in model.lower():
        extra_body["reasoning"] = {"enabled": True}

    api_call_start = time_module.perf_counter()
    stream = await client.chat.completions.create(
        model=model,
@@ -1829,6 +1833,10 @@ async def _generate_llm_continuation(
    if session_id:
        extra_body["session_id"] = session_id[:128]

    # Enable adaptive thinking for Anthropic models via OpenRouter
    if config.thinking_enabled and "anthropic" in config.model.lower():
        extra_body["reasoning"] = {"enabled": True}

    retry_count = 0
    last_error: Exception | None = None
    response = None
@@ -1959,6 +1967,10 @@ async def _generate_llm_continuation_with_streaming(
    if session_id:
        extra_body["session_id"] = session_id[:128]

    # Enable adaptive thinking for Anthropic models via OpenRouter
    if config.thinking_enabled and "anthropic" in config.model.lower():
        extra_body["reasoning"] = {"enabled": True}

    # Make streaming LLM call (no tools - just text response)
    from typing import cast
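Note: the same three-line "reasoning" gate is added at each of the three OpenRouter call sites above. Below is a minimal, self-contained sketch of that pattern; build_extra_body is a hypothetical helper name and the model slug in the example is a placeholder, while the session_id truncation and the "anthropic" substring check come from the diff itself.

from typing import Any


def build_extra_body(
    thinking_enabled: bool, model: str, session_id: str | None = None
) -> dict[str, Any]:
    """Assemble the OpenRouter-specific extra_body payload."""
    extra_body: dict[str, Any] = {}
    if session_id:
        # OpenRouter caps session identifiers at 128 characters.
        extra_body["session_id"] = session_id[:128]
    # Request adaptive thinking only for Anthropic models when the flag is on.
    if thinking_enabled and "anthropic" in model.lower():
        extra_body["reasoning"] = {"enabled": True}
    return extra_body


# Example with a placeholder model slug:
# build_extra_body(True, "anthropic/claude-sonnet-4", "s" * 200)
# -> {"session_id": "s" * 128, "reasoning": {"enabled": True}}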
@@ -7,7 +7,6 @@ from backend.api.features.chat.model import ChatSession
from backend.api.features.chat.tools.base import BaseTool, ToolResponseBase
from backend.api.features.chat.tools.models import (
    BlockInfoSummary,
    BlockInputFieldInfo,
    BlockListResponse,
    ErrorResponse,
    NoResultsResponse,
@@ -54,7 +53,8 @@ class FindBlockTool(BaseTool):
        "Blocks are reusable components that perform specific tasks like "
        "sending emails, making API calls, processing text, etc. "
        "IMPORTANT: Use this tool FIRST to get the block's 'id' before calling run_block. "
        "The response includes each block's id, required_inputs, and input_schema."
        "The response includes each block's id, name, and description. "
        "Call run_block with the block's id to see detailed inputs/outputs and execute it."
    )

    @property
@@ -123,7 +123,7 @@ class FindBlockTool(BaseTool):
                session_id=session_id,
            )

        # Enrich results with full block information
        # Enrich results with block information
        blocks: list[BlockInfoSummary] = []
        for result in results:
            block_id = result["content_id"]
@@ -140,65 +140,11 @@ class FindBlockTool(BaseTool):
            ):
                continue

            # Get input/output schemas
            input_schema = {}
            output_schema = {}
            try:
                input_schema = block.input_schema.jsonschema()
            except Exception as e:
                logger.debug(
                    "Failed to generate input schema for block %s: %s",
                    block_id,
                    e,
                )
            try:
                output_schema = block.output_schema.jsonschema()
            except Exception as e:
                logger.debug(
                    "Failed to generate output schema for block %s: %s",
                    block_id,
                    e,
                )

            # Get categories from block instance
            categories = []
            if hasattr(block, "categories") and block.categories:
                categories = [cat.value for cat in block.categories]

            # Extract required inputs for easier use
            required_inputs: list[BlockInputFieldInfo] = []
            if input_schema:
                properties = input_schema.get("properties", {})
                required_fields = set(input_schema.get("required", []))
                # Get credential field names to exclude from required inputs
                credentials_fields = set(
                    block.input_schema.get_credentials_fields().keys()
                )

                for field_name, field_schema in properties.items():
                    # Skip credential fields - they're handled separately
                    if field_name in credentials_fields:
                        continue

                    required_inputs.append(
                        BlockInputFieldInfo(
                            name=field_name,
                            type=field_schema.get("type", "string"),
                            description=field_schema.get("description", ""),
                            required=field_name in required_fields,
                            default=field_schema.get("default"),
                        )
                    )

            blocks.append(
                BlockInfoSummary(
                    id=block_id,
                    name=block.name,
                    description=block.description or "",
                    categories=categories,
                    input_schema=input_schema,
                    output_schema=output_schema,
                    required_inputs=required_inputs,
                )
            )
@@ -227,8 +173,7 @@ class FindBlockTool(BaseTool):
        return BlockListResponse(
            message=(
                f"Found {len(blocks)} block(s) matching '{query}'. "
                "To execute a block, use run_block with the block's 'id' field "
                "and provide 'input_data' matching the block's input_schema."
                "To see a block's inputs/outputs and execute it, use run_block with the block's 'id'."
            ),
            blocks=blocks,
            count=len(blocks),
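Note: only the removed side of the -140,65 +140,11 hunk is visible above; the replacement enrichment loop is not shown. A minimal sketch of what the trimmed loop plausibly looks like, assuming the updated BlockInfoSummary carries only id, name, and description; the stand-in dataclass and the summarize() helper below are illustrative, not the actual code.

from dataclasses import dataclass


@dataclass
class BlockInfoSummary:  # stand-in for the slimmed Pydantic model
    id: str
    name: str
    description: str


def summarize(results, get_block):
    """Build lean per-block summaries from hybrid-search results."""
    blocks: list[BlockInfoSummary] = []
    for result in results:
        block_id = result["content_id"]
        block = get_block(block_id)
        if block is None or getattr(block, "disabled", False):
            continue  # skip missing or disabled blocks
        blocks.append(
            BlockInfoSummary(
                id=block_id,
                name=block.name,
                description=block.description or "",
            )
        )
    return blocks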
@@ -18,7 +18,13 @@ _TEST_USER_ID = "test-user-find-block"


def make_mock_block(
    block_id: str, name: str, block_type: BlockType, disabled: bool = False
    block_id: str,
    name: str,
    block_type: BlockType,
    disabled: bool = False,
    input_schema: dict | None = None,
    output_schema: dict | None = None,
    credentials_fields: dict | None = None,
):
    """Create a mock block for testing."""
    mock = MagicMock()
@@ -28,10 +34,13 @@ def make_mock_block(
    mock.block_type = block_type
    mock.disabled = disabled
    mock.input_schema = MagicMock()
    mock.input_schema.jsonschema.return_value = {"properties": {}, "required": []}
    mock.input_schema.get_credentials_fields.return_value = {}
    mock.input_schema.jsonschema.return_value = input_schema or {
        "properties": {},
        "required": [],
    }
    mock.input_schema.get_credentials_fields.return_value = credentials_fields or {}
    mock.output_schema = MagicMock()
    mock.output_schema.jsonschema.return_value = {}
    mock.output_schema.jsonschema.return_value = output_schema or {}
    mock.categories = []
    return mock
@@ -137,3 +146,241 @@ class TestFindBlockFiltering:
        assert isinstance(response, BlockListResponse)
        assert len(response.blocks) == 1
        assert response.blocks[0].id == "normal-block-id"

    @pytest.mark.asyncio(loop_scope="session")
    async def test_response_size_average_chars_per_block(self):
        """Measure average chars per block in the serialized response."""
        session = make_session(user_id=_TEST_USER_ID)

        # Realistic block definitions modeled after real blocks
        block_defs = [
            {
                "id": "http-block-id",
                "name": "Send Web Request",
                "input_schema": {
                    "properties": {
                        "url": {
                            "type": "string",
                            "description": "The URL to send the request to",
                        },
                        "method": {
                            "type": "string",
                            "description": "The HTTP method to use",
                        },
                        "headers": {
                            "type": "object",
                            "description": "Headers to include in the request",
                        },
                        "json_format": {
                            "type": "boolean",
                            "description": "If true, send the body as JSON",
                        },
                        "body": {
                            "type": "object",
                            "description": "Form/JSON body payload",
                        },
                        "credentials": {
                            "type": "object",
                            "description": "HTTP credentials",
                        },
                    },
                    "required": ["url", "method"],
                },
                "output_schema": {
                    "properties": {
                        "response": {
                            "type": "object",
                            "description": "The response from the server",
                        },
                        "client_error": {
                            "type": "object",
                            "description": "Errors on 4xx status codes",
                        },
                        "server_error": {
                            "type": "object",
                            "description": "Errors on 5xx status codes",
                        },
                        "error": {
                            "type": "string",
                            "description": "Errors for all other exceptions",
                        },
                    },
                },
                "credentials_fields": {"credentials": True},
            },
            {
                "id": "email-block-id",
                "name": "Send Email",
                "input_schema": {
                    "properties": {
                        "to_email": {
                            "type": "string",
                            "description": "Recipient email address",
                        },
                        "subject": {
                            "type": "string",
                            "description": "Subject of the email",
                        },
                        "body": {
                            "type": "string",
                            "description": "Body of the email",
                        },
                        "config": {
                            "type": "object",
                            "description": "SMTP Config",
                        },
                        "credentials": {
                            "type": "object",
                            "description": "SMTP credentials",
                        },
                    },
                    "required": ["to_email", "subject", "body", "credentials"],
                },
                "output_schema": {
                    "properties": {
                        "status": {
                            "type": "string",
                            "description": "Status of the email sending operation",
                        },
                        "error": {
                            "type": "string",
                            "description": "Error message if sending failed",
                        },
                    },
                },
                "credentials_fields": {"credentials": True},
            },
            {
                "id": "claude-code-block-id",
                "name": "Claude Code",
                "input_schema": {
                    "properties": {
                        "e2b_credentials": {
                            "type": "object",
                            "description": "API key for E2B platform",
                        },
                        "anthropic_credentials": {
                            "type": "object",
                            "description": "API key for Anthropic",
                        },
                        "prompt": {
                            "type": "string",
                            "description": "Task or instruction for Claude Code",
                        },
                        "timeout": {
                            "type": "integer",
                            "description": "Sandbox timeout in seconds",
                        },
                        "setup_commands": {
                            "type": "array",
                            "description": "Shell commands to run before execution",
                        },
                        "working_directory": {
                            "type": "string",
                            "description": "Working directory for Claude Code",
                        },
                        "session_id": {
                            "type": "string",
                            "description": "Session ID to resume a conversation",
                        },
                        "sandbox_id": {
                            "type": "string",
                            "description": "Sandbox ID to reconnect to",
                        },
                        "conversation_history": {
                            "type": "string",
                            "description": "Previous conversation history",
                        },
                        "dispose_sandbox": {
                            "type": "boolean",
                            "description": "Whether to dispose sandbox after execution",
                        },
                    },
                    "required": [
                        "e2b_credentials",
                        "anthropic_credentials",
                        "prompt",
                    ],
                },
                "output_schema": {
                    "properties": {
                        "response": {
                            "type": "string",
                            "description": "Output from Claude Code execution",
                        },
                        "files": {
                            "type": "array",
                            "description": "Files created/modified by Claude Code",
                        },
                        "conversation_history": {
                            "type": "string",
                            "description": "Full conversation history",
                        },
                        "session_id": {
                            "type": "string",
                            "description": "Session ID for this conversation",
                        },
                        "sandbox_id": {
                            "type": "string",
                            "description": "ID of the sandbox instance",
                        },
                        "error": {
                            "type": "string",
                            "description": "Error message if execution failed",
                        },
                    },
                },
                "credentials_fields": {
                    "e2b_credentials": True,
                    "anthropic_credentials": True,
                },
            },
        ]

        search_results = [
            {"content_id": d["id"], "score": 0.9 - i * 0.1}
            for i, d in enumerate(block_defs)
        ]
        mock_blocks = {
            d["id"]: make_mock_block(
                block_id=d["id"],
                name=d["name"],
                block_type=BlockType.STANDARD,
                input_schema=d["input_schema"],
                output_schema=d["output_schema"],
                credentials_fields=d["credentials_fields"],
            )
            for d in block_defs
        }

        with patch(
            "backend.api.features.chat.tools.find_block.unified_hybrid_search",
            new_callable=AsyncMock,
            return_value=(search_results, len(search_results)),
        ), patch(
            "backend.api.features.chat.tools.find_block.get_block",
            side_effect=lambda bid: mock_blocks.get(bid),
        ):
            tool = FindBlockTool()
            response = await tool._execute(
                user_id=_TEST_USER_ID, session=session, query="test"
            )

        assert isinstance(response, BlockListResponse)
        assert response.count == len(block_defs)

        total_chars = len(response.model_dump_json())
        avg_chars = total_chars // response.count

        # Print for visibility in test output
        print(f"\nTotal response size: {total_chars} chars")
        print(f"Number of blocks: {response.count}")
        print(f"Average chars per block: {avg_chars}")

        # The old response was ~90K for 10 blocks (~9K per block).
        # Previous optimization reduced it to ~1.5K per block (no raw JSON schemas).
        # Now with only id/name/description, we expect ~300 chars per block.
        assert avg_chars < 500, (
            f"Average chars per block ({avg_chars}) exceeds 500. "
            f"Total response: {total_chars} chars for {response.count} blocks."
        )
@@ -25,6 +25,7 @@ class ResponseType(str, Enum):
    AGENT_SAVED = "agent_saved"
    CLARIFICATION_NEEDED = "clarification_needed"
    BLOCK_LIST = "block_list"
    BLOCK_DETAILS = "block_details"
    BLOCK_OUTPUT = "block_output"
    DOC_SEARCH_RESULTS = "doc_search_results"
    DOC_PAGE = "doc_page"
@@ -334,13 +335,6 @@ class BlockInfoSummary(BaseModel):
    id: str
    name: str
    description: str
    categories: list[str]
    input_schema: dict[str, Any]
    output_schema: dict[str, Any]
    required_inputs: list[BlockInputFieldInfo] = Field(
        default_factory=list,
        description="List of required input fields for this block",
    )


class BlockListResponse(ToolResponseBase):
@@ -350,10 +344,25 @@ class BlockListResponse(ToolResponseBase):
    blocks: list[BlockInfoSummary]
    count: int
    query: str
    usage_hint: str = Field(
        default="To execute a block, call run_block with block_id set to the block's "
        "'id' field and input_data containing the required fields from input_schema."
    )


class BlockDetails(BaseModel):
    """Detailed block information."""

    id: str
    name: str
    description: str
    inputs: dict[str, Any] = {}
    outputs: dict[str, Any] = {}
    credentials: list[CredentialsMetaInput] = []


class BlockDetailsResponse(ToolResponseBase):
    """Response for block details (first run_block attempt)."""

    type: ResponseType = ResponseType.BLOCK_DETAILS
    block: BlockDetails
    user_authenticated: bool = False


class BlockOutputResponse(ToolResponseBase):
@@ -22,6 +22,8 @@ from backend.util.exceptions import BlockError
from .base import BaseTool
from .helpers import get_inputs_from_schema
from .models import (
    BlockDetails,
    BlockDetailsResponse,
    BlockOutputResponse,
    ErrorResponse,
    SetupInfo,
@@ -50,8 +52,8 @@ class RunBlockTool(BaseTool):
        "Execute a specific block with the provided input data. "
        "IMPORTANT: You MUST call find_block first to get the block's 'id' - "
        "do NOT guess or make up block IDs. "
        "Use the 'id' from find_block results and provide input_data "
        "matching the block's required_inputs."
        "On first attempt (without input_data), returns detailed schema showing "
        "required inputs and outputs. Then call again with proper input_data to execute."
    )

    @property
@@ -66,11 +68,19 @@ class RunBlockTool(BaseTool):
                    "NEVER guess this - always get it from find_block first."
                ),
            },
            "block_name": {
                "type": "string",
                "description": (
                    "The block's human-readable name from find_block results. "
                    "Used for display purposes in the UI."
                ),
            },
            "input_data": {
                "type": "object",
                "description": (
                    "Input values for the block. Use the 'required_inputs' field "
                    "from find_block to see what fields are needed."
                    "Input values for the block. "
                    "First call with empty {} to see the block's schema, "
                    "then call again with proper values to execute."
                ),
            },
        },
@@ -155,6 +165,34 @@ class RunBlockTool(BaseTool):
            await self._resolve_block_credentials(user_id, block, input_data)
        )

        # Get block schemas for details/validation
        try:
            input_schema: dict[str, Any] = block.input_schema.jsonschema()
        except Exception as e:
            logger.warning(
                "Failed to generate input schema for block %s: %s",
                block_id,
                e,
            )
            return ErrorResponse(
                message=f"Block '{block.name}' has an invalid input schema",
                error=str(e),
                session_id=session_id,
            )
        try:
            output_schema: dict[str, Any] = block.output_schema.jsonschema()
        except Exception as e:
            logger.warning(
                "Failed to generate output schema for block %s: %s",
                block_id,
                e,
            )
            return ErrorResponse(
                message=f"Block '{block.name}' has an invalid output schema",
                error=str(e),
                session_id=session_id,
            )

        if missing_credentials:
            # Return setup requirements response with missing credentials
            credentials_fields_info = block.input_schema.get_credentials_fields_info()
@@ -187,6 +225,39 @@ class RunBlockTool(BaseTool):
                graph_version=None,
            )

        # Check if this is a first attempt (required inputs missing)
        # Return block details so user can see what inputs are needed
        credentials_fields = set(block.input_schema.get_credentials_fields().keys())
        required_keys = set(input_schema.get("required", []))
        required_non_credential_keys = required_keys - credentials_fields
        provided_input_keys = set(input_data.keys()) - credentials_fields

        # Show details when there are required non-credential inputs and none are provided
        if required_non_credential_keys and not (
            required_non_credential_keys & provided_input_keys
        ):
            # Get credentials info for the response
            credentials_meta = []
            for field_name, cred_meta in matched_credentials.items():
                credentials_meta.append(cred_meta)

            return BlockDetailsResponse(
                message=(
                    f"Block '{block.name}' details. "
                    "Provide input_data matching the inputs schema to execute the block."
                ),
                session_id=session_id,
                block=BlockDetails(
                    id=block_id,
                    name=block.name,
                    description=block.description or "",
                    inputs=input_schema,
                    outputs=output_schema,
                    credentials=credentials_meta,
                ),
                user_authenticated=True,
            )

        try:
            # Get or create user's workspace for CoPilot file operations
            workspace = await get_or_create_workspace(user_id)
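Note: together these run_block changes establish a two-phase contract for the copilot: a first call with empty input_data returns a BlockDetailsResponse describing the block's inputs and outputs, and a second call with real values executes it. A minimal sketch of a caller following that contract; call_tool is a hypothetical stand-in for the copilot's tool dispatcher, and the dict keys mirror the serialized responses shown above.

async def run_block_two_phase(call_tool, block_id: str, values: dict):
    """Drive the first-call-for-schema, second-call-to-execute flow."""
    first = await call_tool("run_block", block_id=block_id, input_data={})
    if first.get("type") == "block_details":
        # first["block"]["inputs"] holds the JSON schema; fill `values`
        # accordingly, then execute for real.
        return await call_tool("run_block", block_id=block_id, input_data=values)
    # Otherwise: setup requirements, an error, or direct block output.
    return first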
@@ -0,0 +1,153 @@
"""Tests for BlockDetailsResponse in RunBlockTool."""

from unittest.mock import AsyncMock, MagicMock, patch

import pytest

from backend.api.features.chat.tools.models import BlockDetailsResponse
from backend.api.features.chat.tools.run_block import RunBlockTool
from backend.data.block import BlockType
from backend.data.model import CredentialsMetaInput
from backend.integrations.providers import ProviderName

from ._test_data import make_session

_TEST_USER_ID = "test-user-run-block-details"


def make_mock_block_with_inputs(
    block_id: str, name: str, description: str = "Test description"
):
    """Create a mock block with input/output schemas for testing."""
    mock = MagicMock()
    mock.id = block_id
    mock.name = name
    mock.description = description
    mock.block_type = BlockType.STANDARD
    mock.disabled = False

    # Input schema with non-credential fields
    mock.input_schema = MagicMock()
    mock.input_schema.jsonschema.return_value = {
        "properties": {
            "url": {"type": "string", "description": "URL to fetch"},
            "method": {"type": "string", "description": "HTTP method"},
        },
        "required": ["url"],
    }
    mock.input_schema.get_credentials_fields.return_value = {}
    mock.input_schema.get_credentials_fields_info.return_value = {}

    # Output schema
    mock.output_schema = MagicMock()
    mock.output_schema.jsonschema.return_value = {
        "properties": {
            "response": {"type": "object", "description": "HTTP response"},
            "error": {"type": "string", "description": "Error message"},
        }
    }

    return mock


@pytest.mark.asyncio(loop_scope="session")
async def test_run_block_returns_details_when_no_input_provided():
    """When run_block is called without input_data, it should return BlockDetailsResponse."""
    session = make_session(user_id=_TEST_USER_ID)

    # Create a block with inputs
    http_block = make_mock_block_with_inputs(
        "http-block-id", "HTTP Request", "Send HTTP requests"
    )

    with patch(
        "backend.api.features.chat.tools.run_block.get_block",
        return_value=http_block,
    ):
        # Mock credentials check to return no missing credentials
        with patch.object(
            RunBlockTool,
            "_resolve_block_credentials",
            new_callable=AsyncMock,
            return_value=({}, []),  # (matched_credentials, missing_credentials)
        ):
            tool = RunBlockTool()
            response = await tool._execute(
                user_id=_TEST_USER_ID,
                session=session,
                block_id="http-block-id",
                input_data={},  # Empty input data
            )

    # Should return BlockDetailsResponse showing the schema
    assert isinstance(response, BlockDetailsResponse)
    assert response.block.id == "http-block-id"
    assert response.block.name == "HTTP Request"
    assert response.block.description == "Send HTTP requests"
    assert "url" in response.block.inputs["properties"]
    assert "method" in response.block.inputs["properties"]
    assert "response" in response.block.outputs["properties"]
    assert response.user_authenticated is True


@pytest.mark.asyncio(loop_scope="session")
async def test_run_block_returns_details_when_only_credentials_provided():
    """When only credentials are provided (no actual input), should return details."""
    session = make_session(user_id=_TEST_USER_ID)

    # Create a block with both credential and non-credential inputs
    mock = MagicMock()
    mock.id = "api-block-id"
    mock.name = "API Call"
    mock.description = "Make API calls"
    mock.block_type = BlockType.STANDARD
    mock.disabled = False

    mock.input_schema = MagicMock()
    mock.input_schema.jsonschema.return_value = {
        "properties": {
            "credentials": {"type": "object", "description": "API credentials"},
            "endpoint": {"type": "string", "description": "API endpoint"},
        },
        "required": ["credentials", "endpoint"],
    }
    mock.input_schema.get_credentials_fields.return_value = {"credentials": True}
    mock.input_schema.get_credentials_fields_info.return_value = {}

    mock.output_schema = MagicMock()
    mock.output_schema.jsonschema.return_value = {
        "properties": {"result": {"type": "object"}}
    }

    with patch(
        "backend.api.features.chat.tools.run_block.get_block",
        return_value=mock,
    ):
        with patch.object(
            RunBlockTool,
            "_resolve_block_credentials",
            new_callable=AsyncMock,
            return_value=(
                {
                    "credentials": CredentialsMetaInput(
                        id="cred-id",
                        provider=ProviderName("test_provider"),
                        type="api_key",
                        title="Test Credential",
                    )
                },
                [],
            ),
        ):
            tool = RunBlockTool()
            response = await tool._execute(
                user_id=_TEST_USER_ID,
                session=session,
                block_id="api-block-id",
                input_data={"credentials": {"some": "cred"}},  # Only credential
            )

    # Should return details because no non-credential inputs provided
    assert isinstance(response, BlockDetailsResponse)
    assert response.block.id == "api-block-id"
    assert response.block.name == "API Call"
@@ -743,6 +743,11 @@ class GraphModel(Graph, GraphMeta):
                # For invalid blocks, we still raise immediately as this is a structural issue
                raise ValueError(f"Invalid block {node.block_id} for node #{node.id}")

            if block.disabled:
                raise ValueError(
                    f"Block {node.block_id} is disabled and cannot be used in graphs"
                )

            node_input_mask = (
                nodes_input_masks.get(node.id, {}) if nodes_input_masks else {}
            )
@@ -213,6 +213,9 @@ async def execute_node(
        block_name=node_block.name,
    )

    if node_block.disabled:
        raise ValueError(f"Block {node_block.id} is disabled and cannot be executed")

    # Sanity check: validate the execution input.
    input_data, error = validate_exec(node, data.inputs, resolve_input=False)
    if input_data is None:
autogpt_platform/backend/poetry.lock (10) generated
@@ -46,14 +46,14 @@ pycares = ">=4.9.0,<5"

[[package]]
name = "aiofiles"
version = "24.1.0"
version = "25.1.0"
description = "File support for asyncio."
optional = false
python-versions = ">=3.8"
python-versions = ">=3.9"
groups = ["main"]
files = [
    {file = "aiofiles-24.1.0-py3-none-any.whl", hash = "sha256:b4ec55f4195e3eb5d7abd1bf7e061763e864dd4954231fb8539a0ef8bb8260e5"},
    {file = "aiofiles-24.1.0.tar.gz", hash = "sha256:22a075c9e5a3810f0c2e48f3008c94d68c65d763b9b03857924c99e57355166c"},
    {file = "aiofiles-25.1.0-py3-none-any.whl", hash = "sha256:abe311e527c862958650f9438e859c1fa7568a141b22abcd015e120e86a85695"},
    {file = "aiofiles-25.1.0.tar.gz", hash = "sha256:a8d728f0a29de45dc521f18f07297428d56992a742f0cd2701ba86e44d23d5b2"},
]

[[package]]

@@ -8440,4 +8440,4 @@ cffi = ["cffi (>=1.17,<2.0) ; platform_python_implementation != \"PyPy\" and pyt
[metadata]
lock-version = "2.1"
python-versions = ">=3.10,<3.14"
content-hash = "fc135114e01de39c8adf70f6132045e7d44a19473c1279aee0978de65aad1655"
content-hash = "c06e96ad49388ba7a46786e9ea55ea2c1a57408e15613237b4bee40a592a12af"

@@ -76,7 +76,7 @@ yt-dlp = "2025.12.08"
zerobouncesdk = "^1.1.2"
# NOTE: please insert new dependencies in their alphabetical location
pytest-snapshot = "^0.9.0"
aiofiles = "^24.1.0"
aiofiles = "^25.1.0"
tiktoken = "^0.12.0"
aioclamd = "^1.0.0"
setuptools = "^80.9.0"
@@ -1,11 +1,11 @@
"use client";

import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner";
import { SidebarProvider } from "@/components/ui/sidebar";
import { ChatContainer } from "./components/ChatContainer/ChatContainer";
import { ChatSidebar } from "./components/ChatSidebar/ChatSidebar";
import { MobileDrawer } from "./components/MobileDrawer/MobileDrawer";
import { MobileHeader } from "./components/MobileHeader/MobileHeader";
import { ScaleLoader } from "./components/ScaleLoader/ScaleLoader";
import { useCopilotPage } from "./useCopilotPage";

export function CopilotPage() {

@@ -34,7 +34,11 @@ export function CopilotPage() {
  } = useCopilotPage();

  if (isUserLoading || !isLoggedIn) {
    return <LoadingSpinner size="large" cover />;
    return (
      <div className="fixed inset-0 z-50 flex items-center justify-center bg-[#f8f8f9]">
        <ScaleLoader className="text-neutral-400" />
      </div>
    );
  }

  return (
@@ -143,10 +143,10 @@ export const ChatMessagesContainer = ({

  return (
    <Conversation className="min-h-0 flex-1">
      <ConversationContent className="gap-6 px-3 py-6">
      <ConversationContent className="flex min-h-screen flex-1 flex-col gap-6 px-3 py-6">
        {isLoading && messages.length === 0 && (
          <div className="flex flex-1 items-center justify-center">
            <LoadingSpinner size="large" className="text-neutral-400" />
          <div className="flex min-h-full flex-1 items-center justify-center">
            <LoadingSpinner className="text-neutral-600" />
          </div>
        )}
        {messages.map((message, messageIndex) => {
@@ -121,8 +121,8 @@ export function ChatSidebar() {
        className="mt-4 flex flex-col gap-1"
      >
        {isLoadingSessions ? (
          <div className="flex items-center justify-center py-4">
            <LoadingSpinner size="small" className="text-neutral-400" />
          <div className="flex min-h-[30rem] items-center justify-center py-4">
            <LoadingSpinner size="small" className="text-neutral-600" />
          </div>
        ) : sessions.length === 0 ? (
          <p className="py-4 text-center text-sm text-neutral-500">
@@ -0,0 +1,35 @@
.loader {
  width: 48px;
  height: 48px;
  display: inline-block;
  position: relative;
}

.loader::after,
.loader::before {
  content: "";
  box-sizing: border-box;
  width: 100%;
  height: 100%;
  border-radius: 50%;
  background: currentColor;
  position: absolute;
  left: 0;
  top: 0;
  animation: animloader 2s linear infinite;
}

.loader::after {
  animation-delay: 1s;
}

@keyframes animloader {
  0% {
    transform: scale(0);
    opacity: 1;
  }
  100% {
    transform: scale(1);
    opacity: 0;
  }
}
@@ -0,0 +1,16 @@
import { cn } from "@/lib/utils";
import styles from "./ScaleLoader.module.css";

interface Props {
  size?: number;
  className?: string;
}

export function ScaleLoader({ size = 48, className }: Props) {
  return (
    <div
      className={cn(styles.loader, className)}
      style={{ width: size, height: size }}
    />
  );
}
@@ -49,12 +49,7 @@ interface Props {
  part: CreateAgentToolPart;
}

function getAccordionMeta(output: CreateAgentToolOutput): {
  icon: React.ReactNode;
  title: React.ReactNode;
  titleClassName?: string;
  description?: string;
} {
function getAccordionMeta(output: CreateAgentToolOutput) {
  const icon = <AccordionIcon />;

  if (isAgentSavedOutput(output)) {

@@ -73,6 +68,7 @@ function getAccordionMeta(output: CreateAgentToolOutput): {
      icon,
      title: "Needs clarification",
      description: `${questions.length} question${questions.length === 1 ? "" : "s"}`,
      expanded: true,
    };
  }
  if (

@@ -97,18 +93,23 @@ function getAccordionMeta(output: CreateAgentToolOutput): {
export function CreateAgentTool({ part }: Props) {
  const text = getAnimationText(part);
  const { onSend } = useCopilotChatActions();

  const isStreaming =
    part.state === "input-streaming" || part.state === "input-available";

  const output = getCreateAgentToolOutput(part);

  const isError =
    part.state === "output-error" || (!!output && isErrorOutput(output));

  const isOperating =
    !!output &&
    (isOperationStartedOutput(output) ||
      isOperationPendingOutput(output) ||
      isOperationInProgressOutput(output));

  const progress = useAsymptoticProgress(isOperating);

  const hasExpandableContent =
    part.state === "output-available" &&
    !!output &&

@@ -149,10 +150,7 @@ export function CreateAgentTool({ part }: Props) {
      </div>

      {hasExpandableContent && output && (
        <ToolAccordion
          {...getAccordionMeta(output)}
          defaultExpanded={isOperating || isClarificationNeededOutput(output)}
        >
        <ToolAccordion {...getAccordionMeta(output)}>
          {isOperating && (
            <ContentGrid>
              <ProgressBar value={progress} className="max-w-[280px]" />
@@ -146,10 +146,7 @@ export function EditAgentTool({ part }: Props) {
      </div>

      {hasExpandableContent && output && (
        <ToolAccordion
          {...getAccordionMeta(output)}
          defaultExpanded={isOperating || isClarificationNeededOutput(output)}
        >
        <ToolAccordion {...getAccordionMeta(output)}>
          {isOperating && (
            <ContentGrid>
              <ProgressBar value={progress} className="max-w-[280px]" />
@@ -61,14 +61,7 @@ export function RunAgentTool({ part }: Props) {
      </div>

      {hasExpandableContent && output && (
        <ToolAccordion
          {...getAccordionMeta(output)}
          defaultExpanded={
            isRunAgentExecutionStartedOutput(output) ||
            isRunAgentSetupRequirementsOutput(output) ||
            isRunAgentAgentDetailsOutput(output)
          }
        >
        <ToolAccordion {...getAccordionMeta(output)}>
          {isRunAgentExecutionStartedOutput(output) && (
            <ExecutionStartedCard output={output} />
          )}
@@ -10,7 +10,7 @@ import {
  WarningDiamondIcon,
} from "@phosphor-icons/react";
import type { ToolUIPart } from "ai";
import { SpinnerLoader } from "../../components/SpinnerLoader/SpinnerLoader";
import { OrbitLoader } from "../../components/OrbitLoader/OrbitLoader";

export interface RunAgentInput {
  username_agent_slug?: string;

@@ -171,7 +171,7 @@ export function ToolIcon({
    );
  }
  if (isStreaming) {
    return <SpinnerLoader size={40} className="text-neutral-700" />;
    return <OrbitLoader size={24} />;
  }
  return <PlayIcon size={14} weight="regular" className="text-neutral-400" />;
}

@@ -203,7 +203,7 @@ export function getAccordionMeta(output: RunAgentToolOutput): {
      ? output.status.trim()
      : "started";
    return {
      icon: <SpinnerLoader size={28} className="text-neutral-700" />,
      icon: <OrbitLoader size={28} className="text-neutral-700" />,
      title: output.graph_name,
      description: `Status: ${statusText}`,
    };
@@ -55,13 +55,7 @@ export function RunBlockTool({ part }: Props) {
      </div>

      {hasExpandableContent && output && (
        <ToolAccordion
          {...getAccordionMeta(output)}
          defaultExpanded={
            isRunBlockBlockOutput(output) ||
            isRunBlockSetupRequirementsOutput(output)
          }
        >
        <ToolAccordion {...getAccordionMeta(output)}>
          {isRunBlockBlockOutput(output) && <BlockOutputCard output={output} />}

          {isRunBlockSetupRequirementsOutput(output) && (
@@ -8,20 +8,39 @@ import {
  WarningDiamondIcon,
} from "@phosphor-icons/react";
import type { ToolUIPart } from "ai";
import { SpinnerLoader } from "../../components/SpinnerLoader/SpinnerLoader";
import { OrbitLoader } from "../../components/OrbitLoader/OrbitLoader";

/** Block details returned on first run_block attempt (before input_data provided). */
export interface BlockDetailsResponse {
  type: typeof ResponseType.block_details;
  message: string;
  session_id?: string | null;
  block: {
    id: string;
    name: string;
    description: string;
    inputs: Record<string, unknown>;
    outputs: Record<string, unknown>;
    credentials: unknown[];
  };
  user_authenticated: boolean;
}

export interface RunBlockInput {
  block_id?: string;
  block_name?: string;
  input_data?: Record<string, unknown>;
}

export type RunBlockToolOutput =
  | SetupRequirementsResponse
  | BlockDetailsResponse
  | BlockOutputResponse
  | ErrorResponse;

const RUN_BLOCK_OUTPUT_TYPES = new Set<string>([
  ResponseType.setup_requirements,
  ResponseType.block_details,
  ResponseType.block_output,
  ResponseType.error,
]);
@@ -35,6 +54,15 @@ export function isRunBlockSetupRequirementsOutput(
  );
}

export function isRunBlockDetailsOutput(
  output: RunBlockToolOutput,
): output is BlockDetailsResponse {
  return (
    output.type === ResponseType.block_details ||
    ("block" in output && typeof output.block === "object")
  );
}

export function isRunBlockBlockOutput(
  output: RunBlockToolOutput,
): output is BlockOutputResponse {

@@ -64,6 +92,7 @@ function parseOutput(output: unknown): RunBlockToolOutput | null {
    return output as RunBlockToolOutput;
  }
  if ("block_id" in output) return output as BlockOutputResponse;
  if ("block" in output) return output as BlockDetailsResponse;
  if ("setup_info" in output) return output as SetupRequirementsResponse;
  if ("error" in output || "details" in output)
    return output as ErrorResponse;

@@ -84,17 +113,25 @@ export function getAnimationText(part: {
  output?: unknown;
}): string {
  const input = part.input as RunBlockInput | undefined;
  const blockName = input?.block_name?.trim();
  const blockId = input?.block_id?.trim();
  const blockText = blockId ? ` "${blockId}"` : "";
  // Prefer block_name if available, otherwise fall back to block_id
  const blockText = blockName
    ? ` "${blockName}"`
    : blockId
      ? ` "${blockId}"`
      : "";

  switch (part.state) {
    case "input-streaming":
    case "input-available":
      return `Running the block${blockText}`;
      return `Running${blockText}`;
    case "output-available": {
      const output = parseOutput(part.output);
      if (!output) return `Running the block${blockText}`;
      if (!output) return `Running${blockText}`;
      if (isRunBlockBlockOutput(output)) return `Ran "${output.block_name}"`;
      if (isRunBlockDetailsOutput(output))
        return `Details for "${output.block.name}"`;
      if (isRunBlockSetupRequirementsOutput(output)) {
        return `Setup needed for "${output.setup_info.agent_name}"`;
      }
@@ -120,7 +157,7 @@ export function ToolIcon({
    );
  }
  if (isStreaming) {
    return <SpinnerLoader size={40} className="text-neutral-700" />;
    return <OrbitLoader size={24} />;
  }
  return <PlayIcon size={14} weight="regular" className="text-neutral-400" />;
}

@@ -149,7 +186,7 @@ export function getAccordionMeta(output: RunBlockToolOutput): {
  if (isRunBlockBlockOutput(output)) {
    const keys = Object.keys(output.outputs ?? {});
    return {
      icon: <SpinnerLoader size={32} className="text-neutral-700" />,
      icon: <OrbitLoader size={24} className="text-neutral-700" />,
      title: output.block_name,
      description:
        keys.length > 0

@@ -158,6 +195,21 @@ export function getAccordionMeta(output: RunBlockToolOutput): {
    };
  }

  if (isRunBlockDetailsOutput(output)) {
    const inputKeys = Object.keys(
      (output.block.inputs as { properties?: Record<string, unknown> })
        ?.properties ?? {},
    );
    return {
      icon,
      title: output.block.name,
      description:
        inputKeys.length > 0
          ? `${inputKeys.length} input field${inputKeys.length === 1 ? "" : "s"} available`
          : output.message,
    };
  }

  if (isRunBlockSetupRequirementsOutput(output)) {
    const missingCredsCount = Object.keys(
      (output.setup_info.user_readiness?.missing_credentials ?? {}) as Record<
@@ -3,7 +3,6 @@ import { useBreakpoint } from "@/lib/hooks/useBreakpoint";
import { useSupabase } from "@/lib/supabase/hooks/useSupabase";
import { useChat } from "@ai-sdk/react";
import { DefaultChatTransport } from "ai";
import { useRouter } from "next/navigation";
import { useEffect, useMemo, useState } from "react";
import { useChatSession } from "./useChatSession";

@@ -11,7 +10,6 @@ export function useCopilotPage() {
  const { isUserLoading, isLoggedIn } = useSupabase();
  const [isDrawerOpen, setIsDrawerOpen] = useState(false);
  const [pendingMessage, setPendingMessage] = useState<string | null>(null);
  const router = useRouter();

  const {
    sessionId,

@@ -54,10 +52,6 @@ export function useCopilotPage() {
    transport: transport ?? undefined,
  });

  useEffect(() => {
    if (!isUserLoading && !isLoggedIn) router.replace("/login");
  }, [isUserLoading, isLoggedIn]);

  useEffect(() => {
    if (!hydratedMessages || hydratedMessages.length === 0) return;
    setMessages((prev) => {
@@ -7013,62 +7013,13 @@
      "properties": {
        "id": { "type": "string", "title": "Id" },
        "name": { "type": "string", "title": "Name" },
        "description": { "type": "string", "title": "Description" },
        "categories": {
          "items": { "type": "string" },
          "type": "array",
          "title": "Categories"
        },
        "input_schema": {
          "additionalProperties": true,
          "type": "object",
          "title": "Input Schema"
        },
        "output_schema": {
          "additionalProperties": true,
          "type": "object",
          "title": "Output Schema"
        },
        "required_inputs": {
          "items": { "$ref": "#/components/schemas/BlockInputFieldInfo" },
          "type": "array",
          "title": "Required Inputs",
          "description": "List of required input fields for this block"
        }
        "description": { "type": "string", "title": "Description" }
      },
      "type": "object",
      "required": [
        "id",
        "name",
        "description",
        "categories",
        "input_schema",
        "output_schema"
      ],
      "required": ["id", "name", "description"],
      "title": "BlockInfoSummary",
      "description": "Summary of a block for search results."
    },
    "BlockInputFieldInfo": {
      "properties": {
        "name": { "type": "string", "title": "Name" },
        "type": { "type": "string", "title": "Type" },
        "description": {
          "type": "string",
          "title": "Description",
          "default": ""
        },
        "required": {
          "type": "boolean",
          "title": "Required",
          "default": false
        },
        "default": { "anyOf": [{}, { "type": "null" }], "title": "Default" }
      },
      "type": "object",
      "required": ["name", "type"],
      "title": "BlockInputFieldInfo",
      "description": "Information about a block input field."
    },
    "BlockListResponse": {
      "properties": {
        "type": {

@@ -7086,12 +7037,7 @@
          "title": "Blocks"
        },
        "count": { "type": "integer", "title": "Count" },
        "query": { "type": "string", "title": "Query" },
        "usage_hint": {
          "type": "string",
          "title": "Usage Hint",
          "default": "To execute a block, call run_block with block_id set to the block's 'id' field and input_data containing the required fields from input_schema."
        }
        "query": { "type": "string", "title": "Query" }
      },
      "type": "object",
      "required": ["message", "blocks", "count", "query"],

@@ -10490,6 +10436,7 @@
        "agent_saved",
        "clarification_needed",
        "block_list",
        "block_details",
        "block_output",
        "doc_search_results",
        "doc_page",
@@ -6,6 +6,7 @@ import { SupabaseClient } from "@supabase/supabase-js";
export const PROTECTED_PAGES = [
  "/auth/authorize",
  "/auth/integrations",
  "/copilot",
  "/monitor",
  "/build",
  "/onboarding",