mirror of
https://github.com/Significant-Gravitas/AutoGPT.git
synced 2026-03-17 03:00:27 -04:00
Compare commits
1 Commits
master
...
otto/secrt
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
5e2547de50 |
@@ -53,7 +53,6 @@ from backend.copilot.tools.models import (
|
||||
UnderstandingUpdatedResponse,
|
||||
)
|
||||
from backend.copilot.tracking import track_user_message
|
||||
from backend.data.redis_client import get_redis_async
|
||||
from backend.data.workspace import get_or_create_workspace
|
||||
from backend.util.exceptions import NotFoundError
|
||||
|
||||
@@ -128,7 +127,6 @@ class SessionSummaryResponse(BaseModel):
|
||||
created_at: str
|
||||
updated_at: str
|
||||
title: str | None = None
|
||||
is_processing: bool
|
||||
|
||||
|
||||
class ListSessionsResponse(BaseModel):
|
||||
@@ -187,28 +185,6 @@ async def list_sessions(
|
||||
"""
|
||||
sessions, total_count = await get_user_sessions(user_id, limit, offset)
|
||||
|
||||
# Batch-check Redis for active stream status on each session
|
||||
processing_set: set[str] = set()
|
||||
if sessions:
|
||||
try:
|
||||
redis = await get_redis_async()
|
||||
pipe = redis.pipeline(transaction=False)
|
||||
for session in sessions:
|
||||
pipe.hget(
|
||||
f"{config.session_meta_prefix}{session.session_id}",
|
||||
"status",
|
||||
)
|
||||
statuses = await pipe.execute()
|
||||
processing_set = {
|
||||
session.session_id
|
||||
for session, st in zip(sessions, statuses)
|
||||
if st == "running"
|
||||
}
|
||||
except Exception:
|
||||
logger.warning(
|
||||
"Failed to fetch processing status from Redis; " "defaulting to empty"
|
||||
)
|
||||
|
||||
return ListSessionsResponse(
|
||||
sessions=[
|
||||
SessionSummaryResponse(
|
||||
@@ -216,7 +192,6 @@ async def list_sessions(
|
||||
created_at=session.started_at.isoformat(),
|
||||
updated_at=session.updated_at.isoformat(),
|
||||
title=session.title,
|
||||
is_processing=session.session_id in processing_set,
|
||||
)
|
||||
for session in sessions
|
||||
],
|
||||
|
||||
@@ -165,6 +165,7 @@ class LibraryAgent(pydantic.BaseModel):
|
||||
id: str
|
||||
graph_id: str
|
||||
graph_version: int
|
||||
owner_user_id: str
|
||||
|
||||
image_url: str | None
|
||||
|
||||
@@ -205,9 +206,7 @@ class LibraryAgent(pydantic.BaseModel):
|
||||
default_factory=list,
|
||||
description="List of recent executions with status, score, and summary",
|
||||
)
|
||||
can_access_graph: bool = pydantic.Field(
|
||||
description="Indicates whether the same user owns the corresponding graph"
|
||||
)
|
||||
can_access_graph: bool
|
||||
is_latest_version: bool
|
||||
is_favorite: bool
|
||||
folder_id: str | None = None
|
||||
@@ -325,6 +324,7 @@ class LibraryAgent(pydantic.BaseModel):
|
||||
id=agent.id,
|
||||
graph_id=agent.agentGraphId,
|
||||
graph_version=agent.agentGraphVersion,
|
||||
owner_user_id=agent.userId,
|
||||
image_url=agent.imageUrl,
|
||||
creator_name=creator_name,
|
||||
creator_image_url=creator_image_url,
|
||||
|
||||
@@ -42,6 +42,7 @@ async def test_get_library_agents_success(
|
||||
id="test-agent-1",
|
||||
graph_id="test-agent-1",
|
||||
graph_version=1,
|
||||
owner_user_id=test_user_id,
|
||||
name="Test Agent 1",
|
||||
description="Test Description 1",
|
||||
image_url=None,
|
||||
@@ -66,6 +67,7 @@ async def test_get_library_agents_success(
|
||||
id="test-agent-2",
|
||||
graph_id="test-agent-2",
|
||||
graph_version=1,
|
||||
owner_user_id=test_user_id,
|
||||
name="Test Agent 2",
|
||||
description="Test Description 2",
|
||||
image_url=None,
|
||||
@@ -129,6 +131,7 @@ async def test_get_favorite_library_agents_success(
|
||||
id="test-agent-1",
|
||||
graph_id="test-agent-1",
|
||||
graph_version=1,
|
||||
owner_user_id=test_user_id,
|
||||
name="Favorite Agent 1",
|
||||
description="Test Favorite Description 1",
|
||||
image_url=None,
|
||||
@@ -181,6 +184,7 @@ def test_add_agent_to_library_success(
|
||||
id="test-library-agent-id",
|
||||
graph_id="test-agent-1",
|
||||
graph_version=1,
|
||||
owner_user_id=test_user_id,
|
||||
name="Test Agent 1",
|
||||
description="Test Description 1",
|
||||
image_url=None,
|
||||
|
||||
@@ -94,8 +94,3 @@ class NotificationPayload(pydantic.BaseModel):
|
||||
|
||||
class OnboardingNotificationPayload(NotificationPayload):
|
||||
step: OnboardingStep | None
|
||||
|
||||
|
||||
class CopilotCompletionPayload(NotificationPayload):
|
||||
session_id: str
|
||||
status: Literal["completed", "failed"]
|
||||
|
||||
@@ -156,15 +156,10 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta):
|
||||
CODESTRAL = "mistralai/codestral-2508"
|
||||
COHERE_COMMAND_R_08_2024 = "cohere/command-r-08-2024"
|
||||
COHERE_COMMAND_R_PLUS_08_2024 = "cohere/command-r-plus-08-2024"
|
||||
COHERE_COMMAND_A_03_2025 = "cohere/command-a-03-2025"
|
||||
COHERE_COMMAND_A_TRANSLATE_08_2025 = "cohere/command-a-translate-08-2025"
|
||||
COHERE_COMMAND_A_REASONING_08_2025 = "cohere/command-a-reasoning-08-2025"
|
||||
COHERE_COMMAND_A_VISION_07_2025 = "cohere/command-a-vision-07-2025"
|
||||
DEEPSEEK_CHAT = "deepseek/deepseek-chat" # Actually: DeepSeek V3
|
||||
DEEPSEEK_R1_0528 = "deepseek/deepseek-r1-0528"
|
||||
PERPLEXITY_SONAR = "perplexity/sonar"
|
||||
PERPLEXITY_SONAR_PRO = "perplexity/sonar-pro"
|
||||
PERPLEXITY_SONAR_REASONING_PRO = "perplexity/sonar-reasoning-pro"
|
||||
PERPLEXITY_SONAR_DEEP_RESEARCH = "perplexity/sonar-deep-research"
|
||||
NOUSRESEARCH_HERMES_3_LLAMA_3_1_405B = "nousresearch/hermes-3-llama-3.1-405b"
|
||||
NOUSRESEARCH_HERMES_3_LLAMA_3_1_70B = "nousresearch/hermes-3-llama-3.1-70b"
|
||||
@@ -172,7 +167,6 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta):
|
||||
AMAZON_NOVA_MICRO_V1 = "amazon/nova-micro-v1"
|
||||
AMAZON_NOVA_PRO_V1 = "amazon/nova-pro-v1"
|
||||
MICROSOFT_WIZARDLM_2_8X22B = "microsoft/wizardlm-2-8x22b"
|
||||
MICROSOFT_PHI_4 = "microsoft/phi-4"
|
||||
GRYPHE_MYTHOMAX_L2_13B = "gryphe/mythomax-l2-13b"
|
||||
META_LLAMA_4_SCOUT = "meta-llama/llama-4-scout"
|
||||
META_LLAMA_4_MAVERICK = "meta-llama/llama-4-maverick"
|
||||
@@ -468,36 +462,6 @@ MODEL_METADATA = {
|
||||
LlmModel.COHERE_COMMAND_R_PLUS_08_2024: ModelMetadata(
|
||||
"open_router", 128000, 4096, "Command R Plus 08.2024", "OpenRouter", "Cohere", 2
|
||||
),
|
||||
LlmModel.COHERE_COMMAND_A_03_2025: ModelMetadata(
|
||||
"open_router", 256000, 8192, "Command A 03.2025", "OpenRouter", "Cohere", 2
|
||||
),
|
||||
LlmModel.COHERE_COMMAND_A_TRANSLATE_08_2025: ModelMetadata(
|
||||
"open_router",
|
||||
128000,
|
||||
8192,
|
||||
"Command A Translate 08.2025",
|
||||
"OpenRouter",
|
||||
"Cohere",
|
||||
2,
|
||||
),
|
||||
LlmModel.COHERE_COMMAND_A_REASONING_08_2025: ModelMetadata(
|
||||
"open_router",
|
||||
256000,
|
||||
32768,
|
||||
"Command A Reasoning 08.2025",
|
||||
"OpenRouter",
|
||||
"Cohere",
|
||||
3,
|
||||
),
|
||||
LlmModel.COHERE_COMMAND_A_VISION_07_2025: ModelMetadata(
|
||||
"open_router",
|
||||
128000,
|
||||
8192,
|
||||
"Command A Vision 07.2025",
|
||||
"OpenRouter",
|
||||
"Cohere",
|
||||
2,
|
||||
),
|
||||
LlmModel.DEEPSEEK_CHAT: ModelMetadata(
|
||||
"open_router", 64000, 2048, "DeepSeek Chat", "OpenRouter", "DeepSeek", 1
|
||||
),
|
||||
@@ -510,15 +474,6 @@ MODEL_METADATA = {
|
||||
LlmModel.PERPLEXITY_SONAR_PRO: ModelMetadata(
|
||||
"open_router", 200000, 8000, "Sonar Pro", "OpenRouter", "Perplexity", 2
|
||||
),
|
||||
LlmModel.PERPLEXITY_SONAR_REASONING_PRO: ModelMetadata(
|
||||
"open_router",
|
||||
128000,
|
||||
8000,
|
||||
"Sonar Reasoning Pro",
|
||||
"OpenRouter",
|
||||
"Perplexity",
|
||||
2,
|
||||
),
|
||||
LlmModel.PERPLEXITY_SONAR_DEEP_RESEARCH: ModelMetadata(
|
||||
"open_router",
|
||||
128000,
|
||||
@@ -564,9 +519,6 @@ MODEL_METADATA = {
|
||||
LlmModel.MICROSOFT_WIZARDLM_2_8X22B: ModelMetadata(
|
||||
"open_router", 65536, 4096, "WizardLM 2 8x22B", "OpenRouter", "Microsoft", 1
|
||||
),
|
||||
LlmModel.MICROSOFT_PHI_4: ModelMetadata(
|
||||
"open_router", 16384, 16384, "Phi-4", "OpenRouter", "Microsoft", 1
|
||||
),
|
||||
LlmModel.GRYPHE_MYTHOMAX_L2_13B: ModelMetadata(
|
||||
"open_router", 4096, 4096, "MythoMax L2 13B", "OpenRouter", "Gryphe", 1
|
||||
),
|
||||
|
||||
@@ -4,7 +4,7 @@ from enum import Enum
|
||||
from typing import Any, Literal
|
||||
|
||||
import openai
|
||||
from pydantic import SecretStr, field_validator
|
||||
from pydantic import SecretStr
|
||||
|
||||
from backend.blocks._base import (
|
||||
Block,
|
||||
@@ -13,7 +13,6 @@ from backend.blocks._base import (
|
||||
BlockSchemaInput,
|
||||
BlockSchemaOutput,
|
||||
)
|
||||
from backend.data.block import BlockInput
|
||||
from backend.data.model import (
|
||||
APIKeyCredentials,
|
||||
CredentialsField,
|
||||
@@ -36,20 +35,6 @@ class PerplexityModel(str, Enum):
|
||||
SONAR_DEEP_RESEARCH = "perplexity/sonar-deep-research"
|
||||
|
||||
|
||||
def _sanitize_perplexity_model(value: Any) -> PerplexityModel:
|
||||
"""Return a valid PerplexityModel, falling back to SONAR for invalid values."""
|
||||
if isinstance(value, PerplexityModel):
|
||||
return value
|
||||
try:
|
||||
return PerplexityModel(value)
|
||||
except ValueError:
|
||||
logger.warning(
|
||||
f"Invalid PerplexityModel '{value}', "
|
||||
f"falling back to {PerplexityModel.SONAR.value}"
|
||||
)
|
||||
return PerplexityModel.SONAR
|
||||
|
||||
|
||||
PerplexityCredentials = CredentialsMetaInput[
|
||||
Literal[ProviderName.OPEN_ROUTER], Literal["api_key"]
|
||||
]
|
||||
@@ -88,25 +73,6 @@ class PerplexityBlock(Block):
|
||||
advanced=False,
|
||||
)
|
||||
credentials: PerplexityCredentials = PerplexityCredentialsField()
|
||||
|
||||
@field_validator("model", mode="before")
|
||||
@classmethod
|
||||
def fallback_invalid_model(cls, v: Any) -> PerplexityModel:
|
||||
"""Fall back to SONAR if the model value is not a valid
|
||||
PerplexityModel (e.g. an OpenAI model ID set by the agent
|
||||
generator)."""
|
||||
return _sanitize_perplexity_model(v)
|
||||
|
||||
@classmethod
|
||||
def validate_data(cls, data: BlockInput) -> str | None:
|
||||
"""Sanitize the model field before JSON schema validation so that
|
||||
invalid values are replaced with the default instead of raising a
|
||||
BlockInputError."""
|
||||
model_value = data.get("model")
|
||||
if model_value is not None:
|
||||
data["model"] = _sanitize_perplexity_model(model_value).value
|
||||
return super().validate_data(data)
|
||||
|
||||
system_prompt: str = SchemaField(
|
||||
title="System Prompt",
|
||||
default="",
|
||||
|
||||
@@ -1,81 +0,0 @@
|
||||
"""Unit tests for PerplexityBlock model fallback behavior."""
|
||||
|
||||
import pytest
|
||||
|
||||
from backend.blocks.perplexity import (
|
||||
TEST_CREDENTIALS_INPUT,
|
||||
PerplexityBlock,
|
||||
PerplexityModel,
|
||||
)
|
||||
|
||||
|
||||
def _make_input(**overrides) -> dict:
|
||||
defaults = {
|
||||
"prompt": "test query",
|
||||
"credentials": TEST_CREDENTIALS_INPUT,
|
||||
}
|
||||
defaults.update(overrides)
|
||||
return defaults
|
||||
|
||||
|
||||
class TestPerplexityModelFallback:
|
||||
"""Tests for fallback_invalid_model field_validator."""
|
||||
|
||||
def test_invalid_model_falls_back_to_sonar(self):
|
||||
inp = PerplexityBlock.Input(**_make_input(model="gpt-5.2-2025-12-11"))
|
||||
assert inp.model == PerplexityModel.SONAR
|
||||
|
||||
def test_another_invalid_model_falls_back_to_sonar(self):
|
||||
inp = PerplexityBlock.Input(**_make_input(model="gpt-4o"))
|
||||
assert inp.model == PerplexityModel.SONAR
|
||||
|
||||
def test_valid_model_string_is_kept(self):
|
||||
inp = PerplexityBlock.Input(**_make_input(model="perplexity/sonar-pro"))
|
||||
assert inp.model == PerplexityModel.SONAR_PRO
|
||||
|
||||
def test_valid_enum_value_is_kept(self):
|
||||
inp = PerplexityBlock.Input(
|
||||
**_make_input(model=PerplexityModel.SONAR_DEEP_RESEARCH)
|
||||
)
|
||||
assert inp.model == PerplexityModel.SONAR_DEEP_RESEARCH
|
||||
|
||||
def test_default_model_when_omitted(self):
|
||||
inp = PerplexityBlock.Input(**_make_input())
|
||||
assert inp.model == PerplexityModel.SONAR
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"model_value",
|
||||
[
|
||||
"perplexity/sonar",
|
||||
"perplexity/sonar-pro",
|
||||
"perplexity/sonar-deep-research",
|
||||
],
|
||||
)
|
||||
def test_all_valid_models_accepted(self, model_value: str):
|
||||
inp = PerplexityBlock.Input(**_make_input(model=model_value))
|
||||
assert inp.model.value == model_value
|
||||
|
||||
|
||||
class TestPerplexityValidateData:
|
||||
"""Tests for validate_data which runs during block execution (before
|
||||
Pydantic instantiation). Invalid models must be sanitized here so
|
||||
JSON schema validation does not reject them."""
|
||||
|
||||
def test_invalid_model_sanitized_before_schema_validation(self):
|
||||
data = _make_input(model="gpt-5.2-2025-12-11")
|
||||
error = PerplexityBlock.Input.validate_data(data)
|
||||
assert error is None
|
||||
assert data["model"] == PerplexityModel.SONAR.value
|
||||
|
||||
def test_valid_model_unchanged_by_validate_data(self):
|
||||
data = _make_input(model="perplexity/sonar-pro")
|
||||
error = PerplexityBlock.Input.validate_data(data)
|
||||
assert error is None
|
||||
assert data["model"] == "perplexity/sonar-pro"
|
||||
|
||||
def test_missing_model_uses_default(self):
|
||||
data = _make_input() # no model key
|
||||
error = PerplexityBlock.Input.validate_data(data)
|
||||
assert error is None
|
||||
inp = PerplexityBlock.Input(**data)
|
||||
assert inp.model == PerplexityModel.SONAR
|
||||
@@ -26,17 +26,3 @@ For other services, search the MCP registry at https://registry.modelcontextprot
|
||||
|
||||
If the server requires credentials, a `SetupRequirementsResponse` is returned with an OAuth
|
||||
login prompt. Once the user completes the flow and confirms, retry the same call immediately.
|
||||
|
||||
### Communication style
|
||||
|
||||
Avoid technical jargon like "MCP server", "OAuth", or "credentials" when talking to the user.
|
||||
Use plain, friendly language instead:
|
||||
|
||||
| Instead of… | Say… |
|
||||
|---|---|
|
||||
| "Let me connect to Sentry's MCP server and discover what tools are available." | "I can connect to Sentry and help identify important issues." |
|
||||
| "Let me connect to Sentry's MCP server now." | "Next, I'll connect to Sentry." |
|
||||
| "The MCP server at mcp.sentry.dev requires authentication. Please connect your credentials to continue." | "To continue, sign in to Sentry and approve access." |
|
||||
| "Sentry's MCP server needs OAuth authentication. You should see a prompt to connect your Sentry account…" | "You should see a prompt to sign in to Sentry. Once connected, I can help surface critical issues right away." |
|
||||
|
||||
Use **"connect to [Service]"** or **"sign in to [Service]"** — never "MCP server", "OAuth", or "credentials".
|
||||
|
||||
@@ -23,11 +23,6 @@ from typing import Any, Literal
|
||||
|
||||
import orjson
|
||||
|
||||
from backend.api.model import CopilotCompletionPayload
|
||||
from backend.data.notification_bus import (
|
||||
AsyncRedisNotificationEventBus,
|
||||
NotificationEvent,
|
||||
)
|
||||
from backend.data.redis_client import get_redis_async
|
||||
|
||||
from .config import ChatConfig
|
||||
@@ -43,7 +38,6 @@ from .response_model import (
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
config = ChatConfig()
|
||||
_notification_bus = AsyncRedisNotificationEventBus()
|
||||
|
||||
# Track background tasks for this pod (just the asyncio.Task reference, not subscribers)
|
||||
_local_sessions: dict[str, asyncio.Task] = {}
|
||||
@@ -751,29 +745,6 @@ async def mark_session_completed(
|
||||
|
||||
# Clean up local session reference if exists
|
||||
_local_sessions.pop(session_id, None)
|
||||
|
||||
# Publish copilot completion notification via WebSocket
|
||||
if meta:
|
||||
parsed = _parse_session_meta(meta, session_id)
|
||||
if parsed.user_id:
|
||||
try:
|
||||
await _notification_bus.publish(
|
||||
NotificationEvent(
|
||||
user_id=parsed.user_id,
|
||||
payload=CopilotCompletionPayload(
|
||||
type="copilot_completion",
|
||||
event="session_completed",
|
||||
session_id=session_id,
|
||||
status=status,
|
||||
),
|
||||
)
|
||||
)
|
||||
except Exception as e:
|
||||
logger.warning(
|
||||
f"Failed to publish copilot completion notification "
|
||||
f"for session {session_id}: {e}"
|
||||
)
|
||||
|
||||
return True
|
||||
|
||||
|
||||
|
||||
@@ -829,12 +829,8 @@ class AgentFixer:
|
||||
|
||||
For nodes whose block has category "AI", this function ensures that the
|
||||
input_default has a "model" parameter set to one of the allowed models.
|
||||
If missing or set to an unsupported value, it is replaced with the
|
||||
appropriate default.
|
||||
|
||||
Blocks that define their own ``enum`` constraint on the ``model`` field
|
||||
in their inputSchema (e.g. PerplexityBlock) are validated against that
|
||||
enum instead of the generic allowed set.
|
||||
If missing or set to an unsupported value, it is replaced with
|
||||
default_model.
|
||||
|
||||
Args:
|
||||
agent: The agent dictionary to fix
|
||||
@@ -844,7 +840,7 @@ class AgentFixer:
|
||||
Returns:
|
||||
The fixed agent dictionary
|
||||
"""
|
||||
generic_allowed_models = {"gpt-4o", "claude-opus-4-6"}
|
||||
allowed_models = {"gpt-4o", "claude-opus-4-6"}
|
||||
|
||||
# Create a mapping of block_id to block for quick lookup
|
||||
block_map = {block.get("id"): block for block in blocks}
|
||||
@@ -872,36 +868,20 @@ class AgentFixer:
|
||||
input_default = node.get("input_default", {})
|
||||
current_model = input_default.get("model")
|
||||
|
||||
# Determine allowed models and default from the block's schema.
|
||||
# Blocks with a block-specific enum on the model field (e.g.
|
||||
# PerplexityBlock) use their own enum values; others use the
|
||||
# generic set.
|
||||
model_schema = (
|
||||
block.get("inputSchema", {}).get("properties", {}).get("model", {})
|
||||
)
|
||||
block_model_enum = model_schema.get("enum")
|
||||
|
||||
if block_model_enum:
|
||||
allowed_models = set(block_model_enum)
|
||||
fallback_model = model_schema.get("default", block_model_enum[0])
|
||||
else:
|
||||
allowed_models = generic_allowed_models
|
||||
fallback_model = default_model
|
||||
|
||||
if current_model not in allowed_models:
|
||||
block_name = block.get("name", "Unknown AI Block")
|
||||
if current_model is None:
|
||||
self.add_fix_log(
|
||||
f"Added model parameter '{fallback_model}' to AI "
|
||||
f"Added model parameter '{default_model}' to AI "
|
||||
f"block node {node_id} ({block_name})"
|
||||
)
|
||||
else:
|
||||
self.add_fix_log(
|
||||
f"Replaced unsupported model '{current_model}' "
|
||||
f"with '{fallback_model}' on AI block node "
|
||||
f"with '{default_model}' on AI block node "
|
||||
f"{node_id} ({block_name})"
|
||||
)
|
||||
input_default["model"] = fallback_model
|
||||
input_default["model"] = default_model
|
||||
node["input_default"] = input_default
|
||||
fixed_count += 1
|
||||
|
||||
|
||||
@@ -475,111 +475,6 @@ class TestFixAiModelParameter:
|
||||
|
||||
assert result["nodes"][0]["input_default"]["model"] == "claude-opus-4-6"
|
||||
|
||||
def test_block_specific_enum_uses_block_default(self):
|
||||
"""Blocks with their own model enum (e.g. PerplexityBlock) should use
|
||||
the block's allowed models and default, not the generic ones."""
|
||||
fixer = AgentFixer()
|
||||
block_id = generate_uuid()
|
||||
node = _make_node(
|
||||
node_id="n1",
|
||||
block_id=block_id,
|
||||
input_default={"model": "gpt-5.2-2025-12-11"},
|
||||
)
|
||||
agent = _make_agent(nodes=[node])
|
||||
|
||||
blocks = [
|
||||
{
|
||||
"id": block_id,
|
||||
"name": "PerplexityBlock",
|
||||
"categories": [{"category": "AI"}],
|
||||
"inputSchema": {
|
||||
"properties": {
|
||||
"model": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"perplexity/sonar",
|
||||
"perplexity/sonar-pro",
|
||||
"perplexity/sonar-deep-research",
|
||||
],
|
||||
"default": "perplexity/sonar",
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
]
|
||||
|
||||
result = fixer.fix_ai_model_parameter(agent, blocks)
|
||||
|
||||
assert result["nodes"][0]["input_default"]["model"] == "perplexity/sonar"
|
||||
|
||||
def test_block_specific_enum_valid_model_unchanged(self):
|
||||
"""A valid block-specific model should not be replaced."""
|
||||
fixer = AgentFixer()
|
||||
block_id = generate_uuid()
|
||||
node = _make_node(
|
||||
node_id="n1",
|
||||
block_id=block_id,
|
||||
input_default={"model": "perplexity/sonar-pro"},
|
||||
)
|
||||
agent = _make_agent(nodes=[node])
|
||||
|
||||
blocks = [
|
||||
{
|
||||
"id": block_id,
|
||||
"name": "PerplexityBlock",
|
||||
"categories": [{"category": "AI"}],
|
||||
"inputSchema": {
|
||||
"properties": {
|
||||
"model": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"perplexity/sonar",
|
||||
"perplexity/sonar-pro",
|
||||
"perplexity/sonar-deep-research",
|
||||
],
|
||||
"default": "perplexity/sonar",
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
]
|
||||
|
||||
result = fixer.fix_ai_model_parameter(agent, blocks)
|
||||
|
||||
assert result["nodes"][0]["input_default"]["model"] == "perplexity/sonar-pro"
|
||||
|
||||
def test_block_specific_enum_missing_model_gets_block_default(self):
|
||||
"""Missing model on a block with enum should use the block's default."""
|
||||
fixer = AgentFixer()
|
||||
block_id = generate_uuid()
|
||||
node = _make_node(node_id="n1", block_id=block_id, input_default={})
|
||||
agent = _make_agent(nodes=[node])
|
||||
|
||||
blocks = [
|
||||
{
|
||||
"id": block_id,
|
||||
"name": "PerplexityBlock",
|
||||
"categories": [{"category": "AI"}],
|
||||
"inputSchema": {
|
||||
"properties": {
|
||||
"model": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"perplexity/sonar",
|
||||
"perplexity/sonar-pro",
|
||||
"perplexity/sonar-deep-research",
|
||||
],
|
||||
"default": "perplexity/sonar",
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
]
|
||||
|
||||
result = fixer.fix_ai_model_parameter(agent, blocks)
|
||||
|
||||
assert result["nodes"][0]["input_default"]["model"] == "perplexity/sonar"
|
||||
|
||||
|
||||
class TestFixAgentExecutorBlocks:
|
||||
"""Tests for fix_agent_executor_blocks."""
|
||||
|
||||
@@ -34,11 +34,6 @@ logger = logging.getLogger(__name__)
|
||||
_AUTH_STATUS_CODES = {401, 403}
|
||||
|
||||
|
||||
def _service_name(host: str) -> str:
|
||||
"""Strip the 'mcp.' prefix from an MCP hostname: 'mcp.sentry.dev' → 'sentry.dev'"""
|
||||
return host[4:] if host.startswith("mcp.") else host
|
||||
|
||||
|
||||
class RunMCPToolTool(BaseTool):
|
||||
"""
|
||||
Tool for discovering and executing tools on any MCP server.
|
||||
@@ -308,8 +303,8 @@ class RunMCPToolTool(BaseTool):
|
||||
)
|
||||
return ErrorResponse(
|
||||
message=(
|
||||
f"Unable to connect to {_service_name(server_host(server_url))} "
|
||||
"— no credentials configured."
|
||||
f"The MCP server at {server_host(server_url)} requires authentication, "
|
||||
"but no credential configuration was found."
|
||||
),
|
||||
session_id=session_id,
|
||||
)
|
||||
@@ -317,13 +312,15 @@ class RunMCPToolTool(BaseTool):
|
||||
missing_creds_list = list(missing_creds_dict.values())
|
||||
|
||||
host = server_host(server_url)
|
||||
service = _service_name(host)
|
||||
return SetupRequirementsResponse(
|
||||
message=(f"To continue, sign in to {service} and approve access."),
|
||||
message=(
|
||||
f"The MCP server at {host} requires authentication. "
|
||||
"Please connect your credentials to continue."
|
||||
),
|
||||
session_id=session_id,
|
||||
setup_info=SetupInfo(
|
||||
agent_id=server_url,
|
||||
agent_name=service,
|
||||
agent_name=f"MCP: {host}",
|
||||
user_readiness=UserReadiness(
|
||||
has_all_credentials=False,
|
||||
missing_credentials=missing_creds_dict,
|
||||
|
||||
@@ -756,4 +756,4 @@ async def test_build_setup_requirements_returns_setup_response():
|
||||
)
|
||||
assert isinstance(result, SetupRequirementsResponse)
|
||||
assert result.setup_info.agent_id == _SERVER_URL
|
||||
assert "sign in" in result.message.lower()
|
||||
assert "authentication" in result.message.lower()
|
||||
|
||||
@@ -116,15 +116,10 @@ MODEL_COST: dict[LlmModel, int] = {
|
||||
LlmModel.CODESTRAL: 1,
|
||||
LlmModel.COHERE_COMMAND_R_08_2024: 1,
|
||||
LlmModel.COHERE_COMMAND_R_PLUS_08_2024: 3,
|
||||
LlmModel.COHERE_COMMAND_A_03_2025: 3,
|
||||
LlmModel.COHERE_COMMAND_A_TRANSLATE_08_2025: 3,
|
||||
LlmModel.COHERE_COMMAND_A_REASONING_08_2025: 6,
|
||||
LlmModel.COHERE_COMMAND_A_VISION_07_2025: 3,
|
||||
LlmModel.DEEPSEEK_CHAT: 2,
|
||||
LlmModel.DEEPSEEK_R1_0528: 1,
|
||||
LlmModel.PERPLEXITY_SONAR: 1,
|
||||
LlmModel.PERPLEXITY_SONAR_PRO: 5,
|
||||
LlmModel.PERPLEXITY_SONAR_REASONING_PRO: 5,
|
||||
LlmModel.PERPLEXITY_SONAR_DEEP_RESEARCH: 10,
|
||||
LlmModel.NOUSRESEARCH_HERMES_3_LLAMA_3_1_405B: 1,
|
||||
LlmModel.NOUSRESEARCH_HERMES_3_LLAMA_3_1_70B: 1,
|
||||
@@ -132,7 +127,6 @@ MODEL_COST: dict[LlmModel, int] = {
|
||||
LlmModel.AMAZON_NOVA_MICRO_V1: 1,
|
||||
LlmModel.AMAZON_NOVA_PRO_V1: 1,
|
||||
LlmModel.MICROSOFT_WIZARDLM_2_8X22B: 1,
|
||||
LlmModel.MICROSOFT_PHI_4: 1,
|
||||
LlmModel.GRYPHE_MYTHOMAX_L2_13B: 1,
|
||||
LlmModel.META_LLAMA_4_SCOUT: 1,
|
||||
LlmModel.META_LLAMA_4_MAVERICK: 1,
|
||||
|
||||
@@ -8,8 +8,6 @@ from backend.api.model import NotificationPayload
|
||||
from backend.data.event_bus import AsyncRedisEventBus
|
||||
from backend.util.settings import Settings
|
||||
|
||||
_settings = Settings()
|
||||
|
||||
|
||||
class NotificationEvent(BaseModel):
|
||||
"""Generic notification event destined for websocket delivery."""
|
||||
@@ -28,7 +26,7 @@ class AsyncRedisNotificationEventBus(AsyncRedisEventBus[NotificationEvent]):
|
||||
|
||||
@property
|
||||
def event_bus_name(self) -> str:
|
||||
return _settings.config.notification_event_bus_name
|
||||
return Settings().config.notification_event_bus_name
|
||||
|
||||
async def publish(self, event: NotificationEvent) -> None:
|
||||
await self.publish_event(event, event.user_id)
|
||||
|
||||
@@ -1,40 +0,0 @@
|
||||
-- Fix PerplexityBlock nodes that have invalid model values (e.g. gpt-4o,
|
||||
-- gpt-5.2-2025-12-11) set by the agent generator. Defaults them to the
|
||||
-- standard "perplexity/sonar" model.
|
||||
--
|
||||
-- PerplexityBlock ID: c8a5f2e9-8b3d-4a7e-9f6c-1d5e3c9b7a4f
|
||||
-- Valid models: perplexity/sonar, perplexity/sonar-pro, perplexity/sonar-deep-research
|
||||
|
||||
UPDATE "AgentNode"
|
||||
SET "constantInput" = JSONB_SET(
|
||||
"constantInput"::jsonb,
|
||||
'{model}',
|
||||
'"perplexity/sonar"'::jsonb
|
||||
)
|
||||
WHERE "agentBlockId" = 'c8a5f2e9-8b3d-4a7e-9f6c-1d5e3c9b7a4f'
|
||||
AND "constantInput"::jsonb ? 'model'
|
||||
AND "constantInput"::jsonb->>'model' NOT IN (
|
||||
'perplexity/sonar',
|
||||
'perplexity/sonar-pro',
|
||||
'perplexity/sonar-deep-research'
|
||||
);
|
||||
|
||||
-- Update AgentPreset input overrides (stored in AgentNodeExecutionInputOutput).
|
||||
-- The table links to AgentNode through AgentNodeExecution, not directly.
|
||||
UPDATE "AgentNodeExecutionInputOutput" io
|
||||
SET "data" = JSONB_SET(
|
||||
io."data"::jsonb,
|
||||
'{model}',
|
||||
'"perplexity/sonar"'::jsonb
|
||||
)
|
||||
FROM "AgentNodeExecution" exe
|
||||
JOIN "AgentNode" n ON n."id" = exe."agentNodeId"
|
||||
WHERE io."agentPresetId" IS NOT NULL
|
||||
AND (io."referencedByInputExecId" = exe."id" OR io."referencedByOutputExecId" = exe."id")
|
||||
AND n."agentBlockId" = 'c8a5f2e9-8b3d-4a7e-9f6c-1d5e3c9b7a4f'
|
||||
AND io."data"::jsonb ? 'model'
|
||||
AND io."data"::jsonb->>'model' NOT IN (
|
||||
'perplexity/sonar',
|
||||
'perplexity/sonar-pro',
|
||||
'perplexity/sonar-deep-research'
|
||||
);
|
||||
@@ -4,6 +4,7 @@
|
||||
"id": "test-agent-1",
|
||||
"graph_id": "test-agent-1",
|
||||
"graph_version": 1,
|
||||
"owner_user_id": "3e53486c-cf57-477e-ba2a-cb02dc828e1a",
|
||||
"image_url": null,
|
||||
"creator_name": "Test Creator",
|
||||
"creator_image_url": "",
|
||||
@@ -50,6 +51,7 @@
|
||||
"id": "test-agent-2",
|
||||
"graph_id": "test-agent-2",
|
||||
"graph_version": 1,
|
||||
"owner_user_id": "3e53486c-cf57-477e-ba2a-cb02dc828e1a",
|
||||
"image_url": null,
|
||||
"creator_name": "Test Creator",
|
||||
"creator_image_url": "",
|
||||
|
||||
@@ -75,7 +75,7 @@ export const getSecondCalculatorNode = () => {
|
||||
export const getFormContainerSelector = (blockId: string): string | null => {
|
||||
const node = getNodeByBlockId(blockId);
|
||||
if (node) {
|
||||
return `[data-id="form-creator-container-${node.id}-node"]`;
|
||||
return `[data-id="form-creator-container-${node.id}"]`;
|
||||
}
|
||||
return null;
|
||||
};
|
||||
|
||||
@@ -7,7 +7,6 @@
|
||||
*
|
||||
* Typography (body, small, action, info, tip, warning) uses Tailwind utilities directly in steps.ts
|
||||
*/
|
||||
import "shepherd.js/dist/css/shepherd.css";
|
||||
import "./tutorial.css";
|
||||
|
||||
export const injectTutorialStyles = () => {
|
||||
|
||||
@@ -1,14 +1,3 @@
|
||||
.new-builder-tutorial-disable {
|
||||
opacity: 0.3 !important;
|
||||
pointer-events: none !important;
|
||||
filter: grayscale(100%) !important;
|
||||
}
|
||||
|
||||
.new-builder-tutorial-highlight {
|
||||
position: relative;
|
||||
z-index: 10;
|
||||
}
|
||||
|
||||
.new-builder-tutorial-highlight * {
|
||||
opacity: 1 !important;
|
||||
filter: none !important;
|
||||
|
||||
@@ -1,810 +0,0 @@
|
||||
"use client";
|
||||
import React, {
|
||||
useCallback,
|
||||
useContext,
|
||||
useEffect,
|
||||
useMemo,
|
||||
useState,
|
||||
} from "react";
|
||||
|
||||
import {
|
||||
CredentialsMetaInput,
|
||||
CredentialsType,
|
||||
Graph,
|
||||
GraphExecutionID,
|
||||
LibraryAgentPreset,
|
||||
LibraryAgentPresetID,
|
||||
LibraryAgentPresetUpdatable,
|
||||
Schedule,
|
||||
} from "@/lib/autogpt-server-api";
|
||||
import { useBackendAPI } from "@/lib/autogpt-server-api/context";
|
||||
|
||||
import { RunAgentInputs } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/RunAgentInputs/RunAgentInputs";
|
||||
import { ScheduleTaskDialog } from "@/components/contextual/CronScheduler/cron-scheduler-dialog";
|
||||
import ActionButtonGroup from "@/components/__legacy__/action-button-group";
|
||||
import type { ButtonAction } from "@/components/__legacy__/types";
|
||||
import {
|
||||
Card,
|
||||
CardContent,
|
||||
CardHeader,
|
||||
CardTitle,
|
||||
} from "@/components/__legacy__/ui/card";
|
||||
import {
|
||||
IconCross,
|
||||
IconPlay,
|
||||
IconSave,
|
||||
} from "@/components/__legacy__/ui/icons";
|
||||
import { Input } from "@/components/__legacy__/ui/input";
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import { CredentialsGroupedView } from "@/components/contextual/CredentialsInput/components/CredentialsGroupedView/CredentialsGroupedView";
|
||||
import {
|
||||
findSavedCredentialByProviderAndType,
|
||||
findSavedUserCredentialByProviderAndType,
|
||||
} from "@/components/contextual/CredentialsInput/components/CredentialsGroupedView/helpers";
|
||||
import { InformationTooltip } from "@/components/molecules/InformationTooltip/InformationTooltip";
|
||||
import {
|
||||
useToast,
|
||||
useToastOnFail,
|
||||
} from "@/components/molecules/Toast/use-toast";
|
||||
import { humanizeCronExpression } from "@/lib/cron-expression-utils";
|
||||
import { cn, isEmpty } from "@/lib/utils";
|
||||
import { CredentialsProvidersContext } from "@/providers/agent-credentials/credentials-provider";
|
||||
import { ClockIcon, CopyIcon, InfoIcon } from "@phosphor-icons/react";
|
||||
import { CalendarClockIcon, Trash2Icon } from "lucide-react";
|
||||
|
||||
import { analytics } from "@/services/analytics";
|
||||
import {
|
||||
AgentStatus,
|
||||
AgentStatusChip,
|
||||
} from "@/app/(platform)/build/components/legacy-builder/agent-status-chip";
|
||||
|
||||
export function AgentRunDraftView({
|
||||
graph,
|
||||
agentPreset,
|
||||
doRun: _doRun,
|
||||
onRun,
|
||||
onCreatePreset,
|
||||
onUpdatePreset,
|
||||
doDeletePreset,
|
||||
doCreateSchedule: _doCreateSchedule,
|
||||
onCreateSchedule,
|
||||
agentActions,
|
||||
className,
|
||||
recommendedScheduleCron,
|
||||
}: {
|
||||
graph: Graph;
|
||||
agentActions?: ButtonAction[];
|
||||
recommendedScheduleCron?: string | null;
|
||||
doRun?: (
|
||||
inputs: Record<string, any>,
|
||||
credentialsInputs: Record<string, CredentialsMetaInput>,
|
||||
) => Promise<void>;
|
||||
onRun?: (runID: GraphExecutionID) => void;
|
||||
doCreateSchedule?: (
|
||||
cronExpression: string,
|
||||
scheduleName: string,
|
||||
inputs: Record<string, any>,
|
||||
credentialsInputs: Record<string, CredentialsMetaInput>,
|
||||
) => Promise<void>;
|
||||
onCreateSchedule?: (schedule: Schedule) => void;
|
||||
className?: string;
|
||||
} & (
|
||||
| {
|
||||
onCreatePreset?: (preset: LibraryAgentPreset) => void;
|
||||
agentPreset?: never;
|
||||
onUpdatePreset?: never;
|
||||
doDeletePreset?: never;
|
||||
}
|
||||
| {
|
||||
onCreatePreset?: never;
|
||||
agentPreset: LibraryAgentPreset;
|
||||
onUpdatePreset: (preset: LibraryAgentPreset) => void;
|
||||
doDeletePreset: (presetID: LibraryAgentPresetID) => void;
|
||||
}
|
||||
)): React.ReactNode {
|
||||
const api = useBackendAPI();
|
||||
const { toast } = useToast();
|
||||
const toastOnFail = useToastOnFail();
|
||||
const allProviders = useContext(CredentialsProvidersContext);
|
||||
|
||||
const [inputValues, setInputValues] = useState<Record<string, any>>({});
|
||||
const [inputCredentials, setInputCredentials] = useState<
|
||||
Record<string, CredentialsMetaInput>
|
||||
>({});
|
||||
const [presetName, setPresetName] = useState<string>("");
|
||||
const [presetDescription, setPresetDescription] = useState<string>("");
|
||||
const [changedPresetAttributes, setChangedPresetAttributes] = useState<
|
||||
Set<keyof LibraryAgentPresetUpdatable>
|
||||
>(new Set());
|
||||
const [cronScheduleDialogOpen, setCronScheduleDialogOpen] = useState(false);
|
||||
|
||||
// Update values if agentPreset parameter is changed
|
||||
useEffect(() => {
|
||||
setInputValues(agentPreset?.inputs ?? {});
|
||||
setInputCredentials(agentPreset?.credentials ?? {});
|
||||
setPresetName(agentPreset?.name ?? "");
|
||||
setPresetDescription(agentPreset?.description ?? "");
|
||||
setChangedPresetAttributes(new Set());
|
||||
}, [agentPreset]);
|
||||
|
||||
const agentInputSchema = useMemo(
|
||||
() => graph.trigger_setup_info?.config_schema ?? graph.input_schema,
|
||||
[graph],
|
||||
);
|
||||
const agentInputFields = useMemo(
|
||||
() =>
|
||||
Object.fromEntries(
|
||||
Object.entries(agentInputSchema.properties).filter(
|
||||
([_, subSchema]) => !subSchema.hidden,
|
||||
),
|
||||
),
|
||||
[agentInputSchema],
|
||||
);
|
||||
const agentCredentialsInputFields = useMemo(
|
||||
() => graph.credentials_input_schema.properties,
|
||||
[graph],
|
||||
);
|
||||
const credentialFields = useMemo(
|
||||
function getCredentialFields() {
|
||||
return Object.entries(agentCredentialsInputFields);
|
||||
},
|
||||
[agentCredentialsInputFields],
|
||||
);
|
||||
const requiredCredentials = useMemo(
|
||||
function getRequiredCredentials() {
|
||||
return new Set(
|
||||
(graph.credentials_input_schema?.required as string[]) || [],
|
||||
);
|
||||
},
|
||||
[graph.credentials_input_schema?.required],
|
||||
);
|
||||
|
||||
useEffect(
|
||||
function initializeDefaultCredentials() {
|
||||
if (!allProviders) return;
|
||||
if (!graph.credentials_input_schema?.properties) return;
|
||||
if (requiredCredentials.size === 0) return;
|
||||
|
||||
setInputCredentials(function updateCredentials(currentCreds) {
|
||||
const next = { ...currentCreds };
|
||||
let didAdd = false;
|
||||
|
||||
for (const key of requiredCredentials) {
|
||||
if (next[key]) continue;
|
||||
const schema = graph.credentials_input_schema.properties[key];
|
||||
if (!schema) continue;
|
||||
|
||||
const providerNames = schema.credentials_provider || [];
|
||||
const credentialTypes = schema.credentials_types || [];
|
||||
const requiredScopes = schema.credentials_scopes;
|
||||
|
||||
const userCredential = findSavedUserCredentialByProviderAndType(
|
||||
providerNames,
|
||||
credentialTypes,
|
||||
requiredScopes,
|
||||
allProviders,
|
||||
);
|
||||
|
||||
const savedCredential =
|
||||
userCredential ||
|
||||
findSavedCredentialByProviderAndType(
|
||||
providerNames,
|
||||
credentialTypes,
|
||||
requiredScopes,
|
||||
allProviders,
|
||||
);
|
||||
|
||||
if (!savedCredential) continue;
|
||||
|
||||
next[key] = {
|
||||
id: savedCredential.id,
|
||||
provider: savedCredential.provider,
|
||||
type: savedCredential.type as CredentialsType,
|
||||
title: savedCredential.title,
|
||||
};
|
||||
didAdd = true;
|
||||
}
|
||||
|
||||
if (!didAdd) return currentCreds;
|
||||
return next;
|
||||
});
|
||||
},
|
||||
[
|
||||
allProviders,
|
||||
graph.credentials_input_schema?.properties,
|
||||
requiredCredentials,
|
||||
],
|
||||
);
|
||||
|
||||
const [allRequiredInputsAreSet, missingInputs] = useMemo(() => {
|
||||
const nonEmptyInputs = new Set(
|
||||
Object.keys(inputValues).filter((k) => !isEmpty(inputValues[k])),
|
||||
);
|
||||
const requiredInputs = new Set(
|
||||
agentInputSchema.required as string[] | undefined,
|
||||
);
|
||||
// Backwards-compatible implementation of isSupersetOf and difference
|
||||
const isSuperset = Array.from(requiredInputs).every((item) =>
|
||||
nonEmptyInputs.has(item),
|
||||
);
|
||||
const difference = Array.from(requiredInputs).filter(
|
||||
(item) => !nonEmptyInputs.has(item),
|
||||
);
|
||||
return [isSuperset, difference];
|
||||
}, [agentInputSchema.required, inputValues]);
|
||||
const [allCredentialsAreSet, missingCredentials] = useMemo(
|
||||
function getCredentialStatus() {
|
||||
const missing = Array.from(requiredCredentials).filter((key) => {
|
||||
const cred = inputCredentials[key];
|
||||
return !cred || !cred.id;
|
||||
});
|
||||
return [missing.length === 0, missing];
|
||||
},
|
||||
[requiredCredentials, inputCredentials],
|
||||
);
|
||||
function addChangedCredentials(prev: Set<keyof LibraryAgentPresetUpdatable>) {
|
||||
const next = new Set(prev);
|
||||
next.add("credentials");
|
||||
return next;
|
||||
}
|
||||
|
||||
function handleCredentialChange(key: string, value?: CredentialsMetaInput) {
|
||||
setInputCredentials(function updateInputCredentials(currentCreds) {
|
||||
const next = { ...currentCreds };
|
||||
if (value === undefined) {
|
||||
delete next[key];
|
||||
return next;
|
||||
}
|
||||
next[key] = value;
|
||||
return next;
|
||||
});
|
||||
setChangedPresetAttributes(addChangedCredentials);
|
||||
}
|
||||
|
||||
const notifyMissingInputs = useCallback(
|
||||
(needPresetName: boolean = true) => {
|
||||
const allMissingFields = (
|
||||
needPresetName && !presetName
|
||||
? [graph.has_external_trigger ? "trigger_name" : "preset_name"]
|
||||
: []
|
||||
)
|
||||
.concat(missingInputs)
|
||||
.concat(missingCredentials);
|
||||
toast({
|
||||
title: "⚠️ Not all required inputs are set",
|
||||
description: `Please set ${allMissingFields.map((k) => `\`${k}\``).join(", ")}`,
|
||||
});
|
||||
},
|
||||
[missingInputs, missingCredentials],
|
||||
);
|
||||
|
||||
const doRun = useCallback(async () => {
|
||||
// Manually running webhook-triggered agents is not supported
|
||||
if (graph.has_external_trigger) return;
|
||||
|
||||
if (!agentPreset || changedPresetAttributes.size > 0) {
|
||||
if (!allRequiredInputsAreSet || !allCredentialsAreSet) {
|
||||
notifyMissingInputs(false);
|
||||
return;
|
||||
}
|
||||
if (_doRun) {
|
||||
await _doRun(inputValues, inputCredentials);
|
||||
return;
|
||||
}
|
||||
// TODO: on executing preset with changes, ask for confirmation and offer save+run
|
||||
const newRun = await api
|
||||
.executeGraph(
|
||||
graph.id,
|
||||
graph.version,
|
||||
inputValues,
|
||||
inputCredentials,
|
||||
"library",
|
||||
)
|
||||
.catch(toastOnFail("execute agent"));
|
||||
|
||||
if (newRun && onRun) onRun(newRun.id);
|
||||
} else {
|
||||
await api
|
||||
.executeLibraryAgentPreset(agentPreset.id)
|
||||
.then((newRun) => onRun && onRun(newRun.id))
|
||||
.catch(toastOnFail("execute agent preset"));
|
||||
}
|
||||
|
||||
analytics.sendDatafastEvent("run_agent", {
|
||||
name: graph.name,
|
||||
id: graph.id,
|
||||
});
|
||||
}, [api, graph, inputValues, inputCredentials, onRun, toastOnFail]);
|
||||
|
||||
const doCreatePreset = useCallback(async () => {
|
||||
if (!onCreatePreset) return;
|
||||
|
||||
if (!presetName || !allRequiredInputsAreSet || !allCredentialsAreSet) {
|
||||
notifyMissingInputs();
|
||||
return;
|
||||
}
|
||||
|
||||
await api
|
||||
.createLibraryAgentPreset({
|
||||
name: presetName,
|
||||
description: presetDescription,
|
||||
graph_id: graph.id,
|
||||
graph_version: graph.version,
|
||||
inputs: inputValues,
|
||||
credentials: inputCredentials,
|
||||
})
|
||||
.then((newPreset) => {
|
||||
onCreatePreset(newPreset);
|
||||
setChangedPresetAttributes(new Set()); // reset change tracker
|
||||
})
|
||||
.catch(toastOnFail("save agent preset"));
|
||||
}, [
|
||||
api,
|
||||
graph,
|
||||
presetName,
|
||||
presetDescription,
|
||||
inputValues,
|
||||
inputCredentials,
|
||||
onCreatePreset,
|
||||
toast,
|
||||
toastOnFail,
|
||||
]);
|
||||
|
||||
const doUpdatePreset = useCallback(async () => {
|
||||
if (!agentPreset || changedPresetAttributes.size == 0) return;
|
||||
|
||||
if (!presetName || !allRequiredInputsAreSet || !allCredentialsAreSet) {
|
||||
notifyMissingInputs();
|
||||
return;
|
||||
}
|
||||
|
||||
const updatePreset: LibraryAgentPresetUpdatable = {};
|
||||
if (changedPresetAttributes.has("name")) updatePreset["name"] = presetName;
|
||||
if (changedPresetAttributes.has("description"))
|
||||
updatePreset["description"] = presetDescription;
|
||||
if (
|
||||
changedPresetAttributes.has("inputs") ||
|
||||
changedPresetAttributes.has("credentials")
|
||||
) {
|
||||
updatePreset["inputs"] = inputValues;
|
||||
updatePreset["credentials"] = inputCredentials;
|
||||
}
|
||||
await api
|
||||
.updateLibraryAgentPreset(agentPreset.id, updatePreset)
|
||||
.then((updatedPreset) => {
|
||||
onUpdatePreset(updatedPreset);
|
||||
setChangedPresetAttributes(new Set()); // reset change tracker
|
||||
})
|
||||
.catch(toastOnFail("update agent preset"));
|
||||
}, [
|
||||
api,
|
||||
graph,
|
||||
presetName,
|
||||
presetDescription,
|
||||
inputValues,
|
||||
inputCredentials,
|
||||
onUpdatePreset,
|
||||
toast,
|
||||
toastOnFail,
|
||||
]);
|
||||
|
||||
const doSetPresetActive = useCallback(
|
||||
async (active: boolean) => {
|
||||
if (!agentPreset) return;
|
||||
const updatedPreset = await api.updateLibraryAgentPreset(agentPreset.id, {
|
||||
is_active: active,
|
||||
});
|
||||
onUpdatePreset(updatedPreset);
|
||||
},
|
||||
[agentPreset, api, onUpdatePreset],
|
||||
);
|
||||
|
||||
const doSetupTrigger = useCallback(async () => {
|
||||
// Setting up a trigger for non-webhook-triggered agents is not supported
|
||||
if (!graph.trigger_setup_info || !onCreatePreset) return;
|
||||
|
||||
if (!presetName || !allRequiredInputsAreSet || !allCredentialsAreSet) {
|
||||
notifyMissingInputs();
|
||||
return;
|
||||
}
|
||||
|
||||
await api
|
||||
.setupAgentTrigger({
|
||||
name: presetName,
|
||||
description: presetDescription,
|
||||
graph_id: graph.id,
|
||||
graph_version: graph.version,
|
||||
trigger_config: inputValues,
|
||||
agent_credentials: inputCredentials,
|
||||
})
|
||||
.then((newPreset) => {
|
||||
onCreatePreset(newPreset);
|
||||
setChangedPresetAttributes(new Set()); // reset change tracker
|
||||
})
|
||||
.catch(toastOnFail("set up agent trigger"));
|
||||
}, [
|
||||
api,
|
||||
graph,
|
||||
presetName,
|
||||
presetDescription,
|
||||
inputValues,
|
||||
inputCredentials,
|
||||
onCreatePreset,
|
||||
toast,
|
||||
toastOnFail,
|
||||
]);
|
||||
|
||||
const openScheduleDialog = useCallback(() => {
|
||||
// Scheduling is not supported for webhook-triggered agents
|
||||
if (graph.has_external_trigger) return;
|
||||
|
||||
if (!allRequiredInputsAreSet || !allCredentialsAreSet) {
|
||||
notifyMissingInputs(false);
|
||||
return;
|
||||
}
|
||||
|
||||
setCronScheduleDialogOpen(true);
|
||||
}, [
|
||||
graph,
|
||||
allRequiredInputsAreSet,
|
||||
allCredentialsAreSet,
|
||||
notifyMissingInputs,
|
||||
]);
|
||||
|
||||
const doSetupSchedule = useCallback(
|
||||
async (cronExpression: string, scheduleName: string) => {
|
||||
// Scheduling is not supported for webhook-triggered agents
|
||||
if (graph.has_external_trigger) return;
|
||||
|
||||
if (_doCreateSchedule) {
|
||||
await _doCreateSchedule(
|
||||
cronExpression,
|
||||
scheduleName || graph.name,
|
||||
inputValues,
|
||||
inputCredentials,
|
||||
);
|
||||
return;
|
||||
}
|
||||
const schedule = await api
|
||||
.createGraphExecutionSchedule({
|
||||
graph_id: graph.id,
|
||||
graph_version: graph.version,
|
||||
name: scheduleName || graph.name,
|
||||
cron: cronExpression,
|
||||
inputs: inputValues,
|
||||
credentials: inputCredentials,
|
||||
})
|
||||
.catch(toastOnFail("set up agent run schedule"));
|
||||
|
||||
analytics.sendDatafastEvent("schedule_agent", {
|
||||
name: graph.name,
|
||||
id: graph.id,
|
||||
cronExpression: cronExpression,
|
||||
});
|
||||
|
||||
if (schedule && onCreateSchedule) onCreateSchedule(schedule);
|
||||
},
|
||||
[api, graph, inputValues, inputCredentials, onCreateSchedule, toastOnFail],
|
||||
);
|
||||
|
||||
const runActions: ButtonAction[] = useMemo(
|
||||
() => [
|
||||
// "Regular" agent: [run] + [save as preset] buttons
|
||||
...(!graph.has_external_trigger
|
||||
? ([
|
||||
{
|
||||
label: (
|
||||
<>
|
||||
<CalendarClockIcon className="mr-2 size-4" /> Schedule run
|
||||
</>
|
||||
),
|
||||
variant: "accent",
|
||||
callback: openScheduleDialog,
|
||||
extraProps: { "data-testid": "agent-schedule-button" },
|
||||
},
|
||||
{
|
||||
label: (
|
||||
<>
|
||||
<IconPlay className="mr-2 size-4" /> Manual run
|
||||
</>
|
||||
),
|
||||
callback: doRun,
|
||||
extraProps: { "data-testid": "agent-run-button" },
|
||||
},
|
||||
// {
|
||||
// label: (
|
||||
// <>
|
||||
// <IconSave className="mr-2 size-4" /> Save as a preset
|
||||
// </>
|
||||
// ),
|
||||
// callback: doCreatePreset,
|
||||
// disabled: !(
|
||||
// presetName &&
|
||||
// allRequiredInputsAreSet &&
|
||||
// allCredentialsAreSet
|
||||
// ),
|
||||
// },
|
||||
] satisfies ButtonAction[])
|
||||
: []),
|
||||
// Triggered agent: [setup] button
|
||||
...(graph.has_external_trigger && !agentPreset?.webhook_id
|
||||
? ([
|
||||
{
|
||||
label: (
|
||||
<>
|
||||
<IconPlay className="mr-2 size-4" /> Set up trigger
|
||||
</>
|
||||
),
|
||||
variant: "accent",
|
||||
callback: doSetupTrigger,
|
||||
disabled: !(
|
||||
presetName &&
|
||||
allRequiredInputsAreSet &&
|
||||
allCredentialsAreSet
|
||||
),
|
||||
},
|
||||
] satisfies ButtonAction[])
|
||||
: []),
|
||||
// Existing agent trigger: [enable]/[disable] button
|
||||
...(agentPreset?.webhook_id
|
||||
? ([
|
||||
agentPreset.is_active
|
||||
? {
|
||||
label: (
|
||||
<>
|
||||
<IconCross className="mr-2.5 size-3.5" /> Disable trigger
|
||||
</>
|
||||
),
|
||||
variant: "destructive",
|
||||
callback: () => doSetPresetActive(false),
|
||||
}
|
||||
: {
|
||||
label: (
|
||||
<>
|
||||
<IconPlay className="mr-2 size-4" /> Enable trigger
|
||||
</>
|
||||
),
|
||||
variant: "accent",
|
||||
callback: () => doSetPresetActive(true),
|
||||
},
|
||||
] satisfies ButtonAction[])
|
||||
: []),
|
||||
// Existing agent preset/trigger: [save] and [delete] buttons
|
||||
...(agentPreset
|
||||
? ([
|
||||
{
|
||||
label: (
|
||||
<>
|
||||
<IconSave className="mr-2 size-4" /> Save changes
|
||||
</>
|
||||
),
|
||||
callback: doUpdatePreset,
|
||||
disabled: !(
|
||||
changedPresetAttributes.size > 0 &&
|
||||
presetName &&
|
||||
allRequiredInputsAreSet &&
|
||||
allCredentialsAreSet
|
||||
),
|
||||
},
|
||||
{
|
||||
label: (
|
||||
<>
|
||||
<Trash2Icon className="mr-2 size-4" />
|
||||
Delete {graph.has_external_trigger ? "trigger" : "preset"}
|
||||
</>
|
||||
),
|
||||
callback: () => doDeletePreset(agentPreset.id),
|
||||
},
|
||||
] satisfies ButtonAction[])
|
||||
: []),
|
||||
],
|
||||
[
|
||||
graph.has_external_trigger,
|
||||
agentPreset,
|
||||
doRun,
|
||||
doSetupTrigger,
|
||||
doCreatePreset,
|
||||
doUpdatePreset,
|
||||
doDeletePreset,
|
||||
openScheduleDialog,
|
||||
changedPresetAttributes,
|
||||
presetName,
|
||||
allRequiredInputsAreSet,
|
||||
allCredentialsAreSet,
|
||||
],
|
||||
);
|
||||
|
||||
const triggerStatus: AgentStatus | null = !agentPreset
|
||||
? null
|
||||
: !agentPreset.webhook
|
||||
? "broken"
|
||||
: agentPreset.is_active
|
||||
? "active"
|
||||
: "inactive";
|
||||
|
||||
return (
|
||||
<div className={cn("agpt-div flex gap-6", className)}>
|
||||
<div className="flex min-w-0 flex-1 flex-col gap-4">
|
||||
{graph.trigger_setup_info && agentPreset && (
|
||||
<Card className="agpt-box">
|
||||
<CardHeader className="flex-row items-center justify-between">
|
||||
<CardTitle className="font-poppins text-lg">
|
||||
Trigger status
|
||||
</CardTitle>
|
||||
{triggerStatus && <AgentStatusChip status={triggerStatus} />}
|
||||
</CardHeader>
|
||||
<CardContent className="flex flex-col gap-4">
|
||||
{!agentPreset.webhook_id ? (
|
||||
/* Shouldn't happen, but technically possible */
|
||||
<p className="text-sm text-destructive">
|
||||
This trigger is not attached to a webhook. Use "Set up
|
||||
trigger" to fix this.
|
||||
</p>
|
||||
) : !graph.trigger_setup_info.credentials_input_name ? (
|
||||
/* Expose webhook URL if not auto-setup */
|
||||
<div className="text-sm">
|
||||
<p>
|
||||
This trigger is ready to be used. Use the Webhook URL below
|
||||
to set up the trigger connection with the service of your
|
||||
choosing.
|
||||
</p>
|
||||
<div className="nodrag mt-5 flex flex-col gap-1">
|
||||
Webhook URL:
|
||||
<div className="flex gap-2 rounded-md bg-gray-50 p-2">
|
||||
<code className="select-all text-sm">
|
||||
{agentPreset.webhook.url}
|
||||
</code>
|
||||
<Button
|
||||
variant="outline"
|
||||
size="icon"
|
||||
className="size-7 flex-none p-1"
|
||||
onClick={() =>
|
||||
agentPreset.webhook &&
|
||||
navigator.clipboard.writeText(agentPreset.webhook.url)
|
||||
}
|
||||
title="Copy webhook URL"
|
||||
>
|
||||
<CopyIcon className="size-4" />
|
||||
</Button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
) : (
|
||||
<p className="text-sm text-muted-foreground">
|
||||
This agent trigger is{" "}
|
||||
{agentPreset.is_active
|
||||
? "ready. When a trigger is received, it will run with the provided settings."
|
||||
: "disabled. It will not respond to triggers until you enable it."}
|
||||
</p>
|
||||
)}
|
||||
</CardContent>
|
||||
</Card>
|
||||
)}
|
||||
|
||||
<Card className="agpt-box">
|
||||
<CardHeader>
|
||||
<CardTitle className="font-poppins text-lg">Input</CardTitle>
|
||||
</CardHeader>
|
||||
<CardContent className="flex flex-col gap-4">
|
||||
{/* Schedule recommendation tip */}
|
||||
{recommendedScheduleCron && !graph.has_external_trigger && (
|
||||
<div className="flex items-center gap-2 rounded-md border border-violet-200 bg-violet-50 p-3">
|
||||
<ClockIcon className="h-4 w-4 text-violet-600" />
|
||||
<p className="text-sm text-violet-800">
|
||||
<strong>Tip:</strong> For best results, run this agent{" "}
|
||||
{humanizeCronExpression(
|
||||
recommendedScheduleCron,
|
||||
).toLowerCase()}
|
||||
</p>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Setup Instructions */}
|
||||
{graph.instructions && (
|
||||
<div className="flex items-start gap-2 rounded-md border border-violet-200 bg-violet-50 p-3">
|
||||
<InfoIcon className="mt-0.5 h-4 w-4 flex-shrink-0 text-violet-600" />
|
||||
<div className="text-sm text-violet-800">
|
||||
<strong>Setup Instructions:</strong>{" "}
|
||||
<span className="whitespace-pre-wrap">
|
||||
{graph.instructions}
|
||||
</span>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{(agentPreset || graph.has_external_trigger) && (
|
||||
<>
|
||||
{/* Preset name and description */}
|
||||
<div className="flex flex-col space-y-2">
|
||||
<label className="flex items-center gap-1 text-sm font-medium">
|
||||
{graph.has_external_trigger ? "Trigger" : "Preset"} Name
|
||||
<InformationTooltip
|
||||
description={`Name of the ${graph.has_external_trigger ? "trigger" : "preset"} you are setting up`}
|
||||
/>
|
||||
</label>
|
||||
<Input
|
||||
value={presetName}
|
||||
placeholder={`Enter ${graph.has_external_trigger ? "trigger" : "preset"} name`}
|
||||
onChange={(e) => {
|
||||
setPresetName(e.target.value);
|
||||
setChangedPresetAttributes((prev) => prev.add("name"));
|
||||
}}
|
||||
/>
|
||||
</div>
|
||||
<div className="flex flex-col space-y-2">
|
||||
<label className="flex items-center gap-1 text-sm font-medium">
|
||||
{graph.has_external_trigger ? "Trigger" : "Preset"}{" "}
|
||||
Description
|
||||
<InformationTooltip
|
||||
description={`Description of the ${graph.has_external_trigger ? "trigger" : "preset"} you are setting up`}
|
||||
/>
|
||||
</label>
|
||||
<Input
|
||||
value={presetDescription}
|
||||
placeholder={`Enter ${graph.has_external_trigger ? "trigger" : "preset"} description`}
|
||||
onChange={(e) => {
|
||||
setPresetDescription(e.target.value);
|
||||
setChangedPresetAttributes((prev) =>
|
||||
prev.add("description"),
|
||||
);
|
||||
}}
|
||||
/>
|
||||
</div>
|
||||
</>
|
||||
)}
|
||||
|
||||
{/* Regular inputs */}
|
||||
{Object.entries(agentInputFields).map(([key, inputSubSchema]) => (
|
||||
<RunAgentInputs
|
||||
key={key}
|
||||
schema={inputSubSchema}
|
||||
value={inputValues[key] ?? inputSubSchema.default}
|
||||
placeholder={inputSubSchema.description}
|
||||
onChange={(value) => {
|
||||
setInputValues((obj) => ({
|
||||
...obj,
|
||||
[key]: value,
|
||||
}));
|
||||
setChangedPresetAttributes((prev) => prev.add("inputs"));
|
||||
}}
|
||||
data-testid={`agent-input-${key}`}
|
||||
/>
|
||||
))}
|
||||
|
||||
{/* Credentials inputs */}
|
||||
{credentialFields.length > 0 && (
|
||||
<CredentialsGroupedView
|
||||
credentialFields={credentialFields}
|
||||
requiredCredentials={requiredCredentials}
|
||||
inputCredentials={inputCredentials}
|
||||
inputValues={inputValues}
|
||||
onCredentialChange={handleCredentialChange}
|
||||
/>
|
||||
)}
|
||||
</CardContent>
|
||||
</Card>
|
||||
</div>
|
||||
|
||||
{/* Actions */}
|
||||
<aside className="w-48 xl:w-56">
|
||||
<div className="flex flex-col gap-8">
|
||||
<ActionButtonGroup
|
||||
title={`${graph.has_external_trigger ? "Trigger" : agentPreset ? "Preset" : "Run"} actions`}
|
||||
actions={runActions}
|
||||
/>
|
||||
<ScheduleTaskDialog
|
||||
open={cronScheduleDialogOpen}
|
||||
setOpen={setCronScheduleDialogOpen}
|
||||
onSubmit={doSetupSchedule}
|
||||
defaultScheduleName={graph.name}
|
||||
defaultCronExpression={recommendedScheduleCron || undefined}
|
||||
/>
|
||||
|
||||
{agentActions && agentActions.length > 0 && (
|
||||
<ActionButtonGroup title="Agent actions" actions={agentActions} />
|
||||
)}
|
||||
</div>
|
||||
</aside>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
@@ -1,41 +0,0 @@
|
||||
import React from "react";
|
||||
|
||||
import { Badge } from "@/components/__legacy__/ui/badge";
|
||||
|
||||
export type AgentStatus = "active" | "inactive" | "error" | "broken";
|
||||
|
||||
const statusData: Record<
|
||||
AgentStatus,
|
||||
{ label: string; variant: keyof typeof statusStyles }
|
||||
> = {
|
||||
active: { label: "Active", variant: "success" },
|
||||
error: { label: "Error", variant: "destructive" },
|
||||
broken: { label: "Broken", variant: "destructive" },
|
||||
inactive: { label: "Inactive", variant: "secondary" },
|
||||
};
|
||||
|
||||
const statusStyles = {
|
||||
success:
|
||||
"bg-green-100 text-green-800 hover:bg-green-100 hover:text-green-800",
|
||||
destructive: "bg-red-100 text-red-800 hover:bg-red-100 hover:text-red-800",
|
||||
warning:
|
||||
"bg-yellow-100 text-yellow-800 hover:bg-yellow-100 hover:text-yellow-800",
|
||||
info: "bg-blue-100 text-blue-800 hover:bg-blue-100 hover:text-blue-800",
|
||||
secondary:
|
||||
"bg-slate-100 text-slate-800 hover:bg-slate-100 hover:text-slate-800",
|
||||
};
|
||||
|
||||
export function AgentStatusChip({
|
||||
status,
|
||||
}: {
|
||||
status: AgentStatus;
|
||||
}): React.ReactElement {
|
||||
return (
|
||||
<Badge
|
||||
variant="secondary"
|
||||
className={`text-xs font-medium ${statusStyles[statusData[status]?.variant]} rounded-[45px] px-[9px] py-[3px]`}
|
||||
>
|
||||
{statusData[status]?.label}
|
||||
</Badge>
|
||||
);
|
||||
}
|
||||
@@ -15,8 +15,6 @@ import { ChatSidebar } from "./components/ChatSidebar/ChatSidebar";
|
||||
import { DeleteChatDialog } from "./components/DeleteChatDialog/DeleteChatDialog";
|
||||
import { MobileDrawer } from "./components/MobileDrawer/MobileDrawer";
|
||||
import { MobileHeader } from "./components/MobileHeader/MobileHeader";
|
||||
import { NotificationBanner } from "./components/NotificationBanner/NotificationBanner";
|
||||
import { NotificationDialog } from "./components/NotificationDialog/NotificationDialog";
|
||||
import { ScaleLoader } from "./components/ScaleLoader/ScaleLoader";
|
||||
import { useCopilotPage } from "./useCopilotPage";
|
||||
|
||||
@@ -119,7 +117,6 @@ export function CopilotPage() {
|
||||
onDrop={handleDrop}
|
||||
>
|
||||
{isMobile && <MobileHeader onOpenDrawer={handleOpenDrawer} />}
|
||||
<NotificationBanner />
|
||||
{/* Drop overlay */}
|
||||
<div
|
||||
className={cn(
|
||||
@@ -204,7 +201,6 @@ export function CopilotPage() {
|
||||
onCancel={handleCancelDelete}
|
||||
/>
|
||||
)}
|
||||
<NotificationDialog />
|
||||
</SidebarProvider>
|
||||
);
|
||||
}
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
import { useCopilotUIStore } from "@/app/(platform)/copilot/store";
|
||||
import { ChangeEvent, FormEvent, useEffect, useState } from "react";
|
||||
|
||||
interface Args {
|
||||
@@ -17,16 +16,6 @@ export function useChatInput({
|
||||
}: Args) {
|
||||
const [value, setValue] = useState("");
|
||||
const [isSending, setIsSending] = useState(false);
|
||||
const { initialPrompt, setInitialPrompt } = useCopilotUIStore();
|
||||
|
||||
useEffect(
|
||||
function consumeInitialPrompt() {
|
||||
if (!initialPrompt) return;
|
||||
setValue((prev) => (prev.length === 0 ? initialPrompt : prev));
|
||||
setInitialPrompt(null);
|
||||
},
|
||||
[initialPrompt, setInitialPrompt],
|
||||
);
|
||||
|
||||
useEffect(
|
||||
function focusOnMount() {
|
||||
|
||||
@@ -23,36 +23,24 @@ import {
|
||||
useSidebar,
|
||||
} from "@/components/ui/sidebar";
|
||||
import { cn } from "@/lib/utils";
|
||||
import {
|
||||
CheckCircle,
|
||||
DotsThree,
|
||||
PlusCircleIcon,
|
||||
PlusIcon,
|
||||
} from "@phosphor-icons/react";
|
||||
import { DotsThree, PlusCircleIcon, PlusIcon } from "@phosphor-icons/react";
|
||||
import { useQueryClient } from "@tanstack/react-query";
|
||||
import { AnimatePresence, motion } from "framer-motion";
|
||||
import { parseAsString, useQueryState } from "nuqs";
|
||||
import { useEffect, useRef, useState } from "react";
|
||||
import { useCopilotUIStore } from "../../store";
|
||||
import { NotificationToggle } from "./components/NotificationToggle/NotificationToggle";
|
||||
import { DeleteChatDialog } from "../DeleteChatDialog/DeleteChatDialog";
|
||||
import { PulseLoader } from "../PulseLoader/PulseLoader";
|
||||
|
||||
export function ChatSidebar() {
|
||||
const { state } = useSidebar();
|
||||
const isCollapsed = state === "collapsed";
|
||||
const [sessionId, setSessionId] = useQueryState("sessionId", parseAsString);
|
||||
const {
|
||||
sessionToDelete,
|
||||
setSessionToDelete,
|
||||
completedSessionIDs,
|
||||
clearCompletedSession,
|
||||
} = useCopilotUIStore();
|
||||
const { sessionToDelete, setSessionToDelete } = useCopilotUIStore();
|
||||
|
||||
const queryClient = useQueryClient();
|
||||
|
||||
const { data: sessionsResponse, isLoading: isLoadingSessions } =
|
||||
useGetV2ListSessions({ limit: 50 }, { query: { refetchInterval: 10_000 } });
|
||||
useGetV2ListSessions({ limit: 50 });
|
||||
|
||||
const { mutate: deleteSession, isPending: isDeleting } =
|
||||
useDeleteV2DeleteSession({
|
||||
@@ -111,22 +99,6 @@ export function ChatSidebar() {
|
||||
}
|
||||
}, [editingSessionId]);
|
||||
|
||||
// Refetch session list when active session changes
|
||||
useEffect(() => {
|
||||
queryClient.invalidateQueries({
|
||||
queryKey: getGetV2ListSessionsQueryKey(),
|
||||
});
|
||||
}, [sessionId, queryClient]);
|
||||
|
||||
// Clear completed indicator when navigating to a session (works for all paths)
|
||||
useEffect(() => {
|
||||
if (!sessionId || !completedSessionIDs.has(sessionId)) return;
|
||||
clearCompletedSession(sessionId);
|
||||
const remaining = completedSessionIDs.size - 1;
|
||||
document.title =
|
||||
remaining > 0 ? `(${remaining}) Otto is ready - AutoGPT` : "AutoGPT";
|
||||
}, [sessionId, completedSessionIDs, clearCompletedSession]);
|
||||
|
||||
const sessions =
|
||||
sessionsResponse?.status === 200 ? sessionsResponse.data.sessions : [];
|
||||
|
||||
@@ -256,11 +228,8 @@ export function ChatSidebar() {
|
||||
<Text variant="h3" size="body-medium">
|
||||
Your chats
|
||||
</Text>
|
||||
<div className="relative left-5 flex items-center gap-1">
|
||||
<NotificationToggle />
|
||||
<div className="relative left-1">
|
||||
<SidebarTrigger />
|
||||
</div>
|
||||
<div className="relative left-6">
|
||||
<SidebarTrigger />
|
||||
</div>
|
||||
</div>
|
||||
{sessionId ? (
|
||||
@@ -336,8 +305,8 @@ export function ChatSidebar() {
|
||||
onClick={() => handleSelectSession(session.id)}
|
||||
className="w-full px-3 py-2.5 pr-10 text-left"
|
||||
>
|
||||
<div className="flex min-w-0 max-w-full items-center gap-2">
|
||||
<div className="min-w-0 flex-1">
|
||||
<div className="flex min-w-0 max-w-full flex-col overflow-hidden">
|
||||
<div className="min-w-0 max-w-full">
|
||||
<Text
|
||||
variant="body"
|
||||
className={cn(
|
||||
@@ -360,22 +329,10 @@ export function ChatSidebar() {
|
||||
</motion.span>
|
||||
</AnimatePresence>
|
||||
</Text>
|
||||
<Text variant="small" className="text-neutral-400">
|
||||
{formatDate(session.updated_at)}
|
||||
</Text>
|
||||
</div>
|
||||
{session.is_processing &&
|
||||
session.id !== sessionId &&
|
||||
!completedSessionIDs.has(session.id) && (
|
||||
<PulseLoader size={16} className="shrink-0" />
|
||||
)}
|
||||
{completedSessionIDs.has(session.id) &&
|
||||
session.id !== sessionId && (
|
||||
<CheckCircle
|
||||
className="h-4 w-4 shrink-0 text-green-500"
|
||||
weight="fill"
|
||||
/>
|
||||
)}
|
||||
<Text variant="small" className="text-neutral-400">
|
||||
{formatDate(session.updated_at)}
|
||||
</Text>
|
||||
</div>
|
||||
</button>
|
||||
)}
|
||||
|
||||
@@ -1,92 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import { Switch } from "@/components/atoms/Switch/Switch";
|
||||
import {
|
||||
Popover,
|
||||
PopoverContent,
|
||||
PopoverTrigger,
|
||||
} from "@/components/molecules/Popover/Popover";
|
||||
import { toast } from "@/components/molecules/Toast/use-toast";
|
||||
import { cn } from "@/lib/utils";
|
||||
import { Bell, BellRinging, BellSlash } from "@phosphor-icons/react";
|
||||
import { useCopilotUIStore } from "../../../../store";
|
||||
|
||||
export function NotificationToggle() {
|
||||
const {
|
||||
isNotificationsEnabled,
|
||||
setNotificationsEnabled,
|
||||
isSoundEnabled,
|
||||
toggleSound,
|
||||
} = useCopilotUIStore();
|
||||
|
||||
async function handleToggleNotifications() {
|
||||
if (isNotificationsEnabled) {
|
||||
setNotificationsEnabled(false);
|
||||
return;
|
||||
}
|
||||
if (typeof Notification === "undefined") {
|
||||
toast({
|
||||
title: "Notifications not supported",
|
||||
description: "Your browser does not support notifications.",
|
||||
variant: "destructive",
|
||||
});
|
||||
return;
|
||||
}
|
||||
const permission = await Notification.requestPermission();
|
||||
if (permission === "granted") {
|
||||
setNotificationsEnabled(true);
|
||||
} else {
|
||||
toast({
|
||||
title: "Notifications blocked",
|
||||
description:
|
||||
"Please allow notifications in your browser settings to enable this feature.",
|
||||
variant: "destructive",
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
return (
|
||||
<Popover>
|
||||
<PopoverTrigger asChild>
|
||||
<button
|
||||
className="rounded p-1 text-black transition-colors hover:bg-zinc-50"
|
||||
aria-label="Notification settings"
|
||||
>
|
||||
{!isNotificationsEnabled ? (
|
||||
<BellSlash className="!size-5" />
|
||||
) : isSoundEnabled ? (
|
||||
<BellRinging className="!size-5" />
|
||||
) : (
|
||||
<Bell className="!size-5" />
|
||||
)}
|
||||
</button>
|
||||
</PopoverTrigger>
|
||||
<PopoverContent align="start" className="w-56 p-3">
|
||||
<div className="flex flex-col gap-3">
|
||||
<label className="flex items-center justify-between">
|
||||
<span className="text-sm text-zinc-700">Notifications</span>
|
||||
<Switch
|
||||
checked={isNotificationsEnabled}
|
||||
onCheckedChange={handleToggleNotifications}
|
||||
/>
|
||||
</label>
|
||||
<label className="flex items-center justify-between">
|
||||
<span
|
||||
className={cn(
|
||||
"text-sm text-zinc-700",
|
||||
!isNotificationsEnabled && "opacity-50",
|
||||
)}
|
||||
>
|
||||
Sound
|
||||
</span>
|
||||
<Switch
|
||||
checked={isSoundEnabled && isNotificationsEnabled}
|
||||
onCheckedChange={toggleSound}
|
||||
disabled={!isNotificationsEnabled}
|
||||
/>
|
||||
</label>
|
||||
</div>
|
||||
</PopoverContent>
|
||||
</Popover>
|
||||
);
|
||||
}
|
||||
@@ -3,17 +3,8 @@ import { Button } from "@/components/atoms/Button/Button";
|
||||
import { Text } from "@/components/atoms/Text/Text";
|
||||
import { scrollbarStyles } from "@/components/styles/scrollbars";
|
||||
import { cn } from "@/lib/utils";
|
||||
import {
|
||||
CheckCircle,
|
||||
PlusIcon,
|
||||
SpeakerHigh,
|
||||
SpeakerSlash,
|
||||
SpinnerGapIcon,
|
||||
X,
|
||||
} from "@phosphor-icons/react";
|
||||
import { PlusIcon, SpinnerGapIcon, X } from "@phosphor-icons/react";
|
||||
import { Drawer } from "vaul";
|
||||
import { useCopilotUIStore } from "../../store";
|
||||
import { PulseLoader } from "../PulseLoader/PulseLoader";
|
||||
|
||||
interface Props {
|
||||
isOpen: boolean;
|
||||
@@ -61,13 +52,6 @@ export function MobileDrawer({
|
||||
onClose,
|
||||
onOpenChange,
|
||||
}: Props) {
|
||||
const {
|
||||
completedSessionIDs,
|
||||
clearCompletedSession,
|
||||
isSoundEnabled,
|
||||
toggleSound,
|
||||
} = useCopilotUIStore();
|
||||
|
||||
return (
|
||||
<Drawer.Root open={isOpen} onOpenChange={onOpenChange} direction="left">
|
||||
<Drawer.Portal>
|
||||
@@ -78,31 +62,14 @@ export function MobileDrawer({
|
||||
<Drawer.Title className="text-lg font-semibold text-zinc-800">
|
||||
Your chats
|
||||
</Drawer.Title>
|
||||
<div className="flex items-center gap-1">
|
||||
<button
|
||||
onClick={toggleSound}
|
||||
className="rounded p-1.5 text-zinc-400 transition-colors hover:text-zinc-600"
|
||||
aria-label={
|
||||
isSoundEnabled
|
||||
? "Disable notification sound"
|
||||
: "Enable notification sound"
|
||||
}
|
||||
>
|
||||
{isSoundEnabled ? (
|
||||
<SpeakerHigh className="h-4 w-4" />
|
||||
) : (
|
||||
<SpeakerSlash className="h-4 w-4" />
|
||||
)}
|
||||
</button>
|
||||
<Button
|
||||
variant="icon"
|
||||
size="icon"
|
||||
aria-label="Close sessions"
|
||||
onClick={onClose}
|
||||
>
|
||||
<X width="1rem" height="1rem" />
|
||||
</Button>
|
||||
</div>
|
||||
<Button
|
||||
variant="icon"
|
||||
size="icon"
|
||||
aria-label="Close sessions"
|
||||
onClick={onClose}
|
||||
>
|
||||
<X width="1rem" height="1rem" />
|
||||
</Button>
|
||||
</div>
|
||||
{currentSessionId ? (
|
||||
<div className="mt-2">
|
||||
@@ -136,12 +103,7 @@ export function MobileDrawer({
|
||||
sessions.map((session) => (
|
||||
<button
|
||||
key={session.id}
|
||||
onClick={() => {
|
||||
onSelectSession(session.id);
|
||||
if (completedSessionIDs.has(session.id)) {
|
||||
clearCompletedSession(session.id);
|
||||
}
|
||||
}}
|
||||
onClick={() => onSelectSession(session.id)}
|
||||
className={cn(
|
||||
"w-full rounded-lg px-3 py-2.5 text-left transition-colors",
|
||||
session.id === currentSessionId
|
||||
@@ -150,7 +112,7 @@ export function MobileDrawer({
|
||||
)}
|
||||
>
|
||||
<div className="flex min-w-0 max-w-full flex-col overflow-hidden">
|
||||
<div className="flex min-w-0 max-w-full items-center gap-1.5">
|
||||
<div className="min-w-0 max-w-full">
|
||||
<Text
|
||||
variant="body"
|
||||
className={cn(
|
||||
@@ -162,18 +124,6 @@ export function MobileDrawer({
|
||||
>
|
||||
{session.title || "Untitled chat"}
|
||||
</Text>
|
||||
{session.is_processing &&
|
||||
!completedSessionIDs.has(session.id) &&
|
||||
session.id !== currentSessionId && (
|
||||
<PulseLoader size={8} className="shrink-0" />
|
||||
)}
|
||||
{completedSessionIDs.has(session.id) &&
|
||||
session.id !== currentSessionId && (
|
||||
<CheckCircle
|
||||
className="h-4 w-4 shrink-0 text-green-500"
|
||||
weight="fill"
|
||||
/>
|
||||
)}
|
||||
</div>
|
||||
<Text variant="small" className="text-neutral-400">
|
||||
{formatDate(session.updated_at)}
|
||||
|
||||
@@ -1,74 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import { Text } from "@/components/atoms/Text/Text";
|
||||
import { Key, storage } from "@/services/storage/local-storage";
|
||||
import { BellRinging, X } from "@phosphor-icons/react";
|
||||
import { useEffect, useState } from "react";
|
||||
import { useCopilotUIStore } from "../../store";
|
||||
|
||||
export function NotificationBanner() {
|
||||
const { setNotificationsEnabled, isNotificationsEnabled } =
|
||||
useCopilotUIStore();
|
||||
|
||||
const [dismissed, setDismissed] = useState(
|
||||
() => storage.get(Key.COPILOT_NOTIFICATION_BANNER_DISMISSED) === "true",
|
||||
);
|
||||
const [permission, setPermission] = useState(() =>
|
||||
typeof Notification !== "undefined" ? Notification.permission : "denied",
|
||||
);
|
||||
|
||||
// Re-read dismissed flag when notifications are toggled off (e.g. clearCopilotLocalData)
|
||||
useEffect(() => {
|
||||
if (!isNotificationsEnabled) {
|
||||
setDismissed(
|
||||
storage.get(Key.COPILOT_NOTIFICATION_BANNER_DISMISSED) === "true",
|
||||
);
|
||||
}
|
||||
}, [isNotificationsEnabled]);
|
||||
|
||||
// Don't show if notifications aren't supported, already decided, dismissed, or already enabled
|
||||
if (
|
||||
typeof Notification === "undefined" ||
|
||||
permission !== "default" ||
|
||||
dismissed ||
|
||||
isNotificationsEnabled
|
||||
) {
|
||||
return null;
|
||||
}
|
||||
|
||||
function handleEnable() {
|
||||
Notification.requestPermission().then((result) => {
|
||||
setPermission(result);
|
||||
if (result === "granted") {
|
||||
setNotificationsEnabled(true);
|
||||
handleDismiss();
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
function handleDismiss() {
|
||||
storage.set(Key.COPILOT_NOTIFICATION_BANNER_DISMISSED, "true");
|
||||
setDismissed(true);
|
||||
}
|
||||
|
||||
return (
|
||||
<div className="flex items-center gap-3 border-b border-amber-200 bg-amber-50 px-4 py-2.5">
|
||||
<BellRinging className="h-5 w-5 shrink-0 text-amber-600" weight="fill" />
|
||||
<Text variant="body" className="flex-1 text-sm text-amber-800">
|
||||
Enable browser notifications to know when Otto finishes working, even
|
||||
when you switch tabs.
|
||||
</Text>
|
||||
<Button variant="primary" size="small" onClick={handleEnable}>
|
||||
Enable
|
||||
</Button>
|
||||
<button
|
||||
onClick={handleDismiss}
|
||||
className="rounded p-1 text-amber-400 transition-colors hover:text-amber-600"
|
||||
aria-label="Dismiss"
|
||||
>
|
||||
<X className="h-4 w-4" />
|
||||
</button>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
@@ -1,95 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import { Text } from "@/components/atoms/Text/Text";
|
||||
import { Dialog } from "@/components/molecules/Dialog/Dialog";
|
||||
import { Key, storage } from "@/services/storage/local-storage";
|
||||
import { BellRinging } from "@phosphor-icons/react";
|
||||
import { useEffect, useState } from "react";
|
||||
import { useCopilotUIStore } from "../../store";
|
||||
|
||||
export function NotificationDialog() {
|
||||
const {
|
||||
showNotificationDialog,
|
||||
setShowNotificationDialog,
|
||||
setNotificationsEnabled,
|
||||
isNotificationsEnabled,
|
||||
} = useCopilotUIStore();
|
||||
|
||||
const [dismissed, setDismissed] = useState(
|
||||
() => storage.get(Key.COPILOT_NOTIFICATION_DIALOG_DISMISSED) === "true",
|
||||
);
|
||||
const [permission, setPermission] = useState(() =>
|
||||
typeof Notification !== "undefined" ? Notification.permission : "denied",
|
||||
);
|
||||
|
||||
// Re-read dismissed flag when notifications are toggled off (e.g. clearCopilotLocalData)
|
||||
useEffect(() => {
|
||||
if (!isNotificationsEnabled) {
|
||||
setDismissed(
|
||||
storage.get(Key.COPILOT_NOTIFICATION_DIALOG_DISMISSED) === "true",
|
||||
);
|
||||
}
|
||||
}, [isNotificationsEnabled]);
|
||||
|
||||
const shouldShowAuto =
|
||||
typeof Notification !== "undefined" &&
|
||||
permission === "default" &&
|
||||
!dismissed;
|
||||
|
||||
const isOpen = showNotificationDialog || shouldShowAuto;
|
||||
|
||||
function handleEnable() {
|
||||
if (typeof Notification === "undefined") {
|
||||
handleDismiss();
|
||||
return;
|
||||
}
|
||||
Notification.requestPermission().then((result) => {
|
||||
setPermission(result);
|
||||
if (result === "granted") {
|
||||
setNotificationsEnabled(true);
|
||||
handleDismiss();
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
function handleDismiss() {
|
||||
storage.set(Key.COPILOT_NOTIFICATION_DIALOG_DISMISSED, "true");
|
||||
setDismissed(true);
|
||||
setShowNotificationDialog(false);
|
||||
}
|
||||
|
||||
return (
|
||||
<Dialog
|
||||
title="Stay in the loop"
|
||||
styling={{ maxWidth: "28rem", minWidth: "auto" }}
|
||||
controlled={{
|
||||
isOpen,
|
||||
set: async (open) => {
|
||||
if (!open) handleDismiss();
|
||||
},
|
||||
}}
|
||||
onClose={handleDismiss}
|
||||
>
|
||||
<Dialog.Content>
|
||||
<div className="flex flex-col items-center gap-4 py-2">
|
||||
<div className="flex h-12 w-12 items-center justify-center rounded-full bg-violet-100">
|
||||
<BellRinging className="h-6 w-6 text-violet-600" weight="fill" />
|
||||
</div>
|
||||
<Text variant="body" className="text-center text-neutral-600">
|
||||
Otto can notify you when a response is ready, even if you switch
|
||||
tabs or close this page. Enable notifications so you never miss one.
|
||||
</Text>
|
||||
</div>
|
||||
<Dialog.Footer>
|
||||
<Button variant="secondary" onClick={handleDismiss}>
|
||||
Not now
|
||||
</Button>
|
||||
<Button variant="primary" onClick={handleEnable}>
|
||||
Enable notifications
|
||||
</Button>
|
||||
</Dialog.Footer>
|
||||
</Dialog.Content>
|
||||
</Dialog>
|
||||
);
|
||||
}
|
||||
@@ -15,8 +15,6 @@
|
||||
position: absolute;
|
||||
left: 0;
|
||||
top: 0;
|
||||
transform: scale(0);
|
||||
opacity: 0;
|
||||
animation: ripple 2s linear infinite;
|
||||
}
|
||||
|
||||
@@ -27,10 +25,7 @@
|
||||
@keyframes ripple {
|
||||
0% {
|
||||
transform: scale(0);
|
||||
opacity: 0.6;
|
||||
}
|
||||
50% {
|
||||
opacity: 0.3;
|
||||
opacity: 1;
|
||||
}
|
||||
100% {
|
||||
transform: scale(1);
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
import { Key, storage } from "@/services/storage/local-storage";
|
||||
import { create } from "zustand";
|
||||
|
||||
export interface DeleteTarget {
|
||||
@@ -7,89 +6,17 @@ export interface DeleteTarget {
|
||||
}
|
||||
|
||||
interface CopilotUIState {
|
||||
/** Prompt extracted from URL hash (e.g. /copilot#prompt=...) for input prefill. */
|
||||
initialPrompt: string | null;
|
||||
setInitialPrompt: (prompt: string | null) => void;
|
||||
|
||||
sessionToDelete: DeleteTarget | null;
|
||||
setSessionToDelete: (target: DeleteTarget | null) => void;
|
||||
|
||||
isDrawerOpen: boolean;
|
||||
setDrawerOpen: (open: boolean) => void;
|
||||
|
||||
completedSessionIDs: Set<string>;
|
||||
addCompletedSession: (id: string) => void;
|
||||
clearCompletedSession: (id: string) => void;
|
||||
clearAllCompletedSessions: () => void;
|
||||
|
||||
isNotificationsEnabled: boolean;
|
||||
setNotificationsEnabled: (enabled: boolean) => void;
|
||||
|
||||
isSoundEnabled: boolean;
|
||||
toggleSound: () => void;
|
||||
|
||||
showNotificationDialog: boolean;
|
||||
setShowNotificationDialog: (show: boolean) => void;
|
||||
|
||||
clearCopilotLocalData: () => void;
|
||||
}
|
||||
|
||||
export const useCopilotUIStore = create<CopilotUIState>((set) => ({
|
||||
initialPrompt: null,
|
||||
setInitialPrompt: (prompt) => set({ initialPrompt: prompt }),
|
||||
|
||||
sessionToDelete: null,
|
||||
setSessionToDelete: (target) => set({ sessionToDelete: target }),
|
||||
|
||||
isDrawerOpen: false,
|
||||
setDrawerOpen: (open) => set({ isDrawerOpen: open }),
|
||||
|
||||
completedSessionIDs: new Set<string>(),
|
||||
addCompletedSession: (id) =>
|
||||
set((state) => {
|
||||
const next = new Set(state.completedSessionIDs);
|
||||
next.add(id);
|
||||
return { completedSessionIDs: next };
|
||||
}),
|
||||
clearCompletedSession: (id) =>
|
||||
set((state) => {
|
||||
const next = new Set(state.completedSessionIDs);
|
||||
next.delete(id);
|
||||
return { completedSessionIDs: next };
|
||||
}),
|
||||
clearAllCompletedSessions: () =>
|
||||
set({ completedSessionIDs: new Set<string>() }),
|
||||
|
||||
isNotificationsEnabled:
|
||||
storage.get(Key.COPILOT_NOTIFICATIONS_ENABLED) === "true" &&
|
||||
typeof Notification !== "undefined" &&
|
||||
Notification.permission === "granted",
|
||||
setNotificationsEnabled: (enabled) => {
|
||||
storage.set(Key.COPILOT_NOTIFICATIONS_ENABLED, String(enabled));
|
||||
set({ isNotificationsEnabled: enabled });
|
||||
},
|
||||
|
||||
isSoundEnabled: storage.get(Key.COPILOT_SOUND_ENABLED) !== "false",
|
||||
toggleSound: () =>
|
||||
set((state) => {
|
||||
const next = !state.isSoundEnabled;
|
||||
storage.set(Key.COPILOT_SOUND_ENABLED, String(next));
|
||||
return { isSoundEnabled: next };
|
||||
}),
|
||||
|
||||
showNotificationDialog: false,
|
||||
setShowNotificationDialog: (show) => set({ showNotificationDialog: show }),
|
||||
|
||||
clearCopilotLocalData: () => {
|
||||
storage.clean(Key.COPILOT_NOTIFICATIONS_ENABLED);
|
||||
storage.clean(Key.COPILOT_SOUND_ENABLED);
|
||||
storage.clean(Key.COPILOT_NOTIFICATION_BANNER_DISMISSED);
|
||||
storage.clean(Key.COPILOT_NOTIFICATION_DIALOG_DISMISSED);
|
||||
set({
|
||||
completedSessionIDs: new Set<string>(),
|
||||
isNotificationsEnabled: false,
|
||||
isSoundEnabled: true,
|
||||
});
|
||||
document.title = "AutoGPT";
|
||||
},
|
||||
}));
|
||||
|
||||
@@ -235,8 +235,8 @@ describe("getAnimationText", () => {
|
||||
state: "input-streaming",
|
||||
...BASE,
|
||||
});
|
||||
expect(text).toContain("Connecting");
|
||||
expect(text).toContain("example.com");
|
||||
expect(text).toContain("Discovering");
|
||||
expect(text).toContain("mcp.example.com");
|
||||
});
|
||||
|
||||
it("shows tool call text when tool_name is set", () => {
|
||||
@@ -245,7 +245,7 @@ describe("getAnimationText", () => {
|
||||
input: { server_url: "https://mcp.example.com/mcp", tool_name: "fetch" },
|
||||
});
|
||||
expect(text).toContain("fetch");
|
||||
expect(text).toContain("example.com");
|
||||
expect(text).toContain("mcp.example.com");
|
||||
});
|
||||
|
||||
it("includes query argument preview when tool_arguments has a query key", () => {
|
||||
@@ -282,7 +282,7 @@ describe("getAnimationText", () => {
|
||||
tool_arguments: {},
|
||||
},
|
||||
});
|
||||
expect(text).toBe("Calling list_users on example.com");
|
||||
expect(text).toBe("Calling list_users on mcp.example.com");
|
||||
});
|
||||
|
||||
it("truncates long argument previews to 60 chars with ellipsis", () => {
|
||||
@@ -327,8 +327,8 @@ describe("getAnimationText", () => {
|
||||
output: DISCOVERY,
|
||||
input: { server_url: "https://mcp.example.com/mcp" },
|
||||
});
|
||||
expect(text).toContain("Connected");
|
||||
expect(text).toContain("example.com");
|
||||
expect(text).toContain("Discovered");
|
||||
expect(text).toContain("1");
|
||||
});
|
||||
|
||||
it("shows setup label on output-available for setup requirements", () => {
|
||||
|
||||
@@ -12,6 +12,8 @@ import { CredentialsProvidersContext } from "@/providers/agent-credentials/crede
|
||||
import { useContext, useEffect, useRef, useState } from "react";
|
||||
import { useCopilotChatActions } from "../../../../components/CopilotChatActionsProvider/useCopilotChatActions";
|
||||
import { ContentMessage } from "../../../../components/ToolAccordion/AccordionContent";
|
||||
import { serverHost } from "../../helpers";
|
||||
|
||||
interface Props {
|
||||
output: SetupRequirementsResponse;
|
||||
/**
|
||||
@@ -36,8 +38,7 @@ export function MCPSetupCard({ output, retryInstruction }: Props) {
|
||||
|
||||
// setup_info.agent_id is set to the server_url in the backend
|
||||
const serverUrl = output.setup_info.agent_id;
|
||||
// agent_name is computed by the backend as the display name for the service
|
||||
const service = output.setup_info.agent_name;
|
||||
const host = serverHost(serverUrl);
|
||||
|
||||
const [loading, setLoading] = useState(false);
|
||||
const [error, setError] = useState<string | null>(null);
|
||||
@@ -94,7 +95,10 @@ export function MCPSetupCard({ output, retryInstruction }: Props) {
|
||||
}
|
||||
|
||||
setConnected(true);
|
||||
onSend(retryInstruction ?? "I've connected. Please retry.");
|
||||
onSend(
|
||||
retryInstruction ??
|
||||
"I've connected the MCP server credentials. Please retry.",
|
||||
);
|
||||
} catch (e: unknown) {
|
||||
const err = e as Record<string, unknown>;
|
||||
if (err?.status === 400) {
|
||||
@@ -133,7 +137,10 @@ export function MCPSetupCard({ output, retryInstruction }: Props) {
|
||||
if (!(res.status >= 200 && res.status < 300))
|
||||
throw new Error("Failed to store token");
|
||||
setConnected(true);
|
||||
onSend(retryInstruction ?? "I've connected. Please retry.");
|
||||
onSend(
|
||||
retryInstruction ??
|
||||
"I've connected the MCP server credentials. Please retry.",
|
||||
);
|
||||
} catch (e: unknown) {
|
||||
const err = e as Record<string, unknown>;
|
||||
setError(
|
||||
@@ -148,7 +155,7 @@ export function MCPSetupCard({ output, retryInstruction }: Props) {
|
||||
if (connected) {
|
||||
return (
|
||||
<div className="mt-2 rounded-lg border border-green-200 bg-green-50 px-3 py-2 text-sm text-green-700">
|
||||
Connected to {service}!
|
||||
Connected to {host}!
|
||||
</div>
|
||||
);
|
||||
}
|
||||
@@ -164,7 +171,7 @@ export function MCPSetupCard({ output, retryInstruction }: Props) {
|
||||
onClick={handleConnect}
|
||||
disabled={loading}
|
||||
>
|
||||
{loading ? "Connecting…" : `Connect ${service}`}
|
||||
{loading ? "Connecting…" : `Connect to ${host}`}
|
||||
</Button>
|
||||
|
||||
{error && (
|
||||
@@ -177,7 +184,7 @@ export function MCPSetupCard({ output, retryInstruction }: Props) {
|
||||
<div className="mt-3 flex gap-2">
|
||||
<input
|
||||
type="password"
|
||||
aria-label={`API token for ${service}`}
|
||||
aria-label={`API token for ${host}`}
|
||||
placeholder="Paste API token"
|
||||
value={manualToken}
|
||||
onChange={(e) => setManualToken(e.target.value)}
|
||||
|
||||
@@ -32,11 +32,11 @@ vi.mock("@/app/api/__generated__/endpoints/mcp/mcp", () => ({
|
||||
function makeSetupOutput(serverUrl = "https://mcp.example.com/mcp") {
|
||||
return {
|
||||
type: "setup_requirements" as const,
|
||||
message: "To continue, sign in to example.com and approve access.",
|
||||
message: "The MCP server at mcp.example.com requires authentication.",
|
||||
session_id: "test-session",
|
||||
setup_info: {
|
||||
agent_id: serverUrl,
|
||||
agent_name: "example.com",
|
||||
agent_name: "MCP: mcp.example.com",
|
||||
user_readiness: {
|
||||
has_all_credentials: false,
|
||||
missing_credentials: {},
|
||||
@@ -58,9 +58,9 @@ describe("MCPSetupCard", () => {
|
||||
|
||||
it("renders setup message and connect button", () => {
|
||||
render(<MCPSetupCard output={makeSetupOutput()} />);
|
||||
expect(screen.getByText(/sign in to example\.com/i)).toBeDefined();
|
||||
expect(screen.getByText(/requires authentication/)).toBeDefined();
|
||||
expect(
|
||||
screen.getByRole("button", { name: /connect example\.com/i }),
|
||||
screen.getByRole("button", { name: /connect to mcp.example.com/i }),
|
||||
).toBeDefined();
|
||||
});
|
||||
|
||||
@@ -76,7 +76,7 @@ describe("MCPSetupCard", () => {
|
||||
|
||||
render(<MCPSetupCard output={makeSetupOutput()} />);
|
||||
fireEvent.click(
|
||||
screen.getByRole("button", { name: /connect example\.com/i }),
|
||||
screen.getByRole("button", { name: /connect to mcp.example.com/i }),
|
||||
);
|
||||
|
||||
await waitFor(() => {
|
||||
@@ -100,7 +100,7 @@ describe("MCPSetupCard", () => {
|
||||
|
||||
render(<MCPSetupCard output={makeSetupOutput()} />);
|
||||
fireEvent.click(
|
||||
screen.getByRole("button", { name: /connect example\.com/i }),
|
||||
screen.getByRole("button", { name: /connect to mcp.example.com/i }),
|
||||
);
|
||||
|
||||
await waitFor(() => {
|
||||
@@ -127,7 +127,7 @@ describe("MCPSetupCard", () => {
|
||||
fireEvent.click(screen.getByRole("button", { name: /use token/i }));
|
||||
|
||||
await waitFor(() => {
|
||||
expect(screen.getByText(/connected to example\.com/i)).toBeDefined();
|
||||
expect(screen.getByText(/connected to mcp.example.com/i)).toBeDefined();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
@@ -125,11 +125,6 @@ export function serverHost(url: string): string {
|
||||
}
|
||||
}
|
||||
|
||||
/** Strip the 'mcp.' prefix from an MCP hostname: 'mcp.sentry.dev' → 'sentry.dev' */
|
||||
export function serviceNameFromHost(host: string): string {
|
||||
return host.startsWith("mcp.") ? host.slice(4) : host;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return a short preview of the most meaningful argument value, e.g. `"my query"`.
|
||||
* Checks common "query" key names first, then falls back to the first string value.
|
||||
@@ -179,30 +174,28 @@ export function getAnimationText(part: {
|
||||
const host = input?.server_url ? serverHost(input.server_url) : "";
|
||||
const toolName = input?.tool_name?.trim();
|
||||
|
||||
const service = host ? serviceNameFromHost(host) : "";
|
||||
|
||||
switch (part.state) {
|
||||
case "input-streaming":
|
||||
case "input-available": {
|
||||
if (!toolName) return `Connecting to ${service || "integration"}…`;
|
||||
if (!toolName) return `Discovering MCP tools${host ? ` on ${host}` : ""}`;
|
||||
const argPreview = getArgPreview(input?.tool_arguments);
|
||||
return `Calling ${toolName}${argPreview ? `(${argPreview})` : ""}${service ? ` on ${service}` : ""}`;
|
||||
return `Calling ${toolName}${argPreview ? `(${argPreview})` : ""}${host ? ` on ${host}` : ""}`;
|
||||
}
|
||||
case "output-available": {
|
||||
const output = getRunMCPToolOutput(part);
|
||||
if (!output) return "Connecting…";
|
||||
if (!output) return "Connecting to MCP server";
|
||||
if (isSetupRequirementsOutput(output))
|
||||
return `Connect ${output.setup_info.agent_name}`;
|
||||
return `Connect to ${output.setup_info.agent_name}`;
|
||||
if (isMCPToolOutput(output))
|
||||
return `Ran ${output.tool_name}${service ? ` on ${service}` : ""}`;
|
||||
return `Ran ${output.tool_name}${host ? ` on ${host}` : ""}`;
|
||||
if (isDiscoveryOutput(output))
|
||||
return `Connected to ${serviceNameFromHost(serverHost(output.server_url))}`;
|
||||
return "Connection error";
|
||||
return `Discovered ${output.tools.length} tool(s) on ${serverHost(output.server_url)}`;
|
||||
return "MCP error";
|
||||
}
|
||||
case "output-error":
|
||||
return "Connection error";
|
||||
return "MCP error";
|
||||
default:
|
||||
return "Connecting…";
|
||||
return "Connecting to MCP server";
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,118 +0,0 @@
|
||||
import { useBackendAPI } from "@/lib/autogpt-server-api/context";
|
||||
import type { WebSocketNotification } from "@/lib/autogpt-server-api/types";
|
||||
import { useEffect, useRef } from "react";
|
||||
import { useCopilotUIStore } from "./store";
|
||||
|
||||
const ORIGINAL_TITLE = "AutoGPT";
|
||||
const NOTIFICATION_SOUND_PATH = "/sounds/notification.mp3";
|
||||
|
||||
/**
|
||||
* Listens for copilot completion notifications via WebSocket.
|
||||
* Updates the Zustand store, plays a sound, and updates document.title.
|
||||
*/
|
||||
export function useCopilotNotifications(activeSessionID: string | null) {
|
||||
const api = useBackendAPI();
|
||||
const audioRef = useRef<HTMLAudioElement | null>(null);
|
||||
const activeSessionRef = useRef(activeSessionID);
|
||||
activeSessionRef.current = activeSessionID;
|
||||
const windowFocusedRef = useRef(true);
|
||||
|
||||
// Pre-load audio element
|
||||
useEffect(() => {
|
||||
if (typeof window === "undefined") return;
|
||||
const audio = new Audio(NOTIFICATION_SOUND_PATH);
|
||||
audio.volume = 0.5;
|
||||
audioRef.current = audio;
|
||||
}, []);
|
||||
|
||||
// Listen for WebSocket notifications
|
||||
useEffect(() => {
|
||||
function handleNotification(notification: WebSocketNotification) {
|
||||
if (notification.type !== "copilot_completion") return;
|
||||
if (notification.event !== "session_completed") return;
|
||||
|
||||
const sessionID = (notification as Record<string, unknown>).session_id;
|
||||
if (typeof sessionID !== "string") return;
|
||||
|
||||
const state = useCopilotUIStore.getState();
|
||||
|
||||
const isActiveSession = sessionID === activeSessionRef.current;
|
||||
const isUserAway =
|
||||
document.visibilityState === "hidden" || !windowFocusedRef.current;
|
||||
|
||||
// Skip if viewing the active session and it's in focus
|
||||
if (isActiveSession && !isUserAway) return;
|
||||
|
||||
// Skip if we already notified for this session (e.g. WS replay)
|
||||
if (state.completedSessionIDs.has(sessionID)) return;
|
||||
|
||||
// Always update UI state (checkmark + title) regardless of notification setting
|
||||
state.addCompletedSession(sessionID);
|
||||
const count = useCopilotUIStore.getState().completedSessionIDs.size;
|
||||
document.title = `(${count}) Otto is ready - ${ORIGINAL_TITLE}`;
|
||||
|
||||
// Sound and browser notifications are gated by the user setting
|
||||
if (!state.isNotificationsEnabled) return;
|
||||
|
||||
if (state.isSoundEnabled && audioRef.current) {
|
||||
audioRef.current.currentTime = 0;
|
||||
audioRef.current.play().catch(() => {});
|
||||
}
|
||||
|
||||
// Send browser notification when user is away
|
||||
if (
|
||||
typeof Notification !== "undefined" &&
|
||||
Notification.permission === "granted" &&
|
||||
isUserAway
|
||||
) {
|
||||
const n = new Notification("Otto is ready", {
|
||||
body: "A response is waiting for you.",
|
||||
icon: "/favicon.ico",
|
||||
});
|
||||
n.onclick = () => {
|
||||
window.focus();
|
||||
const url = new URL(window.location.href);
|
||||
url.searchParams.set("sessionId", sessionID);
|
||||
window.history.pushState({}, "", url.toString());
|
||||
window.dispatchEvent(new PopStateEvent("popstate"));
|
||||
n.close();
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
const detach = api.onWebSocketMessage("notification", handleNotification);
|
||||
return () => {
|
||||
detach();
|
||||
};
|
||||
}, [api]);
|
||||
|
||||
// Track window focus for browser notifications when app is in background
|
||||
useEffect(() => {
|
||||
function handleFocus() {
|
||||
windowFocusedRef.current = true;
|
||||
if (useCopilotUIStore.getState().completedSessionIDs.size === 0) {
|
||||
document.title = ORIGINAL_TITLE;
|
||||
}
|
||||
}
|
||||
function handleBlur() {
|
||||
windowFocusedRef.current = false;
|
||||
}
|
||||
function handleVisibilityChange() {
|
||||
if (
|
||||
document.visibilityState === "visible" &&
|
||||
useCopilotUIStore.getState().completedSessionIDs.size === 0
|
||||
) {
|
||||
document.title = ORIGINAL_TITLE;
|
||||
}
|
||||
}
|
||||
|
||||
window.addEventListener("focus", handleFocus);
|
||||
window.addEventListener("blur", handleBlur);
|
||||
document.addEventListener("visibilitychange", handleVisibilityChange);
|
||||
return () => {
|
||||
window.removeEventListener("focus", handleFocus);
|
||||
window.removeEventListener("blur", handleBlur);
|
||||
document.removeEventListener("visibilitychange", handleVisibilityChange);
|
||||
};
|
||||
}, []);
|
||||
}
|
||||
@@ -13,48 +13,11 @@ import type { FileUIPart } from "ai";
|
||||
import { useEffect, useRef, useState } from "react";
|
||||
import { useCopilotUIStore } from "./store";
|
||||
import { useChatSession } from "./useChatSession";
|
||||
import { useCopilotNotifications } from "./useCopilotNotifications";
|
||||
import { useCopilotStream } from "./useCopilotStream";
|
||||
|
||||
const TITLE_POLL_INTERVAL_MS = 2_000;
|
||||
const TITLE_POLL_MAX_ATTEMPTS = 5;
|
||||
|
||||
/**
|
||||
* Extract a prompt from the URL hash fragment.
|
||||
* Supports: /copilot#prompt=URL-encoded-text
|
||||
* Optionally auto-submits if ?autosubmit=true is in the query string.
|
||||
* Returns null if no prompt is present.
|
||||
*/
|
||||
function extractPromptFromUrl(): {
|
||||
prompt: string;
|
||||
autosubmit: boolean;
|
||||
} | null {
|
||||
if (typeof window === "undefined") return null;
|
||||
|
||||
const hash = window.location.hash;
|
||||
if (!hash) return null;
|
||||
|
||||
const hashParams = new URLSearchParams(hash.slice(1));
|
||||
const prompt = hashParams.get("prompt");
|
||||
|
||||
if (!prompt || !prompt.trim()) return null;
|
||||
|
||||
const searchParams = new URLSearchParams(window.location.search);
|
||||
const autosubmit = searchParams.get("autosubmit") === "true";
|
||||
|
||||
// Clean up hash + autosubmit param only (preserve other query params)
|
||||
const cleanURL = new URL(window.location.href);
|
||||
cleanURL.hash = "";
|
||||
cleanURL.searchParams.delete("autosubmit");
|
||||
window.history.replaceState(
|
||||
null,
|
||||
"",
|
||||
`${cleanURL.pathname}${cleanURL.search}`,
|
||||
);
|
||||
|
||||
return { prompt: prompt.trim(), autosubmit };
|
||||
}
|
||||
|
||||
interface UploadedFile {
|
||||
file_id: string;
|
||||
name: string;
|
||||
@@ -97,8 +60,6 @@ export function useCopilotPage() {
|
||||
refetchSession,
|
||||
});
|
||||
|
||||
useCopilotNotifications(sessionId);
|
||||
|
||||
// --- Delete session ---
|
||||
const { mutate: deleteSessionMutation, isPending: isDeleting } =
|
||||
useDeleteV2DeleteSession({
|
||||
@@ -163,28 +124,6 @@ export function useCopilotPage() {
|
||||
}
|
||||
}, [sessionId, pendingMessage, sendMessage]);
|
||||
|
||||
// --- Extract prompt from URL hash on mount (e.g. /copilot#prompt=Hello) ---
|
||||
const { setInitialPrompt } = useCopilotUIStore();
|
||||
const hasProcessedUrlPrompt = useRef(false);
|
||||
useEffect(() => {
|
||||
if (hasProcessedUrlPrompt.current) return;
|
||||
|
||||
const urlPrompt = extractPromptFromUrl();
|
||||
if (!urlPrompt) return;
|
||||
|
||||
hasProcessedUrlPrompt.current = true;
|
||||
|
||||
if (urlPrompt.autosubmit) {
|
||||
setPendingMessage(urlPrompt.prompt);
|
||||
void createSession().catch(() => {
|
||||
setPendingMessage(null);
|
||||
setInitialPrompt(urlPrompt.prompt);
|
||||
});
|
||||
} else {
|
||||
setInitialPrompt(urlPrompt.prompt);
|
||||
}
|
||||
}, [createSession, setInitialPrompt]);
|
||||
|
||||
async function uploadFiles(
|
||||
files: File[],
|
||||
sid: string,
|
||||
|
||||
@@ -260,7 +260,7 @@ export function CronScheduler({
|
||||
)}
|
||||
|
||||
{frequency !== "hourly" &&
|
||||
!(frequency === "custom" && customInterval.unit !== "days") && (
|
||||
!(frequency === "custom" && customInterval.unit === "hours") && (
|
||||
<TimeAt
|
||||
value={selectedTime}
|
||||
onChange={setSelectedTime}
|
||||
|
||||
@@ -47,7 +47,7 @@ export function useMarketplaceUpdate({ agent }: UseMarketplaceUpdateProps) {
|
||||
{
|
||||
query: {
|
||||
// Only fetch if user is the creator
|
||||
enabled: !!(user?.id && agent?.can_access_graph),
|
||||
enabled: !!(user?.id && agent?.owner_user_id === user.id),
|
||||
},
|
||||
},
|
||||
);
|
||||
@@ -90,7 +90,7 @@ export function useMarketplaceUpdate({ agent }: UseMarketplaceUpdateProps) {
|
||||
};
|
||||
}
|
||||
|
||||
const isUserCreator = !!(agent?.can_access_graph && user?.id);
|
||||
const isUserCreator = agent?.owner_user_id === user?.id;
|
||||
|
||||
const submissionsResponse = okData(submissionsData) as any;
|
||||
const agentSubmissions =
|
||||
|
||||
@@ -42,14 +42,6 @@ function ResetPasswordContent() {
|
||||
|
||||
if (isExpiredOrUsed) {
|
||||
setShowExpiredMessage(true);
|
||||
// Also show a toast with the Supabase error detail for debugging
|
||||
if (errorDescription) {
|
||||
toast({
|
||||
title: "Link Expired",
|
||||
description: errorDescription,
|
||||
variant: "destructive",
|
||||
});
|
||||
}
|
||||
} else {
|
||||
// Show toast for other errors
|
||||
const errorMessage =
|
||||
|
||||
@@ -9,25 +9,6 @@ export async function GET(request: NextRequest) {
|
||||
process.env.NEXT_PUBLIC_FRONTEND_BASE_URL || "http://localhost:3000";
|
||||
|
||||
if (!code) {
|
||||
// Supabase may redirect here with error params instead of a code
|
||||
// (e.g. when the OTP token is expired or already used)
|
||||
const error = searchParams.get("error");
|
||||
const errorCode = searchParams.get("error_code");
|
||||
const errorDescription = searchParams.get("error_description");
|
||||
|
||||
if (error || errorCode || errorDescription) {
|
||||
// Forward raw Supabase error params to the reset-password page,
|
||||
// which already handles classification (expired vs other errors)
|
||||
const params = new URLSearchParams();
|
||||
if (error) params.set("error", error);
|
||||
if (errorCode) params.set("error_code", errorCode);
|
||||
if (errorDescription) params.set("error_description", errorDescription);
|
||||
|
||||
return NextResponse.redirect(
|
||||
`${origin}/reset-password?${params.toString()}`,
|
||||
);
|
||||
}
|
||||
|
||||
return NextResponse.redirect(
|
||||
`${origin}/reset-password?error=${encodeURIComponent("Missing verification code")}`,
|
||||
);
|
||||
|
||||
@@ -9711,6 +9711,7 @@
|
||||
"id": { "type": "string", "title": "Id" },
|
||||
"graph_id": { "type": "string", "title": "Graph Id" },
|
||||
"graph_version": { "type": "integer", "title": "Graph Version" },
|
||||
"owner_user_id": { "type": "string", "title": "Owner User Id" },
|
||||
"image_url": {
|
||||
"anyOf": [{ "type": "string" }, { "type": "null" }],
|
||||
"title": "Image Url"
|
||||
@@ -9798,8 +9799,7 @@
|
||||
},
|
||||
"can_access_graph": {
|
||||
"type": "boolean",
|
||||
"title": "Can Access Graph",
|
||||
"description": "Indicates whether the same user owns the corresponding graph"
|
||||
"title": "Can Access Graph"
|
||||
},
|
||||
"is_latest_version": {
|
||||
"type": "boolean",
|
||||
@@ -9831,6 +9831,7 @@
|
||||
"id",
|
||||
"graph_id",
|
||||
"graph_version",
|
||||
"owner_user_id",
|
||||
"image_url",
|
||||
"creator_name",
|
||||
"creator_image_url",
|
||||
@@ -11829,11 +11830,10 @@
|
||||
"title": {
|
||||
"anyOf": [{ "type": "string" }, { "type": "null" }],
|
||||
"title": "Title"
|
||||
},
|
||||
"is_processing": { "type": "boolean", "title": "Is Processing" }
|
||||
}
|
||||
},
|
||||
"type": "object",
|
||||
"required": ["id", "created_at", "updated_at", "is_processing"],
|
||||
"required": ["id", "created_at", "updated_at"],
|
||||
"title": "SessionSummaryResponse",
|
||||
"description": "Response model for a session summary (without messages)."
|
||||
},
|
||||
|
||||
@@ -9,7 +9,6 @@ import { cn } from "@/lib/utils";
|
||||
import { toDisplayName } from "@/providers/agent-credentials/helper";
|
||||
import { APIKeyCredentialsModal } from "./components/APIKeyCredentialsModal/APIKeyCredentialsModal";
|
||||
import { CredentialsFlatView } from "./components/CredentialsFlatView/CredentialsFlatView";
|
||||
import { CredentialTypeSelector } from "./components/CredentialTypeSelector/CredentialTypeSelector";
|
||||
import { HostScopedCredentialsModal } from "./components/HotScopedCredentialsModal/HotScopedCredentialsModal";
|
||||
import { OAuthFlowWaitingModal } from "./components/OAuthWaitingModal/OAuthWaitingModal";
|
||||
import { PasswordCredentialsModal } from "./components/PasswordCredentialsModal/PasswordCredentialsModal";
|
||||
@@ -71,25 +70,20 @@ export function CredentialsInput({
|
||||
supportsOAuth2,
|
||||
supportsUserPassword,
|
||||
supportsHostScoped,
|
||||
hasMultipleCredentialTypes,
|
||||
supportedTypes,
|
||||
userCredentials,
|
||||
systemCredentials,
|
||||
oAuthError,
|
||||
isAPICredentialsModalOpen,
|
||||
isUserPasswordCredentialsModalOpen,
|
||||
isHostScopedCredentialsModalOpen,
|
||||
isCredentialTypeSelectorOpen,
|
||||
isOAuth2FlowInProgress,
|
||||
oAuthPopupController,
|
||||
actionButtonText,
|
||||
setAPICredentialsModalOpen,
|
||||
setUserPasswordCredentialsModalOpen,
|
||||
setHostScopedCredentialsModalOpen,
|
||||
setCredentialTypeSelectorOpen,
|
||||
handleActionButtonClick,
|
||||
handleCredentialSelect,
|
||||
handleOAuthLogin,
|
||||
} = hookData;
|
||||
|
||||
const displayName = toDisplayName(provider);
|
||||
@@ -122,28 +116,7 @@ export function CredentialsInput({
|
||||
|
||||
{!readOnly && (
|
||||
<>
|
||||
{hasMultipleCredentialTypes && (
|
||||
<CredentialTypeSelector
|
||||
schema={schema}
|
||||
open={isCredentialTypeSelectorOpen}
|
||||
onClose={() => setCredentialTypeSelectorOpen(false)}
|
||||
provider={provider}
|
||||
providerName={providerName}
|
||||
supportedTypes={supportedTypes}
|
||||
onCredentialsCreate={(creds) => {
|
||||
onSelectCredential(creds);
|
||||
}}
|
||||
onOAuthLogin={handleOAuthLogin}
|
||||
onOpenPasswordModal={() =>
|
||||
setUserPasswordCredentialsModalOpen(true)
|
||||
}
|
||||
onOpenHostScopedModal={() =>
|
||||
setHostScopedCredentialsModalOpen(true)
|
||||
}
|
||||
siblingInputs={siblingInputs}
|
||||
/>
|
||||
)}
|
||||
{supportsApiKey && !hasMultipleCredentialTypes && (
|
||||
{supportsApiKey && (
|
||||
<APIKeyCredentialsModal
|
||||
schema={schema}
|
||||
open={isAPICredentialsModalOpen}
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
import { IconKey } from "@/components/__legacy__/ui/icons";
|
||||
import { Text } from "@/components/atoms/Text/Text";
|
||||
import {
|
||||
DropdownMenu,
|
||||
@@ -8,12 +9,9 @@ import {
|
||||
import { cn } from "@/lib/utils";
|
||||
import { CaretDownIcon, DotsThreeVertical } from "@phosphor-icons/react";
|
||||
import { useEffect, useRef, useState } from "react";
|
||||
import { CredentialsType } from "@/lib/autogpt-server-api/types";
|
||||
import {
|
||||
fallbackIcon,
|
||||
getCredentialDisplayName,
|
||||
getCredentialTypeIcon,
|
||||
getCredentialTypeLabel,
|
||||
MASKED_KEY_LENGTH,
|
||||
providerIcons,
|
||||
} from "../../helpers";
|
||||
@@ -49,16 +47,6 @@ export function CredentialRow({
|
||||
variant = "default",
|
||||
}: CredentialRowProps) {
|
||||
const ProviderIcon = providerIcons[provider] || fallbackIcon;
|
||||
const isRealCredentialType = [
|
||||
"api_key",
|
||||
"oauth2",
|
||||
"user_password",
|
||||
"host_scoped",
|
||||
].includes(credential.type);
|
||||
const credType = credential.type as CredentialsType;
|
||||
const TypeIcon = isRealCredentialType
|
||||
? getCredentialTypeIcon(credType, provider)
|
||||
: fallbackIcon;
|
||||
const isNodeVariant = variant === "node";
|
||||
const containerRef = useRef<HTMLDivElement>(null);
|
||||
const [showMaskedKey, setShowMaskedKey] = useState(true);
|
||||
@@ -104,29 +92,22 @@ export function CredentialRow({
|
||||
<div className="flex h-6 w-6 shrink-0 items-center justify-center rounded-full bg-gray-900">
|
||||
<ProviderIcon className="h-3 w-3 text-white" />
|
||||
</div>
|
||||
<TypeIcon className="h-5 w-5 shrink-0 text-zinc-800" />
|
||||
<IconKey className="h-5 w-5 shrink-0 text-zinc-800" />
|
||||
<div
|
||||
className={cn(
|
||||
"relative flex min-w-0 flex-1 flex-nowrap items-center gap-4",
|
||||
isNodeVariant && "overflow-hidden",
|
||||
)}
|
||||
>
|
||||
<div className="flex min-w-0 flex-1 items-center gap-2">
|
||||
<Text
|
||||
variant="body"
|
||||
className={cn(
|
||||
"min-w-0 shrink tracking-tight",
|
||||
isNodeVariant ? "truncate" : "line-clamp-1 text-ellipsis",
|
||||
)}
|
||||
>
|
||||
{getCredentialDisplayName(credential, displayName)}
|
||||
</Text>
|
||||
{isRealCredentialType && (
|
||||
<span className="shrink-0 rounded bg-zinc-100 px-1.5 py-0.5 text-[0.625rem] font-medium leading-tight text-zinc-500">
|
||||
{getCredentialTypeLabel(credType)}
|
||||
</span>
|
||||
<Text
|
||||
variant="body"
|
||||
className={cn(
|
||||
"min-w-0 flex-1 tracking-tight",
|
||||
isNodeVariant ? "truncate" : "line-clamp-1 text-ellipsis",
|
||||
)}
|
||||
</div>
|
||||
>
|
||||
{getCredentialDisplayName(credential, displayName)}
|
||||
</Text>
|
||||
{!(asSelectTrigger && isNodeVariant) && showMaskedKey && (
|
||||
<Text
|
||||
variant="large"
|
||||
|
||||
@@ -1,298 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import {
|
||||
Form,
|
||||
FormDescription,
|
||||
FormField,
|
||||
} from "@/components/__legacy__/ui/form";
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import { Input } from "@/components/atoms/Input/Input";
|
||||
import { Dialog } from "@/components/molecules/Dialog/Dialog";
|
||||
import {
|
||||
TabsLine,
|
||||
TabsLineContent,
|
||||
TabsLineList,
|
||||
TabsLineTrigger,
|
||||
} from "@/components/molecules/TabsLine/TabsLine";
|
||||
import { Skeleton } from "@/components/atoms/Skeleton/Skeleton";
|
||||
import {
|
||||
BlockIOCredentialsSubSchema,
|
||||
CredentialsMetaInput,
|
||||
CredentialsType,
|
||||
} from "@/lib/autogpt-server-api/types";
|
||||
import { useAPIKeyCredentialsModal } from "../APIKeyCredentialsModal/useAPIKeyCredentialsModal";
|
||||
import { getCredentialTypeIcon, getCredentialTypeLabel } from "../../helpers";
|
||||
|
||||
type Props = {
|
||||
schema: BlockIOCredentialsSubSchema;
|
||||
open: boolean;
|
||||
onClose: () => void;
|
||||
provider: string;
|
||||
providerName: string;
|
||||
supportedTypes: CredentialsType[];
|
||||
onCredentialsCreate: (creds: CredentialsMetaInput) => void;
|
||||
onOAuthLogin: () => void;
|
||||
onOpenPasswordModal: () => void;
|
||||
onOpenHostScopedModal: () => void;
|
||||
siblingInputs?: Record<string, unknown>;
|
||||
};
|
||||
|
||||
export function CredentialTypeSelector({
|
||||
schema,
|
||||
open,
|
||||
onClose,
|
||||
provider,
|
||||
providerName,
|
||||
supportedTypes,
|
||||
onCredentialsCreate,
|
||||
onOAuthLogin,
|
||||
onOpenPasswordModal,
|
||||
onOpenHostScopedModal,
|
||||
siblingInputs,
|
||||
}: Props) {
|
||||
const defaultTab = supportedTypes[0];
|
||||
|
||||
return (
|
||||
<Dialog
|
||||
title={`Add credential for ${providerName}`}
|
||||
controlled={{
|
||||
isOpen: open,
|
||||
set: (isOpen) => {
|
||||
if (!isOpen) onClose();
|
||||
},
|
||||
}}
|
||||
onClose={onClose}
|
||||
styling={{ maxWidth: "28rem" }}
|
||||
>
|
||||
<Dialog.Content>
|
||||
<TabsLine defaultValue={defaultTab}>
|
||||
<TabsLineList>
|
||||
{supportedTypes.map((type) => {
|
||||
const Icon = getCredentialTypeIcon(type, provider);
|
||||
return (
|
||||
<TabsLineTrigger
|
||||
key={type}
|
||||
value={type}
|
||||
className="inline-flex items-center gap-1.5"
|
||||
>
|
||||
<Icon size={16} />
|
||||
{getCredentialTypeLabel(type)}
|
||||
</TabsLineTrigger>
|
||||
);
|
||||
})}
|
||||
</TabsLineList>
|
||||
|
||||
{supportedTypes.includes("oauth2") && (
|
||||
<TabsLineContent value="oauth2">
|
||||
<OAuthTabContent
|
||||
providerName={providerName}
|
||||
onOAuthLogin={() => {
|
||||
onClose();
|
||||
onOAuthLogin();
|
||||
}}
|
||||
/>
|
||||
</TabsLineContent>
|
||||
)}
|
||||
|
||||
{supportedTypes.includes("api_key") && (
|
||||
<TabsLineContent value="api_key">
|
||||
<APIKeyTabContent
|
||||
schema={schema}
|
||||
siblingInputs={siblingInputs}
|
||||
onCredentialsCreate={(creds) => {
|
||||
onCredentialsCreate(creds);
|
||||
onClose();
|
||||
}}
|
||||
/>
|
||||
</TabsLineContent>
|
||||
)}
|
||||
|
||||
{supportedTypes.includes("user_password") && (
|
||||
<TabsLineContent value="user_password">
|
||||
<SimpleActionTab
|
||||
description="Add a username and password credential."
|
||||
buttonLabel="Enter credentials"
|
||||
onClick={() => {
|
||||
onClose();
|
||||
onOpenPasswordModal();
|
||||
}}
|
||||
/>
|
||||
</TabsLineContent>
|
||||
)}
|
||||
|
||||
{supportedTypes.includes("host_scoped") && (
|
||||
<TabsLineContent value="host_scoped">
|
||||
<SimpleActionTab
|
||||
description="Add host-scoped headers for authentication."
|
||||
buttonLabel="Add headers"
|
||||
onClick={() => {
|
||||
onClose();
|
||||
onOpenHostScopedModal();
|
||||
}}
|
||||
/>
|
||||
</TabsLineContent>
|
||||
)}
|
||||
</TabsLine>
|
||||
</Dialog.Content>
|
||||
</Dialog>
|
||||
);
|
||||
}
|
||||
|
||||
type OAuthTabContentProps = {
|
||||
providerName: string;
|
||||
onOAuthLogin: () => void;
|
||||
};
|
||||
|
||||
function OAuthTabContent({ providerName, onOAuthLogin }: OAuthTabContentProps) {
|
||||
return (
|
||||
<div className="space-y-4">
|
||||
<p className="text-sm text-zinc-600">
|
||||
Sign in with your {providerName} account using OAuth.
|
||||
</p>
|
||||
<Button
|
||||
variant="primary"
|
||||
size="small"
|
||||
onClick={onOAuthLogin}
|
||||
type="button"
|
||||
>
|
||||
Sign in with {providerName}
|
||||
</Button>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
type APIKeyTabContentProps = {
|
||||
schema: BlockIOCredentialsSubSchema;
|
||||
siblingInputs?: Record<string, unknown>;
|
||||
onCredentialsCreate: (creds: CredentialsMetaInput) => void;
|
||||
};
|
||||
|
||||
function APIKeyTabContent({
|
||||
schema,
|
||||
siblingInputs,
|
||||
onCredentialsCreate,
|
||||
}: APIKeyTabContentProps) {
|
||||
const {
|
||||
form,
|
||||
isLoading,
|
||||
isSubmitting,
|
||||
supportsApiKey,
|
||||
schemaDescription,
|
||||
onSubmit,
|
||||
} = useAPIKeyCredentialsModal({ schema, siblingInputs, onCredentialsCreate });
|
||||
|
||||
if (!supportsApiKey && !isLoading) {
|
||||
return null;
|
||||
}
|
||||
|
||||
if (isLoading) {
|
||||
return (
|
||||
<div className="space-y-4">
|
||||
<Skeleton className="h-4 w-3/4" />
|
||||
<div className="space-y-2">
|
||||
<Skeleton className="h-9 w-full" />
|
||||
<Skeleton className="h-9 w-full" />
|
||||
<Skeleton className="h-9 w-full" />
|
||||
<Skeleton className="h-9 w-68" />
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<div className="space-y-4">
|
||||
{schemaDescription && (
|
||||
<p className="text-sm text-zinc-600">{schemaDescription}</p>
|
||||
)}
|
||||
|
||||
<Form {...form}>
|
||||
<form onSubmit={form.handleSubmit(onSubmit)} className="space-y-2">
|
||||
<FormField
|
||||
control={form.control}
|
||||
name="title"
|
||||
render={({ field }) => (
|
||||
<Input
|
||||
id="title"
|
||||
label="Name"
|
||||
type="text"
|
||||
placeholder="Enter a name for this API Key..."
|
||||
{...field}
|
||||
/>
|
||||
)}
|
||||
/>
|
||||
<FormField
|
||||
control={form.control}
|
||||
name="apiKey"
|
||||
render={({ field }) => (
|
||||
<Input
|
||||
id="apiKey"
|
||||
label="API Key"
|
||||
type="password"
|
||||
placeholder="Enter API Key..."
|
||||
hint={
|
||||
schema.credentials_scopes ? (
|
||||
<FormDescription>
|
||||
Required scope(s) for this block:{" "}
|
||||
{schema.credentials_scopes?.map((s, i, a) => (
|
||||
<span key={i}>
|
||||
<code className="text-xs font-bold">{s}</code>
|
||||
{i < a.length - 1 && ", "}
|
||||
</span>
|
||||
))}
|
||||
</FormDescription>
|
||||
) : null
|
||||
}
|
||||
{...field}
|
||||
/>
|
||||
)}
|
||||
/>
|
||||
<FormField
|
||||
control={form.control}
|
||||
name="expiresAt"
|
||||
render={({ field }) => (
|
||||
<Input
|
||||
id="expiresAt"
|
||||
label="Expiration Date"
|
||||
type="datetime-local"
|
||||
placeholder="Select expiration date..."
|
||||
value={field.value}
|
||||
onChange={(e) => field.onChange(e.target.value)}
|
||||
onBlur={field.onBlur}
|
||||
name={field.name}
|
||||
/>
|
||||
)}
|
||||
/>
|
||||
<Button
|
||||
type="submit"
|
||||
className="min-w-68"
|
||||
loading={isSubmitting}
|
||||
disabled={isSubmitting}
|
||||
>
|
||||
Add API Key
|
||||
</Button>
|
||||
</form>
|
||||
</Form>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
type SimpleActionTabProps = {
|
||||
description: string;
|
||||
buttonLabel: string;
|
||||
onClick: () => void;
|
||||
};
|
||||
|
||||
function SimpleActionTab({
|
||||
description,
|
||||
buttonLabel,
|
||||
onClick,
|
||||
}: SimpleActionTabProps) {
|
||||
return (
|
||||
<div className="space-y-4">
|
||||
<p className="text-sm text-zinc-600">{description}</p>
|
||||
<Button variant="primary" size="small" onClick={onClick} type="button">
|
||||
{buttonLabel}
|
||||
</Button>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
@@ -26,6 +26,10 @@ type Props = {
|
||||
inputCredentials: Record<string, CredentialsMetaInput | undefined>;
|
||||
inputValues: Record<string, any>;
|
||||
onCredentialChange: (key: string, value?: CredentialsMetaInput) => void;
|
||||
/** Display variant passed through to each CredentialsInput */
|
||||
variant?: "default" | "node";
|
||||
/** Whether individual CredentialsInput components show their own title */
|
||||
showTitle?: boolean;
|
||||
};
|
||||
|
||||
export function CredentialsGroupedView({
|
||||
@@ -34,6 +38,8 @@ export function CredentialsGroupedView({
|
||||
inputCredentials,
|
||||
inputValues,
|
||||
onCredentialChange,
|
||||
variant = "default",
|
||||
showTitle,
|
||||
}: Props) {
|
||||
const allProviders = useContext(CredentialsProvidersContext);
|
||||
|
||||
@@ -131,6 +137,8 @@ export function CredentialsGroupedView({
|
||||
}}
|
||||
siblingInputs={inputValues}
|
||||
isOptional={!requiredCredentials.has(key)}
|
||||
variant={variant}
|
||||
showTitle={showTitle}
|
||||
/>
|
||||
);
|
||||
},
|
||||
@@ -172,6 +180,8 @@ export function CredentialsGroupedView({
|
||||
}}
|
||||
siblingInputs={inputValues}
|
||||
isOptional={!requiredCredentials.has(key)}
|
||||
variant={variant}
|
||||
showTitle={showTitle}
|
||||
/>
|
||||
);
|
||||
},
|
||||
|
||||
@@ -1,9 +1,5 @@
|
||||
import { CredentialsMetaInput } from "@/app/api/__generated__/models/credentialsMetaInput";
|
||||
import { CredentialsType } from "@/lib/autogpt-server-api/types";
|
||||
import {
|
||||
getCredentialDisplayName,
|
||||
getCredentialTypeLabel,
|
||||
} from "../../helpers";
|
||||
import { getCredentialDisplayName } from "../../helpers";
|
||||
import { CredentialRow } from "../CredentialRow/CredentialRow";
|
||||
|
||||
interface Props {
|
||||
@@ -90,8 +86,7 @@ export function CredentialsSelect({
|
||||
)}
|
||||
{credentials.map((credential) => (
|
||||
<option key={credential.id} value={credential.id}>
|
||||
{getCredentialDisplayName(credential, displayName)} (
|
||||
{getCredentialTypeLabel(credential.type as CredentialsType)})
|
||||
{getCredentialDisplayName(credential, displayName)}
|
||||
</option>
|
||||
))}
|
||||
</select>
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import { GlobeSimple, KeyIcon, Lock, Password } from "@phosphor-icons/react";
|
||||
import { KeyIcon } from "@phosphor-icons/react";
|
||||
import { NotionLogoIcon } from "@radix-ui/react-icons";
|
||||
import {
|
||||
FaDiscord,
|
||||
@@ -8,7 +8,6 @@ import {
|
||||
FaMedium,
|
||||
FaTwitter,
|
||||
} from "react-icons/fa";
|
||||
import { CredentialsType } from "@/lib/autogpt-server-api/types";
|
||||
|
||||
export const fallbackIcon = KeyIcon;
|
||||
|
||||
@@ -69,62 +68,6 @@ export type OAuthPopupResultMessage = { message_type: "oauth_popup_result" } & (
|
||||
}
|
||||
);
|
||||
|
||||
export function countSupportedTypes(
|
||||
supportsOAuth2: boolean,
|
||||
supportsApiKey: boolean,
|
||||
supportsUserPassword: boolean,
|
||||
supportsHostScoped: boolean,
|
||||
): number {
|
||||
return [
|
||||
supportsOAuth2,
|
||||
supportsApiKey,
|
||||
supportsUserPassword,
|
||||
supportsHostScoped,
|
||||
].filter(Boolean).length;
|
||||
}
|
||||
|
||||
export function getSupportedTypes(
|
||||
supportsOAuth2: boolean,
|
||||
supportsApiKey: boolean,
|
||||
supportsUserPassword: boolean,
|
||||
supportsHostScoped: boolean,
|
||||
): CredentialsType[] {
|
||||
const types: CredentialsType[] = [];
|
||||
if (supportsOAuth2) types.push("oauth2");
|
||||
if (supportsApiKey) types.push("api_key");
|
||||
if (supportsUserPassword) types.push("user_password");
|
||||
if (supportsHostScoped) types.push("host_scoped");
|
||||
return types;
|
||||
}
|
||||
|
||||
const CREDENTIAL_TYPE_LABELS: Record<CredentialsType, string> = {
|
||||
oauth2: "OAuth",
|
||||
api_key: "API Key",
|
||||
user_password: "Password",
|
||||
host_scoped: "Headers",
|
||||
};
|
||||
|
||||
export function getCredentialTypeLabel(type: CredentialsType): string {
|
||||
return CREDENTIAL_TYPE_LABELS[type] ?? type;
|
||||
}
|
||||
|
||||
type CredentialIcon = React.FC<{ className?: string; size?: string | number }>;
|
||||
|
||||
export function getCredentialTypeIcon(
|
||||
type: CredentialsType,
|
||||
provider?: string,
|
||||
): CredentialIcon {
|
||||
if (type === "oauth2" && provider) {
|
||||
const icon = providerIcons[provider];
|
||||
if (icon) return icon as CredentialIcon;
|
||||
return GlobeSimple as CredentialIcon;
|
||||
}
|
||||
if (type === "api_key") return KeyIcon as CredentialIcon;
|
||||
if (type === "user_password") return Password as CredentialIcon;
|
||||
if (type === "host_scoped") return Lock as CredentialIcon;
|
||||
return KeyIcon as CredentialIcon;
|
||||
}
|
||||
|
||||
export function getActionButtonText(
|
||||
supportsOAuth2: boolean,
|
||||
supportsApiKey: boolean,
|
||||
@@ -132,18 +75,6 @@ export function getActionButtonText(
|
||||
supportsHostScoped: boolean,
|
||||
hasExistingCredentials: boolean,
|
||||
): string {
|
||||
const multipleTypes =
|
||||
countSupportedTypes(
|
||||
supportsOAuth2,
|
||||
supportsApiKey,
|
||||
supportsUserPassword,
|
||||
supportsHostScoped,
|
||||
) > 1;
|
||||
|
||||
if (multipleTypes) {
|
||||
return hasExistingCredentials ? "Add another credential" : "Add credential";
|
||||
}
|
||||
|
||||
if (hasExistingCredentials) {
|
||||
if (supportsOAuth2) return "Connect another account";
|
||||
if (supportsApiKey) return "Use a new API key";
|
||||
|
||||
@@ -10,10 +10,8 @@ import { openOAuthPopup } from "@/lib/oauth-popup";
|
||||
import { useQueryClient } from "@tanstack/react-query";
|
||||
import { useEffect, useRef, useState } from "react";
|
||||
import {
|
||||
countSupportedTypes,
|
||||
filterSystemCredentials,
|
||||
getActionButtonText,
|
||||
getSupportedTypes,
|
||||
getSystemCredentials,
|
||||
} from "./helpers";
|
||||
|
||||
@@ -46,8 +44,6 @@ export function useCredentialsInput({
|
||||
] = useState(false);
|
||||
const [isHostScopedCredentialsModalOpen, setHostScopedCredentialsModalOpen] =
|
||||
useState(false);
|
||||
const [isCredentialTypeSelectorOpen, setCredentialTypeSelectorOpen] =
|
||||
useState(false);
|
||||
const [isOAuth2FlowInProgress, setOAuth2FlowInProgress] = useState(false);
|
||||
const [oAuthPopupController, setOAuthPopupController] =
|
||||
useState<AbortController | null>(null);
|
||||
@@ -267,27 +263,7 @@ export function useCredentialsInput({
|
||||
}
|
||||
}
|
||||
|
||||
const hasMultipleCredentialTypes =
|
||||
countSupportedTypes(
|
||||
supportsOAuth2,
|
||||
supportsApiKey,
|
||||
supportsUserPassword,
|
||||
supportsHostScoped,
|
||||
) > 1;
|
||||
|
||||
const supportedTypes = getSupportedTypes(
|
||||
supportsOAuth2,
|
||||
supportsApiKey,
|
||||
supportsUserPassword,
|
||||
supportsHostScoped,
|
||||
);
|
||||
|
||||
function handleActionButtonClick() {
|
||||
if (hasMultipleCredentialTypes) {
|
||||
setCredentialTypeSelectorOpen(true);
|
||||
return;
|
||||
}
|
||||
|
||||
if (supportsOAuth2) {
|
||||
handleOAuthLogin();
|
||||
} else if (supportsApiKey) {
|
||||
@@ -332,8 +308,6 @@ export function useCredentialsInput({
|
||||
supportsOAuth2,
|
||||
supportsUserPassword,
|
||||
supportsHostScoped,
|
||||
hasMultipleCredentialTypes,
|
||||
supportedTypes,
|
||||
isSystemProvider,
|
||||
userCredentials,
|
||||
systemCredentials,
|
||||
@@ -343,7 +317,6 @@ export function useCredentialsInput({
|
||||
isAPICredentialsModalOpen,
|
||||
isUserPasswordCredentialsModalOpen,
|
||||
isHostScopedCredentialsModalOpen,
|
||||
isCredentialTypeSelectorOpen,
|
||||
isOAuth2FlowInProgress,
|
||||
oAuthPopupController,
|
||||
credentialToDelete,
|
||||
@@ -358,7 +331,6 @@ export function useCredentialsInput({
|
||||
setAPICredentialsModalOpen,
|
||||
setUserPasswordCredentialsModalOpen,
|
||||
setHostScopedCredentialsModalOpen,
|
||||
setCredentialTypeSelectorOpen,
|
||||
setCredentialToDelete,
|
||||
handleActionButtonClick,
|
||||
handleCredentialSelect,
|
||||
|
||||
@@ -478,17 +478,16 @@ export function CronScheduler({
|
||||
</div>
|
||||
)}
|
||||
|
||||
{frequency !== "hourly" &&
|
||||
!(frequency === "custom" && customInterval.unit !== "days") && (
|
||||
<div className="flex items-center gap-4 space-y-2">
|
||||
<Label className="pt-2">At</Label>
|
||||
<Input
|
||||
type="time"
|
||||
value={selectedTime}
|
||||
onChange={(e) => setSelectedTime(e.target.value)}
|
||||
/>
|
||||
</div>
|
||||
)}
|
||||
{frequency !== "hourly" && (
|
||||
<div className="flex items-center gap-4 space-y-2">
|
||||
<Label className="pt-2">At</Label>
|
||||
<Input
|
||||
type="time"
|
||||
value={selectedTime}
|
||||
onChange={(e) => setSelectedTime(e.target.value)}
|
||||
/>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
@@ -1,15 +1,18 @@
|
||||
import { useNodeStore } from "@/app/(platform)/build/stores/nodeStore";
|
||||
import { Switch } from "@/components/atoms/Switch/Switch";
|
||||
import { CredentialsInput } from "@/components/contextual/CredentialsInput/CredentialsInput";
|
||||
import { CredentialsGroupedView } from "@/components/contextual/CredentialsInput/components/CredentialsGroupedView/CredentialsGroupedView";
|
||||
import type { CredentialField } from "@/components/contextual/CredentialsInput/components/CredentialsGroupedView/helpers";
|
||||
import {
|
||||
BlockIOCredentialsSubSchema,
|
||||
CredentialsMetaInput,
|
||||
} from "@/lib/autogpt-server-api";
|
||||
import { FieldProps, getUiOptions } from "@rjsf/utils";
|
||||
import { useMemo } from "react";
|
||||
import { useCallback, useMemo } from "react";
|
||||
import { useShallow } from "zustand/react/shallow";
|
||||
import { CredentialFieldTitle } from "./components/CredentialFieldTitle";
|
||||
|
||||
const CREDENTIAL_KEY = "credentials";
|
||||
|
||||
export const CredentialsField = (props: FieldProps) => {
|
||||
const { formData, onChange, schema, registry, fieldPathId, required } = props;
|
||||
|
||||
@@ -36,27 +39,25 @@ export const CredentialsField = (props: FieldProps) => {
|
||||
return typeof value === "boolean" ? value : false;
|
||||
}, [node?.data?.metadata?.credentials_optional]);
|
||||
|
||||
const handleChange = (newValue: any) => {
|
||||
onChange(newValue, fieldPathId?.path);
|
||||
};
|
||||
// In builder canvas (nodeId exists): show star based on credentialsOptional toggle
|
||||
// In run dialogs (no nodeId): show star based on schema's required array
|
||||
const isRequired = nodeId ? !credentialsOptional : required;
|
||||
|
||||
const handleSelectCredentials = (credentialsMeta?: CredentialsMetaInput) => {
|
||||
if (credentialsMeta) {
|
||||
handleChange({
|
||||
id: credentialsMeta.id,
|
||||
provider: credentialsMeta.provider,
|
||||
title: credentialsMeta.title,
|
||||
type: credentialsMeta.type,
|
||||
});
|
||||
} else {
|
||||
handleChange(undefined);
|
||||
}
|
||||
};
|
||||
// Convert single schema to CredentialField[] for CredentialsGroupedView
|
||||
const credentialFields: CredentialField[] = useMemo(
|
||||
() => [[CREDENTIAL_KEY, schema as BlockIOCredentialsSubSchema]],
|
||||
[schema],
|
||||
);
|
||||
|
||||
// Convert formData to CredentialsMetaInput format
|
||||
const selectedCredentials: CredentialsMetaInput | undefined = useMemo(
|
||||
() =>
|
||||
formData?.id
|
||||
const requiredCredentials = useMemo(
|
||||
() => (isRequired ? new Set([CREDENTIAL_KEY]) : new Set<string>()),
|
||||
[isRequired],
|
||||
);
|
||||
|
||||
// Convert formData to inputCredentials map for CredentialsGroupedView
|
||||
const inputCredentials = useMemo(
|
||||
() => ({
|
||||
[CREDENTIAL_KEY]: formData?.id
|
||||
? {
|
||||
id: formData.id,
|
||||
provider: formData.provider,
|
||||
@@ -64,12 +65,28 @@ export const CredentialsField = (props: FieldProps) => {
|
||||
type: formData.type,
|
||||
}
|
||||
: undefined,
|
||||
}),
|
||||
[formData?.id, formData?.provider, formData?.title, formData?.type],
|
||||
);
|
||||
|
||||
// In builder canvas (nodeId exists): show star based on credentialsOptional toggle
|
||||
// In run dialogs (no nodeId): show star based on schema's required array
|
||||
const isRequired = nodeId ? !credentialsOptional : required;
|
||||
const handleCredentialChange = useCallback(
|
||||
(_key: string, value?: CredentialsMetaInput) => {
|
||||
if (value) {
|
||||
onChange(
|
||||
{
|
||||
id: value.id,
|
||||
provider: value.provider,
|
||||
title: value.title,
|
||||
type: value.type,
|
||||
},
|
||||
fieldPathId?.path,
|
||||
);
|
||||
} else {
|
||||
onChange(undefined, fieldPathId?.path);
|
||||
}
|
||||
},
|
||||
[onChange, fieldPathId?.path],
|
||||
);
|
||||
|
||||
return (
|
||||
<div className="flex flex-col gap-2">
|
||||
@@ -80,15 +97,13 @@ export const CredentialsField = (props: FieldProps) => {
|
||||
schema={schema}
|
||||
required={isRequired}
|
||||
/>
|
||||
<CredentialsInput
|
||||
schema={schema as BlockIOCredentialsSubSchema}
|
||||
selectedCredentials={selectedCredentials}
|
||||
onSelectCredentials={handleSelectCredentials}
|
||||
siblingInputs={hardcodedValues}
|
||||
<CredentialsGroupedView
|
||||
credentialFields={credentialFields}
|
||||
requiredCredentials={requiredCredentials}
|
||||
inputCredentials={inputCredentials}
|
||||
inputValues={hardcodedValues}
|
||||
onCredentialChange={handleCredentialChange}
|
||||
showTitle={false}
|
||||
readOnly={formContext?.readOnly}
|
||||
isOptional={!isRequired}
|
||||
className="w-full"
|
||||
variant="node"
|
||||
/>
|
||||
|
||||
|
||||
@@ -11,10 +11,6 @@ export enum Key {
|
||||
CHAT_SESSION_ID = "chat_session_id",
|
||||
COOKIE_CONSENT = "autogpt_cookie_consent",
|
||||
AI_AGENT_SAFETY_POPUP_SHOWN = "ai-agent-safety-popup-shown",
|
||||
COPILOT_SOUND_ENABLED = "copilot-sound-enabled",
|
||||
COPILOT_NOTIFICATIONS_ENABLED = "copilot-notifications-enabled",
|
||||
COPILOT_NOTIFICATION_BANNER_DISMISSED = "copilot-notification-banner-dismissed",
|
||||
COPILOT_NOTIFICATION_DIALOG_DISMISSED = "copilot-notification-dialog-dismissed",
|
||||
}
|
||||
|
||||
function get(key: Key) {
|
||||
|
||||
@@ -65,7 +65,7 @@ The result routes data to yes_output or no_output, enabling intelligent branchin
|
||||
| condition | A plaintext English description of the condition to evaluate | str | Yes |
|
||||
| yes_value | (Optional) Value to output if the condition is true. If not provided, input_value will be used. | Yes Value | No |
|
||||
| no_value | (Optional) Value to output if the condition is false. If not provided, input_value will be used. | No Value | No |
|
||||
| model | The language model to use for evaluating the condition. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-sonnet-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-2.5-pro" \| "google/gemini-3.1-pro-preview" \| "google/gemini-3-flash-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-3.1-flash-lite-preview" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "mistralai/mistral-large-2512" \| "mistralai/mistral-medium-3.1" \| "mistralai/mistral-small-3.2-24b-instruct" \| "mistralai/codestral-2508" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "cohere/command-a-03-2025" \| "cohere/command-a-translate-08-2025" \| "cohere/command-a-reasoning-08-2025" \| "cohere/command-a-vision-07-2025" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-reasoning-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| 
"nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "microsoft/phi-4" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-3" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
|
||||
| model | The language model to use for evaluating the condition. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-sonnet-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-2.5-pro" \| "google/gemini-3.1-pro-preview" \| "google/gemini-3-flash-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-3.1-flash-lite-preview" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "mistralai/mistral-large-2512" \| "mistralai/mistral-medium-3.1" \| "mistralai/mistral-small-3.2-24b-instruct" \| "mistralai/codestral-2508" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| 
"meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-3" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
|
||||
|
||||
### Outputs
|
||||
|
||||
@@ -103,7 +103,7 @@ The block sends the entire conversation history to the chosen LLM, including sys
|
||||
|-------|-------------|------|----------|
|
||||
| prompt | The prompt to send to the language model. | str | No |
|
||||
| messages | List of messages in the conversation. | List[Any] | Yes |
|
||||
| model | The language model to use for the conversation. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-sonnet-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-2.5-pro" \| "google/gemini-3.1-pro-preview" \| "google/gemini-3-flash-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-3.1-flash-lite-preview" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "mistralai/mistral-large-2512" \| "mistralai/mistral-medium-3.1" \| "mistralai/mistral-small-3.2-24b-instruct" \| "mistralai/codestral-2508" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "cohere/command-a-03-2025" \| "cohere/command-a-translate-08-2025" \| "cohere/command-a-reasoning-08-2025" \| "cohere/command-a-vision-07-2025" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-reasoning-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| 
"nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "microsoft/phi-4" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-3" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
|
||||
| model | The language model to use for the conversation. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-sonnet-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-2.5-pro" \| "google/gemini-3.1-pro-preview" \| "google/gemini-3-flash-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-3.1-flash-lite-preview" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "mistralai/mistral-large-2512" \| "mistralai/mistral-medium-3.1" \| "mistralai/mistral-small-3.2-24b-instruct" \| "mistralai/codestral-2508" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| 
"meta-llama/llama-4-maverick" \| "x-ai/grok-3" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
|
||||
| max_tokens | The maximum number of tokens to generate in the chat completion. | int | No |
|
||||
| ollama_host | Ollama host for local models | str | No |
|
||||
|
||||
@@ -257,7 +257,7 @@ The block formulates a prompt based on the given focus or source data, sends it
|
||||
|-------|-------------|------|----------|
|
||||
| focus | The focus of the list to generate. | str | No |
|
||||
| source_data | The data to generate the list from. | str | No |
|
||||
| model | The language model to use for generating the list. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-sonnet-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-2.5-pro" \| "google/gemini-3.1-pro-preview" \| "google/gemini-3-flash-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-3.1-flash-lite-preview" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "mistralai/mistral-large-2512" \| "mistralai/mistral-medium-3.1" \| "mistralai/mistral-small-3.2-24b-instruct" \| "mistralai/codestral-2508" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "cohere/command-a-03-2025" \| "cohere/command-a-translate-08-2025" \| "cohere/command-a-reasoning-08-2025" \| "cohere/command-a-vision-07-2025" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-reasoning-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| 
"nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "microsoft/phi-4" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-3" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
|
||||
| model | The language model to use for generating the list. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-sonnet-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-2.5-pro" \| "google/gemini-3.1-pro-preview" \| "google/gemini-3-flash-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-3.1-flash-lite-preview" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "mistralai/mistral-large-2512" \| "mistralai/mistral-medium-3.1" \| "mistralai/mistral-small-3.2-24b-instruct" \| "mistralai/codestral-2508" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" 
\| "meta-llama/llama-4-maverick" \| "x-ai/grok-3" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
|
||||
| max_retries | Maximum number of retries for generating a valid list. | int | No |
|
||||
| force_json_output | Whether to force the LLM to produce a JSON-only response. This can increase the block's reliability, but may also reduce the quality of the response because it prohibits the LLM from reasoning before providing its JSON response. | bool | No |
|
||||
| max_tokens | The maximum number of tokens to generate in the chat completion. | int | No |
|
||||
@@ -424,7 +424,7 @@ The block sends the input prompt to a chosen LLM, along with any system prompts
|
||||
| prompt | The prompt to send to the language model. | str | Yes |
|
||||
| expected_format | Expected format of the response. If provided, the response will be validated against this format. The keys should be the expected fields in the response, and the values should be the description of the field. | Dict[str, str] | Yes |
|
||||
| list_result | Whether the response should be a list of objects in the expected format. | bool | No |
|
||||
| model | The language model to use for answering the prompt. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-sonnet-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-2.5-pro" \| "google/gemini-3.1-pro-preview" \| "google/gemini-3-flash-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-3.1-flash-lite-preview" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "mistralai/mistral-large-2512" \| "mistralai/mistral-medium-3.1" \| "mistralai/mistral-small-3.2-24b-instruct" \| "mistralai/codestral-2508" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "cohere/command-a-03-2025" \| "cohere/command-a-translate-08-2025" \| "cohere/command-a-reasoning-08-2025" \| "cohere/command-a-vision-07-2025" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-reasoning-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| 
"nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "microsoft/phi-4" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-3" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
|
||||
| model | The language model to use for answering the prompt. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-sonnet-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-2.5-pro" \| "google/gemini-3.1-pro-preview" \| "google/gemini-3-flash-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-3.1-flash-lite-preview" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "mistralai/mistral-large-2512" \| "mistralai/mistral-medium-3.1" \| "mistralai/mistral-small-3.2-24b-instruct" \| "mistralai/codestral-2508" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" 
\| "meta-llama/llama-4-maverick" \| "x-ai/grok-3" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
|
||||
| force_json_output | Whether to force the LLM to produce a JSON-only response. This can increase the block's reliability, but may also reduce the quality of the response because it prohibits the LLM from reasoning before providing its JSON response. | bool | No |
|
||||
| sys_prompt | The system prompt to provide additional context to the model. | str | No |
|
||||
| conversation_history | The conversation history to provide context for the prompt. | List[Dict[str, Any]] | No |
|
||||
@@ -464,7 +464,7 @@ The block sends the input prompt to a chosen LLM, processes the response, and re
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| prompt | The prompt to send to the language model. You can use any of the {keys} from Prompt Values to fill in the prompt with values from the prompt values dictionary by putting them in curly braces. | str | Yes |
|
||||
| model | The language model to use for answering the prompt. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-sonnet-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-2.5-pro" \| "google/gemini-3.1-pro-preview" \| "google/gemini-3-flash-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-3.1-flash-lite-preview" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "mistralai/mistral-large-2512" \| "mistralai/mistral-medium-3.1" \| "mistralai/mistral-small-3.2-24b-instruct" \| "mistralai/codestral-2508" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "cohere/command-a-03-2025" \| "cohere/command-a-translate-08-2025" \| "cohere/command-a-reasoning-08-2025" \| "cohere/command-a-vision-07-2025" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-reasoning-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| 
"nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "microsoft/phi-4" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-3" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
|
||||
| model | The language model to use for answering the prompt. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-sonnet-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-2.5-pro" \| "google/gemini-3.1-pro-preview" \| "google/gemini-3-flash-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-3.1-flash-lite-preview" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "mistralai/mistral-large-2512" \| "mistralai/mistral-medium-3.1" \| "mistralai/mistral-small-3.2-24b-instruct" \| "mistralai/codestral-2508" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" 
\| "meta-llama/llama-4-maverick" \| "x-ai/grok-3" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
|
||||
| sys_prompt | The system prompt to provide additional context to the model. | str | No |
|
||||
| retry | Number of times to retry the LLM call if the response does not match the expected format. | int | No |
|
||||
| prompt_values | Values used to fill in the prompt. The values can be used in the prompt by putting them in a double curly braces, e.g. {{variable_name}}. | Dict[str, str] | No |
|
||||
@@ -501,7 +501,7 @@ The block splits the input text into smaller chunks, sends each chunk to an LLM
|
||||
| Input | Description | Type | Required |
|
||||
|-------|-------------|------|----------|
|
||||
| text | The text to summarize. | str | Yes |
|
||||
| model | The language model to use for summarizing the text. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-sonnet-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-2.5-pro" \| "google/gemini-3.1-pro-preview" \| "google/gemini-3-flash-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-3.1-flash-lite-preview" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "mistralai/mistral-large-2512" \| "mistralai/mistral-medium-3.1" \| "mistralai/mistral-small-3.2-24b-instruct" \| "mistralai/codestral-2508" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "cohere/command-a-03-2025" \| "cohere/command-a-translate-08-2025" \| "cohere/command-a-reasoning-08-2025" \| "cohere/command-a-vision-07-2025" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-reasoning-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| 
"nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "microsoft/phi-4" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-3" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
|
||||
| model | The language model to use for summarizing the text. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-sonnet-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-2.5-pro" \| "google/gemini-3.1-pro-preview" \| "google/gemini-3-flash-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-3.1-flash-lite-preview" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "mistralai/mistral-large-2512" \| "mistralai/mistral-medium-3.1" \| "mistralai/mistral-small-3.2-24b-instruct" \| "mistralai/codestral-2508" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" 
\| "meta-llama/llama-4-maverick" \| "x-ai/grok-3" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
|
||||
| focus | The topic to focus on in the summary | str | No |
|
||||
| style | The style of the summary to generate. | "concise" \| "detailed" \| "bullet points" \| "numbered list" | No |
|
||||
| max_tokens | The maximum number of tokens to generate in the chat completion. | int | No |
|
||||
@@ -763,7 +763,7 @@ Configure agent_mode_max_iterations to control loop behavior: 0 for single decis
| Input | Description | Type | Required |
|-------|-------------|------|----------|
| prompt | The prompt to send to the language model. | str | Yes |
| model | The language model to use for answering the prompt. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-sonnet-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-2.5-pro" \| "google/gemini-3.1-pro-preview" \| "google/gemini-3-flash-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-3.1-flash-lite-preview" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "mistralai/mistral-large-2512" \| "mistralai/mistral-medium-3.1" \| "mistralai/mistral-small-3.2-24b-instruct" \| "mistralai/codestral-2508" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "cohere/command-a-03-2025" \| "cohere/command-a-translate-08-2025" \| "cohere/command-a-reasoning-08-2025" \| "cohere/command-a-vision-07-2025" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-reasoning-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "microsoft/phi-4" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-3" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| model | The language model to use for answering the prompt. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-sonnet-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-2.5-pro" \| "google/gemini-3.1-pro-preview" \| "google/gemini-3-flash-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-3.1-flash-lite-preview" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "mistralai/mistral-large-2512" \| "mistralai/mistral-medium-3.1" \| "mistralai/mistral-small-3.2-24b-instruct" \| "mistralai/codestral-2508" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-3" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| multiple_tool_calls | Whether to allow multiple tool calls in a single response. | bool | No |
| sys_prompt | The system prompt to provide additional context to the model. | str | No |
| conversation_history | The conversation history to provide context for the prompt. | List[Dict[str, Any]] | No |
Reference in New Issue
Block a user