mirror of
https://github.com/Significant-Gravitas/AutoGPT.git
synced 2026-01-19 12:08:46 -05:00
Compare commits
26 Commits
make-old-w
...
copilot-ba
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
5d3903b6fb | ||
|
|
b0855e8cf2 | ||
|
|
5e2146dd76 | ||
|
|
103a62c9da | ||
|
|
fc8434fb30 | ||
|
|
3ae08cd48e | ||
|
|
4db13837b9 | ||
|
|
df87867625 | ||
|
|
e503126170 | ||
|
|
7ee28197a3 | ||
|
|
818de26d24 | ||
|
|
cb08def96c | ||
|
|
ac2daee5f8 | ||
|
|
266e0d79d4 | ||
|
|
01f443190e | ||
|
|
bdba0033de | ||
|
|
b87c64ce38 | ||
|
|
003affca43 | ||
|
|
290d0d9a9b | ||
|
|
fba61c72ed | ||
|
|
79d45a15d0 | ||
|
|
66f0d97ca2 | ||
|
|
5894a8fcdf | ||
|
|
dff8efa35d | ||
|
|
e26822998f | ||
|
|
4a7bc006a8 |
2
.github/workflows/classic-autogpt-ci.yml
vendored
2
.github/workflows/classic-autogpt-ci.yml
vendored
@@ -29,7 +29,7 @@ jobs:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
python-version: ["3.12", "3.13", "3.14"]
|
||||
python-version: ["3.10"]
|
||||
platform-os: [ubuntu, macos, macos-arm64, windows]
|
||||
runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
|
||||
|
||||
|
||||
13
.github/workflows/classic-autogpts-ci.yml
vendored
13
.github/workflows/classic-autogpts-ci.yml
vendored
@@ -11,6 +11,9 @@ on:
|
||||
- 'classic/original_autogpt/**'
|
||||
- 'classic/forge/**'
|
||||
- 'classic/benchmark/**'
|
||||
- 'classic/run'
|
||||
- 'classic/cli.py'
|
||||
- 'classic/setup.py'
|
||||
- '!**/*.md'
|
||||
pull_request:
|
||||
branches: [ master, dev, release-* ]
|
||||
@@ -19,6 +22,9 @@ on:
|
||||
- 'classic/original_autogpt/**'
|
||||
- 'classic/forge/**'
|
||||
- 'classic/benchmark/**'
|
||||
- 'classic/run'
|
||||
- 'classic/cli.py'
|
||||
- 'classic/setup.py'
|
||||
- '!**/*.md'
|
||||
|
||||
defaults:
|
||||
@@ -53,15 +59,10 @@ jobs:
|
||||
run: |
|
||||
curl -sSL https://install.python-poetry.org | python -
|
||||
|
||||
- name: Install dependencies
|
||||
working-directory: ./classic/${{ matrix.agent-name }}/
|
||||
run: poetry install
|
||||
|
||||
- name: Run regression tests
|
||||
run: |
|
||||
./run agent start ${{ matrix.agent-name }}
|
||||
cd ${{ matrix.agent-name }}
|
||||
poetry run serve &
|
||||
sleep 10 # Wait for server to start
|
||||
poetry run agbenchmark --mock --test=BasicRetrieval --test=Battleship --test=WebArenaTask_0
|
||||
poetry run agbenchmark --test=WriteFile
|
||||
env:
|
||||
|
||||
11
.github/workflows/classic-benchmark-ci.yml
vendored
11
.github/workflows/classic-benchmark-ci.yml
vendored
@@ -23,7 +23,7 @@ defaults:
|
||||
shell: bash
|
||||
|
||||
env:
|
||||
min-python-version: '3.12'
|
||||
min-python-version: '3.10'
|
||||
|
||||
jobs:
|
||||
test:
|
||||
@@ -33,7 +33,7 @@ jobs:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
python-version: ["3.12", "3.13", "3.14"]
|
||||
python-version: ["3.10"]
|
||||
platform-os: [ubuntu, macos, macos-arm64, windows]
|
||||
runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
|
||||
defaults:
|
||||
@@ -128,16 +128,11 @@ jobs:
|
||||
run: |
|
||||
curl -sSL https://install.python-poetry.org | python -
|
||||
|
||||
- name: Install agent dependencies
|
||||
working-directory: classic/${{ matrix.agent-name }}
|
||||
run: poetry install
|
||||
|
||||
- name: Run regression tests
|
||||
working-directory: classic
|
||||
run: |
|
||||
./run agent start ${{ matrix.agent-name }}
|
||||
cd ${{ matrix.agent-name }}
|
||||
poetry run python -m forge &
|
||||
sleep 10 # Wait for server to start
|
||||
|
||||
set +e # Ignore non-zero exit codes and continue execution
|
||||
echo "Running the following command: poetry run agbenchmark --maintain --mock"
|
||||
|
||||
2
.github/workflows/classic-forge-ci.yml
vendored
2
.github/workflows/classic-forge-ci.yml
vendored
@@ -31,7 +31,7 @@ jobs:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
python-version: ["3.12", "3.13", "3.14"]
|
||||
python-version: ["3.10"]
|
||||
platform-os: [ubuntu, macos, macos-arm64, windows]
|
||||
runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
|
||||
|
||||
|
||||
60
.github/workflows/classic-frontend-ci.yml
vendored
Normal file
60
.github/workflows/classic-frontend-ci.yml
vendored
Normal file
@@ -0,0 +1,60 @@
|
||||
name: Classic - Frontend CI/CD
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- dev
|
||||
- 'ci-test*' # This will match any branch that starts with "ci-test"
|
||||
paths:
|
||||
- 'classic/frontend/**'
|
||||
- '.github/workflows/classic-frontend-ci.yml'
|
||||
pull_request:
|
||||
paths:
|
||||
- 'classic/frontend/**'
|
||||
- '.github/workflows/classic-frontend-ci.yml'
|
||||
|
||||
jobs:
|
||||
build:
|
||||
permissions:
|
||||
contents: write
|
||||
pull-requests: write
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
BUILD_BRANCH: ${{ format('classic-frontend-build/{0}', github.ref_name) }}
|
||||
|
||||
steps:
|
||||
- name: Checkout Repo
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Setup Flutter
|
||||
uses: subosito/flutter-action@v2
|
||||
with:
|
||||
flutter-version: '3.13.2'
|
||||
|
||||
- name: Build Flutter to Web
|
||||
run: |
|
||||
cd classic/frontend
|
||||
flutter build web --base-href /app/
|
||||
|
||||
# - name: Commit and Push to ${{ env.BUILD_BRANCH }}
|
||||
# if: github.event_name == 'push'
|
||||
# run: |
|
||||
# git config --local user.email "action@github.com"
|
||||
# git config --local user.name "GitHub Action"
|
||||
# git add classic/frontend/build/web
|
||||
# git checkout -B ${{ env.BUILD_BRANCH }}
|
||||
# git commit -m "Update frontend build to ${GITHUB_SHA:0:7}" -a
|
||||
# git push -f origin ${{ env.BUILD_BRANCH }}
|
||||
|
||||
- name: Create PR ${{ env.BUILD_BRANCH }} -> ${{ github.ref_name }}
|
||||
if: github.event_name == 'push'
|
||||
uses: peter-evans/create-pull-request@v7
|
||||
with:
|
||||
add-paths: classic/frontend/build/web
|
||||
base: ${{ github.ref_name }}
|
||||
branch: ${{ env.BUILD_BRANCH }}
|
||||
delete-branch: true
|
||||
title: "Update frontend build in `${{ github.ref_name }}`"
|
||||
body: "This PR updates the frontend build based on commit ${{ github.sha }}."
|
||||
commit-message: "Update frontend build based on commit ${{ github.sha }}"
|
||||
4
.github/workflows/classic-python-checks.yml
vendored
4
.github/workflows/classic-python-checks.yml
vendored
@@ -59,7 +59,7 @@ jobs:
|
||||
needs: get-changed-parts
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
min-python-version: "3.12"
|
||||
min-python-version: "3.10"
|
||||
|
||||
strategy:
|
||||
matrix:
|
||||
@@ -111,7 +111,7 @@ jobs:
|
||||
needs: get-changed-parts
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
min-python-version: "3.12"
|
||||
min-python-version: "3.10"
|
||||
|
||||
strategy:
|
||||
matrix:
|
||||
|
||||
3
.gitignore
vendored
3
.gitignore
vendored
@@ -3,7 +3,6 @@
|
||||
classic/original_autogpt/keys.py
|
||||
classic/original_autogpt/*.json
|
||||
auto_gpt_workspace/*
|
||||
.autogpt/
|
||||
*.mpeg
|
||||
.env
|
||||
# Root .env files
|
||||
@@ -178,5 +177,5 @@ autogpt_platform/backend/settings.py
|
||||
|
||||
*.ign.*
|
||||
.test-contents
|
||||
**/.claude/settings.local.json
|
||||
.claude/settings.local.json
|
||||
/autogpt_platform/backend/logs
|
||||
|
||||
@@ -6,12 +6,11 @@ start-core:
|
||||
|
||||
# Stop core services
|
||||
stop-core:
|
||||
docker compose stop deps
|
||||
docker compose stop
|
||||
|
||||
reset-db:
|
||||
docker compose stop db
|
||||
rm -rf db/docker/volumes/db/data
|
||||
cd backend && poetry run prisma migrate deploy
|
||||
cd backend && poetry run prisma generate
|
||||
|
||||
# View logs for core services
|
||||
logs-core:
|
||||
@@ -58,4 +57,4 @@ help:
|
||||
@echo " run-backend - Run the backend FastAPI server"
|
||||
@echo " run-frontend - Run the frontend Next.js development server"
|
||||
@echo " test-data - Run the test data creator"
|
||||
@echo " load-store-agents - Load store agents from agents/ folder into test database"
|
||||
@echo " load-store-agents - Load store agents from agents/ folder into test database"
|
||||
|
||||
@@ -12,7 +12,11 @@ class ChatConfig(BaseSettings):
|
||||
|
||||
# OpenAI API Configuration
|
||||
model: str = Field(
|
||||
default="qwen/qwen3-235b-a22b-2507", description="Default model to use"
|
||||
default="anthropic/claude-opus-4.5", description="Default model to use"
|
||||
)
|
||||
title_model: str = Field(
|
||||
default="openai/gpt-4o-mini",
|
||||
description="Model to use for generating session titles (should be fast/cheap)",
|
||||
)
|
||||
api_key: str | None = Field(default=None, description="OpenAI API key")
|
||||
base_url: str | None = Field(
|
||||
@@ -72,8 +76,31 @@ class ChatConfig(BaseSettings):
|
||||
v = "https://openrouter.ai/api/v1"
|
||||
return v
|
||||
|
||||
# Prompt paths for different contexts
|
||||
PROMPT_PATHS: dict[str, str] = {
|
||||
"default": "prompts/chat_system.md",
|
||||
"onboarding": "prompts/onboarding_system.md",
|
||||
}
|
||||
|
||||
def get_system_prompt_for_type(
|
||||
self, prompt_type: str = "default", **template_vars
|
||||
) -> str:
|
||||
"""Load and render a system prompt by type.
|
||||
|
||||
Args:
|
||||
prompt_type: The type of prompt to load ("default" or "onboarding")
|
||||
**template_vars: Variables to substitute in the template
|
||||
|
||||
Returns:
|
||||
Rendered system prompt string
|
||||
"""
|
||||
prompt_path_str = self.PROMPT_PATHS.get(
|
||||
prompt_type, self.PROMPT_PATHS["default"]
|
||||
)
|
||||
return self._load_prompt_from_path(prompt_path_str, **template_vars)
|
||||
|
||||
def get_system_prompt(self, **template_vars) -> str:
|
||||
"""Load and render the system prompt from file.
|
||||
"""Load and render the default system prompt from file.
|
||||
|
||||
Args:
|
||||
**template_vars: Variables to substitute in the template
|
||||
@@ -82,9 +109,21 @@ class ChatConfig(BaseSettings):
|
||||
Rendered system prompt string
|
||||
|
||||
"""
|
||||
return self._load_prompt_from_path(self.system_prompt_path, **template_vars)
|
||||
|
||||
def _load_prompt_from_path(self, prompt_path_str: str, **template_vars) -> str:
|
||||
"""Load and render a system prompt from a given path.
|
||||
|
||||
Args:
|
||||
prompt_path_str: Path to the prompt file relative to chat module
|
||||
**template_vars: Variables to substitute in the template
|
||||
|
||||
Returns:
|
||||
Rendered system prompt string
|
||||
"""
|
||||
# Get the path relative to this module
|
||||
module_dir = Path(__file__).parent
|
||||
prompt_path = module_dir / self.system_prompt_path
|
||||
prompt_path = module_dir / prompt_path_str
|
||||
|
||||
# Check for .j2 extension first (Jinja2 template)
|
||||
j2_path = Path(str(prompt_path) + ".j2")
|
||||
|
||||
215
autogpt_platform/backend/backend/api/features/chat/db.py
Normal file
215
autogpt_platform/backend/backend/api/features/chat/db.py
Normal file
@@ -0,0 +1,215 @@
|
||||
"""Database operations for chat sessions."""
|
||||
|
||||
import logging
|
||||
from datetime import UTC, datetime
|
||||
from typing import Any, cast
|
||||
|
||||
from prisma.models import ChatMessage as PrismaChatMessage
|
||||
from prisma.models import ChatSession as PrismaChatSession
|
||||
from prisma.types import (
|
||||
ChatMessageCreateInput,
|
||||
ChatSessionCreateInput,
|
||||
ChatSessionUpdateInput,
|
||||
)
|
||||
|
||||
from backend.util.json import SafeJson
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
async def get_chat_session(session_id: str) -> PrismaChatSession | None:
|
||||
"""Get a chat session by ID from the database."""
|
||||
session = await PrismaChatSession.prisma().find_unique(
|
||||
where={"id": session_id},
|
||||
include={"Messages": True},
|
||||
)
|
||||
if session and session.Messages:
|
||||
# Sort messages by sequence in Python since Prisma doesn't support order_by in include
|
||||
session.Messages.sort(key=lambda m: m.sequence)
|
||||
return session
|
||||
|
||||
|
||||
async def create_chat_session(
|
||||
session_id: str,
|
||||
user_id: str | None,
|
||||
) -> PrismaChatSession:
|
||||
"""Create a new chat session in the database."""
|
||||
data = ChatSessionCreateInput(
|
||||
id=session_id,
|
||||
userId=user_id,
|
||||
credentials=SafeJson({}),
|
||||
successfulAgentRuns=SafeJson({}),
|
||||
successfulAgentSchedules=SafeJson({}),
|
||||
)
|
||||
return await PrismaChatSession.prisma().create(
|
||||
data=data,
|
||||
include={"Messages": True},
|
||||
)
|
||||
|
||||
|
||||
async def update_chat_session(
|
||||
session_id: str,
|
||||
credentials: dict[str, Any] | None = None,
|
||||
successful_agent_runs: dict[str, Any] | None = None,
|
||||
successful_agent_schedules: dict[str, Any] | None = None,
|
||||
total_prompt_tokens: int | None = None,
|
||||
total_completion_tokens: int | None = None,
|
||||
title: str | None = None,
|
||||
) -> PrismaChatSession | None:
|
||||
"""Update a chat session's metadata."""
|
||||
data: ChatSessionUpdateInput = {"updatedAt": datetime.now(UTC)}
|
||||
|
||||
if credentials is not None:
|
||||
data["credentials"] = SafeJson(credentials)
|
||||
if successful_agent_runs is not None:
|
||||
data["successfulAgentRuns"] = SafeJson(successful_agent_runs)
|
||||
if successful_agent_schedules is not None:
|
||||
data["successfulAgentSchedules"] = SafeJson(successful_agent_schedules)
|
||||
if total_prompt_tokens is not None:
|
||||
data["totalPromptTokens"] = total_prompt_tokens
|
||||
if total_completion_tokens is not None:
|
||||
data["totalCompletionTokens"] = total_completion_tokens
|
||||
if title is not None:
|
||||
data["title"] = title
|
||||
|
||||
session = await PrismaChatSession.prisma().update(
|
||||
where={"id": session_id},
|
||||
data=data,
|
||||
include={"Messages": True},
|
||||
)
|
||||
if session and session.Messages:
|
||||
session.Messages.sort(key=lambda m: m.sequence)
|
||||
return session
|
||||
|
||||
|
||||
async def add_chat_message(
|
||||
session_id: str,
|
||||
role: str,
|
||||
sequence: int,
|
||||
content: str | None = None,
|
||||
name: str | None = None,
|
||||
tool_call_id: str | None = None,
|
||||
refusal: str | None = None,
|
||||
tool_calls: list[dict[str, Any]] | None = None,
|
||||
function_call: dict[str, Any] | None = None,
|
||||
) -> PrismaChatMessage:
|
||||
"""Add a message to a chat session."""
|
||||
# Build the input dict dynamically - only include optional fields when they
|
||||
# have values, as Prisma TypedDict validation fails when optional fields
|
||||
# are explicitly set to None
|
||||
data: dict[str, Any] = {
|
||||
"Session": {"connect": {"id": session_id}},
|
||||
"role": role,
|
||||
"sequence": sequence,
|
||||
}
|
||||
|
||||
# Add optional string fields
|
||||
if content is not None:
|
||||
data["content"] = content
|
||||
if name is not None:
|
||||
data["name"] = name
|
||||
if tool_call_id is not None:
|
||||
data["toolCallId"] = tool_call_id
|
||||
if refusal is not None:
|
||||
data["refusal"] = refusal
|
||||
|
||||
# Add optional JSON fields only when they have values
|
||||
if tool_calls is not None:
|
||||
data["toolCalls"] = SafeJson(tool_calls)
|
||||
if function_call is not None:
|
||||
data["functionCall"] = SafeJson(function_call)
|
||||
|
||||
# Update session's updatedAt timestamp
|
||||
await PrismaChatSession.prisma().update(
|
||||
where={"id": session_id},
|
||||
data={"updatedAt": datetime.now(UTC)},
|
||||
)
|
||||
|
||||
return await PrismaChatMessage.prisma().create(
|
||||
data=cast(ChatMessageCreateInput, data)
|
||||
)
|
||||
|
||||
|
||||
async def add_chat_messages_batch(
|
||||
session_id: str,
|
||||
messages: list[dict[str, Any]],
|
||||
start_sequence: int,
|
||||
) -> list[PrismaChatMessage]:
|
||||
"""Add multiple messages to a chat session in a batch."""
|
||||
if not messages:
|
||||
return []
|
||||
|
||||
created_messages = []
|
||||
for i, msg in enumerate(messages):
|
||||
# Build the input dict dynamically - only include optional JSON fields
|
||||
# when they have values, as Prisma TypedDict validation fails when
|
||||
# optional fields are explicitly set to None
|
||||
data: dict[str, Any] = {
|
||||
"Session": {"connect": {"id": session_id}},
|
||||
"role": msg["role"],
|
||||
"sequence": start_sequence + i,
|
||||
}
|
||||
|
||||
# Add optional string fields
|
||||
if msg.get("content") is not None:
|
||||
data["content"] = msg["content"]
|
||||
if msg.get("name") is not None:
|
||||
data["name"] = msg["name"]
|
||||
if msg.get("tool_call_id") is not None:
|
||||
data["toolCallId"] = msg["tool_call_id"]
|
||||
if msg.get("refusal") is not None:
|
||||
data["refusal"] = msg["refusal"]
|
||||
|
||||
# Add optional JSON fields only when they have values
|
||||
if msg.get("tool_calls") is not None:
|
||||
data["toolCalls"] = SafeJson(msg["tool_calls"])
|
||||
if msg.get("function_call") is not None:
|
||||
data["functionCall"] = SafeJson(msg["function_call"])
|
||||
|
||||
created = await PrismaChatMessage.prisma().create(
|
||||
data=cast(ChatMessageCreateInput, data)
|
||||
)
|
||||
created_messages.append(created)
|
||||
|
||||
# Update session's updatedAt timestamp
|
||||
await PrismaChatSession.prisma().update(
|
||||
where={"id": session_id},
|
||||
data={"updatedAt": datetime.now(UTC)},
|
||||
)
|
||||
|
||||
return created_messages
|
||||
|
||||
|
||||
async def get_user_chat_sessions(
|
||||
user_id: str,
|
||||
limit: int = 50,
|
||||
offset: int = 0,
|
||||
) -> list[PrismaChatSession]:
|
||||
"""Get chat sessions for a user, ordered by most recent."""
|
||||
return await PrismaChatSession.prisma().find_many(
|
||||
where={"userId": user_id},
|
||||
order={"updatedAt": "desc"},
|
||||
take=limit,
|
||||
skip=offset,
|
||||
)
|
||||
|
||||
|
||||
async def get_user_session_count(user_id: str) -> int:
|
||||
"""Get the total number of chat sessions for a user."""
|
||||
return await PrismaChatSession.prisma().count(where={"userId": user_id})
|
||||
|
||||
|
||||
async def delete_chat_session(session_id: str) -> bool:
|
||||
"""Delete a chat session and all its messages."""
|
||||
try:
|
||||
await PrismaChatSession.prisma().delete(where={"id": session_id})
|
||||
return True
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to delete chat session {session_id}: {e}")
|
||||
return False
|
||||
|
||||
|
||||
async def get_chat_session_message_count(session_id: str) -> int:
|
||||
"""Get the number of messages in a chat session."""
|
||||
count = await PrismaChatMessage.prisma().count(where={"sessionId": session_id})
|
||||
return count
|
||||
@@ -16,11 +16,15 @@ from openai.types.chat.chat_completion_message_tool_call_param import (
|
||||
ChatCompletionMessageToolCallParam,
|
||||
Function,
|
||||
)
|
||||
from prisma.models import ChatMessage as PrismaChatMessage
|
||||
from prisma.models import ChatSession as PrismaChatSession
|
||||
from pydantic import BaseModel
|
||||
|
||||
from backend.data.redis_client import get_redis_async
|
||||
from backend.util import json
|
||||
from backend.util.exceptions import RedisError
|
||||
|
||||
from . import db as chat_db
|
||||
from .config import ChatConfig
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -46,6 +50,7 @@ class Usage(BaseModel):
|
||||
class ChatSession(BaseModel):
|
||||
session_id: str
|
||||
user_id: str | None
|
||||
title: str | None = None
|
||||
messages: list[ChatMessage]
|
||||
usage: list[Usage]
|
||||
credentials: dict[str, dict] = {} # Map of provider -> credential metadata
|
||||
@@ -59,6 +64,7 @@ class ChatSession(BaseModel):
|
||||
return ChatSession(
|
||||
session_id=str(uuid.uuid4()),
|
||||
user_id=user_id,
|
||||
title=None,
|
||||
messages=[],
|
||||
usage=[],
|
||||
credentials={},
|
||||
@@ -66,6 +72,85 @@ class ChatSession(BaseModel):
|
||||
updated_at=datetime.now(UTC),
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def from_prisma(
|
||||
prisma_session: PrismaChatSession,
|
||||
prisma_messages: list[PrismaChatMessage] | None = None,
|
||||
) -> "ChatSession":
|
||||
"""Convert Prisma models to Pydantic ChatSession."""
|
||||
messages = []
|
||||
if prisma_messages:
|
||||
for msg in prisma_messages:
|
||||
tool_calls = None
|
||||
if msg.toolCalls:
|
||||
tool_calls = (
|
||||
json.loads(msg.toolCalls)
|
||||
if isinstance(msg.toolCalls, str)
|
||||
else msg.toolCalls
|
||||
)
|
||||
|
||||
function_call = None
|
||||
if msg.functionCall:
|
||||
function_call = (
|
||||
json.loads(msg.functionCall)
|
||||
if isinstance(msg.functionCall, str)
|
||||
else msg.functionCall
|
||||
)
|
||||
|
||||
messages.append(
|
||||
ChatMessage(
|
||||
role=msg.role,
|
||||
content=msg.content,
|
||||
name=msg.name,
|
||||
tool_call_id=msg.toolCallId,
|
||||
refusal=msg.refusal,
|
||||
tool_calls=tool_calls,
|
||||
function_call=function_call,
|
||||
)
|
||||
)
|
||||
|
||||
# Parse JSON fields from Prisma
|
||||
credentials = (
|
||||
json.loads(prisma_session.credentials)
|
||||
if isinstance(prisma_session.credentials, str)
|
||||
else prisma_session.credentials or {}
|
||||
)
|
||||
successful_agent_runs = (
|
||||
json.loads(prisma_session.successfulAgentRuns)
|
||||
if isinstance(prisma_session.successfulAgentRuns, str)
|
||||
else prisma_session.successfulAgentRuns or {}
|
||||
)
|
||||
successful_agent_schedules = (
|
||||
json.loads(prisma_session.successfulAgentSchedules)
|
||||
if isinstance(prisma_session.successfulAgentSchedules, str)
|
||||
else prisma_session.successfulAgentSchedules or {}
|
||||
)
|
||||
|
||||
# Calculate usage from token counts
|
||||
usage = []
|
||||
if prisma_session.totalPromptTokens or prisma_session.totalCompletionTokens:
|
||||
usage.append(
|
||||
Usage(
|
||||
prompt_tokens=prisma_session.totalPromptTokens or 0,
|
||||
completion_tokens=prisma_session.totalCompletionTokens or 0,
|
||||
total_tokens=(prisma_session.totalPromptTokens or 0)
|
||||
+ (prisma_session.totalCompletionTokens or 0),
|
||||
)
|
||||
)
|
||||
|
||||
return ChatSession(
|
||||
session_id=prisma_session.id,
|
||||
user_id=prisma_session.userId,
|
||||
title=prisma_session.title,
|
||||
messages=messages,
|
||||
usage=usage,
|
||||
credentials=credentials,
|
||||
started_at=prisma_session.createdAt,
|
||||
updated_at=prisma_session.updatedAt,
|
||||
successful_agent_runs=successful_agent_runs,
|
||||
successful_agent_schedules=successful_agent_schedules,
|
||||
)
|
||||
|
||||
def to_openai_messages(self) -> list[ChatCompletionMessageParam]:
|
||||
messages = []
|
||||
for message in self.messages:
|
||||
@@ -155,50 +240,234 @@ class ChatSession(BaseModel):
|
||||
return messages
|
||||
|
||||
|
||||
async def get_chat_session(
|
||||
session_id: str,
|
||||
user_id: str | None,
|
||||
) -> ChatSession | None:
|
||||
"""Get a chat session by ID."""
|
||||
async def _get_session_from_cache(session_id: str) -> ChatSession | None:
|
||||
"""Get a chat session from Redis cache."""
|
||||
redis_key = f"chat:session:{session_id}"
|
||||
async_redis = await get_redis_async()
|
||||
|
||||
raw_session: bytes | None = await async_redis.get(redis_key)
|
||||
|
||||
if raw_session is None:
|
||||
logger.warning(f"Session {session_id} not found in Redis")
|
||||
return None
|
||||
|
||||
try:
|
||||
session = ChatSession.model_validate_json(raw_session)
|
||||
logger.info(
|
||||
f"Loading session {session_id} from cache: "
|
||||
f"message_count={len(session.messages)}, "
|
||||
f"roles={[m.role for m in session.messages]}"
|
||||
)
|
||||
return session
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to deserialize session {session_id}: {e}", exc_info=True)
|
||||
raise RedisError(f"Corrupted session data for {session_id}") from e
|
||||
|
||||
|
||||
async def _cache_session(session: ChatSession) -> None:
|
||||
"""Cache a chat session in Redis."""
|
||||
redis_key = f"chat:session:{session.session_id}"
|
||||
async_redis = await get_redis_async()
|
||||
await async_redis.setex(redis_key, config.session_ttl, session.model_dump_json())
|
||||
|
||||
|
||||
async def _get_session_from_db(session_id: str) -> ChatSession | None:
|
||||
"""Get a chat session from the database."""
|
||||
prisma_session = await chat_db.get_chat_session(session_id)
|
||||
if not prisma_session:
|
||||
return None
|
||||
|
||||
messages = prisma_session.Messages
|
||||
logger.info(
|
||||
f"Loading session {session_id} from DB: "
|
||||
f"has_messages={messages is not None}, "
|
||||
f"message_count={len(messages) if messages else 0}, "
|
||||
f"roles={[m.role for m in messages] if messages else []}"
|
||||
)
|
||||
|
||||
return ChatSession.from_prisma(prisma_session, messages)
|
||||
|
||||
|
||||
async def _save_session_to_db(
|
||||
session: ChatSession, existing_message_count: int
|
||||
) -> None:
|
||||
"""Save or update a chat session in the database."""
|
||||
# Check if session exists in DB
|
||||
existing = await chat_db.get_chat_session(session.session_id)
|
||||
|
||||
if not existing:
|
||||
# Create new session
|
||||
await chat_db.create_chat_session(
|
||||
session_id=session.session_id,
|
||||
user_id=session.user_id,
|
||||
)
|
||||
existing_message_count = 0
|
||||
|
||||
# Calculate total tokens from usage
|
||||
total_prompt = sum(u.prompt_tokens for u in session.usage)
|
||||
total_completion = sum(u.completion_tokens for u in session.usage)
|
||||
|
||||
# Update session metadata
|
||||
await chat_db.update_chat_session(
|
||||
session_id=session.session_id,
|
||||
credentials=session.credentials,
|
||||
successful_agent_runs=session.successful_agent_runs,
|
||||
successful_agent_schedules=session.successful_agent_schedules,
|
||||
total_prompt_tokens=total_prompt,
|
||||
total_completion_tokens=total_completion,
|
||||
)
|
||||
|
||||
# Add new messages (only those after existing count)
|
||||
new_messages = session.messages[existing_message_count:]
|
||||
if new_messages:
|
||||
messages_data = []
|
||||
for msg in new_messages:
|
||||
messages_data.append(
|
||||
{
|
||||
"role": msg.role,
|
||||
"content": msg.content,
|
||||
"name": msg.name,
|
||||
"tool_call_id": msg.tool_call_id,
|
||||
"refusal": msg.refusal,
|
||||
"tool_calls": msg.tool_calls,
|
||||
"function_call": msg.function_call,
|
||||
}
|
||||
)
|
||||
logger.info(
|
||||
f"Saving {len(new_messages)} new messages to DB for session {session.session_id}: "
|
||||
f"roles={[m['role'] for m in messages_data]}, "
|
||||
f"start_sequence={existing_message_count}"
|
||||
)
|
||||
await chat_db.add_chat_messages_batch(
|
||||
session_id=session.session_id,
|
||||
messages=messages_data,
|
||||
start_sequence=existing_message_count,
|
||||
)
|
||||
|
||||
|
||||
async def get_chat_session(
|
||||
session_id: str,
|
||||
user_id: str | None,
|
||||
) -> ChatSession | None:
|
||||
"""Get a chat session by ID.
|
||||
|
||||
Checks Redis cache first, falls back to database if not found.
|
||||
Caches database results back to Redis.
|
||||
"""
|
||||
# Try cache first
|
||||
try:
|
||||
session = await _get_session_from_cache(session_id)
|
||||
if session:
|
||||
# Verify user ownership
|
||||
if session.user_id is not None and session.user_id != user_id:
|
||||
logger.warning(
|
||||
f"Session {session_id} user id mismatch: {session.user_id} != {user_id}"
|
||||
)
|
||||
return None
|
||||
return session
|
||||
except RedisError:
|
||||
logger.warning(f"Cache error for session {session_id}, trying database")
|
||||
except Exception as e:
|
||||
logger.warning(f"Unexpected cache error for session {session_id}: {e}")
|
||||
|
||||
# Fall back to database
|
||||
logger.info(f"Session {session_id} not in cache, checking database")
|
||||
session = await _get_session_from_db(session_id)
|
||||
|
||||
if session is None:
|
||||
logger.warning(f"Session {session_id} not found in cache or database")
|
||||
return None
|
||||
|
||||
# Verify user ownership
|
||||
if session.user_id is not None and session.user_id != user_id:
|
||||
logger.warning(
|
||||
f"Session {session_id} user id mismatch: {session.user_id} != {user_id}"
|
||||
)
|
||||
return None
|
||||
|
||||
# Cache the session from DB
|
||||
try:
|
||||
await _cache_session(session)
|
||||
logger.info(f"Cached session {session_id} from database")
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to cache session {session_id}: {e}")
|
||||
|
||||
return session
|
||||
|
||||
|
||||
async def upsert_chat_session(
|
||||
session: ChatSession,
|
||||
) -> ChatSession:
|
||||
"""Update a chat session with the given messages."""
|
||||
|
||||
redis_key = f"chat:session:{session.session_id}"
|
||||
|
||||
async_redis = await get_redis_async()
|
||||
resp = await async_redis.setex(
|
||||
redis_key, config.session_ttl, session.model_dump_json()
|
||||
"""Update a chat session in both cache and database."""
|
||||
# Get existing message count from DB for incremental saves
|
||||
existing_message_count = await chat_db.get_chat_session_message_count(
|
||||
session.session_id
|
||||
)
|
||||
|
||||
if not resp:
|
||||
# Save to database
|
||||
try:
|
||||
await _save_session_to_db(session, existing_message_count)
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to save session {session.session_id} to database: {e}")
|
||||
# Continue to cache even if DB fails
|
||||
|
||||
# Save to cache
|
||||
try:
|
||||
await _cache_session(session)
|
||||
except Exception as e:
|
||||
raise RedisError(
|
||||
f"Failed to persist chat session {session.session_id} to Redis: {resp}"
|
||||
)
|
||||
f"Failed to persist chat session {session.session_id} to Redis: {e}"
|
||||
) from e
|
||||
|
||||
return session
|
||||
|
||||
|
||||
async def create_chat_session(user_id: str | None) -> ChatSession:
|
||||
"""Create a new chat session and persist it."""
|
||||
session = ChatSession.new(user_id)
|
||||
|
||||
# Create in database first
|
||||
try:
|
||||
await chat_db.create_chat_session(
|
||||
session_id=session.session_id,
|
||||
user_id=user_id,
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to create session in database: {e}")
|
||||
# Continue even if DB fails - cache will still work
|
||||
|
||||
# Cache the session
|
||||
try:
|
||||
await _cache_session(session)
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to cache new session: {e}")
|
||||
|
||||
return session
|
||||
|
||||
|
||||
async def get_user_sessions(
|
||||
user_id: str,
|
||||
limit: int = 50,
|
||||
offset: int = 0,
|
||||
) -> list[ChatSession]:
|
||||
"""Get all chat sessions for a user from the database."""
|
||||
prisma_sessions = await chat_db.get_user_chat_sessions(user_id, limit, offset)
|
||||
|
||||
sessions = []
|
||||
for prisma_session in prisma_sessions:
|
||||
# Convert without messages for listing (lighter weight)
|
||||
sessions.append(ChatSession.from_prisma(prisma_session, None))
|
||||
|
||||
return sessions
|
||||
|
||||
|
||||
async def delete_chat_session(session_id: str) -> bool:
|
||||
"""Delete a chat session from both cache and database."""
|
||||
# Delete from cache
|
||||
try:
|
||||
redis_key = f"chat:session:{session_id}"
|
||||
async_redis = await get_redis_async()
|
||||
await async_redis.delete(redis_key)
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to delete session {session_id} from cache: {e}")
|
||||
|
||||
# Delete from database
|
||||
return await chat_db.delete_chat_session(session_id)
|
||||
|
||||
@@ -68,3 +68,50 @@ async def test_chatsession_redis_storage_user_id_mismatch():
|
||||
s2 = await get_chat_session(s.session_id, None)
|
||||
|
||||
assert s2 is None
|
||||
|
||||
|
||||
@pytest.mark.asyncio(loop_scope="session")
|
||||
async def test_chatsession_db_storage():
|
||||
"""Test that messages are correctly saved to and loaded from DB (not cache)."""
|
||||
from backend.data.redis_client import get_redis_async
|
||||
|
||||
# Create session with messages including assistant message
|
||||
s = ChatSession.new(user_id=None)
|
||||
s.messages = messages # Contains user, assistant, and tool messages
|
||||
assert s.session_id is not None, "Session id is not set"
|
||||
# Upsert to save to both cache and DB
|
||||
s = await upsert_chat_session(s)
|
||||
|
||||
# Clear the Redis cache to force DB load
|
||||
redis_key = f"chat:session:{s.session_id}"
|
||||
async_redis = await get_redis_async()
|
||||
await async_redis.delete(redis_key)
|
||||
|
||||
# Load from DB (cache was cleared)
|
||||
s2 = await get_chat_session(
|
||||
session_id=s.session_id,
|
||||
user_id=s.user_id,
|
||||
)
|
||||
|
||||
assert s2 is not None, "Session not found after loading from DB"
|
||||
assert len(s2.messages) == len(
|
||||
s.messages
|
||||
), f"Message count mismatch: expected {len(s.messages)}, got {len(s2.messages)}"
|
||||
|
||||
# Verify all roles are present
|
||||
roles = [m.role for m in s2.messages]
|
||||
assert "user" in roles, f"User message missing. Roles found: {roles}"
|
||||
assert "assistant" in roles, f"Assistant message missing. Roles found: {roles}"
|
||||
assert "tool" in roles, f"Tool message missing. Roles found: {roles}"
|
||||
|
||||
# Verify message content
|
||||
for orig, loaded in zip(s.messages, s2.messages):
|
||||
assert orig.role == loaded.role, f"Role mismatch: {orig.role} != {loaded.role}"
|
||||
assert (
|
||||
orig.content == loaded.content
|
||||
), f"Content mismatch for {orig.role}: {orig.content} != {loaded.content}"
|
||||
if orig.tool_calls:
|
||||
assert (
|
||||
loaded.tool_calls is not None
|
||||
), f"Tool calls missing for {orig.role} message"
|
||||
assert len(orig.tool_calls) == len(loaded.tool_calls)
|
||||
|
||||
@@ -1,12 +1,80 @@
|
||||
You are Otto, an AI Co-Pilot and Forward Deployed Engineer for AutoGPT, an AI Business Automation tool. Your mission is to help users quickly find and set up AutoGPT agents to solve their business problems.
|
||||
You are Otto, an AI Co-Pilot and Forward Deployed Engineer for AutoGPT, an AI Business Automation tool. Your mission is to help users quickly find, create, and set up AutoGPT agents to solve their business problems.
|
||||
|
||||
Here are the functions available to you:
|
||||
|
||||
<functions>
|
||||
1. **find_agent** - Search for agents that solve the user's problem
|
||||
2. **run_agent** - Run or schedule an agent (automatically handles setup)
|
||||
**Understanding & Discovery:**
|
||||
1. **add_understanding** - Save information about the user's business context (use this as you learn about them)
|
||||
2. **find_agent** - Search the marketplace for pre-built agents that solve the user's problem
|
||||
3. **find_library_agent** - Search the user's personal library of saved agents
|
||||
4. **find_block** - Search for individual blocks (building components for agents)
|
||||
5. **search_platform_docs** - Search AutoGPT documentation for help
|
||||
|
||||
**Agent Creation & Editing:**
|
||||
6. **create_agent** - Create a new custom agent from scratch based on user requirements
|
||||
7. **edit_agent** - Modify an existing agent (add/remove blocks, change configuration)
|
||||
|
||||
**Execution & Output:**
|
||||
8. **run_agent** - Run or schedule an agent (automatically handles setup)
|
||||
9. **run_block** - Run a single block directly without creating an agent
|
||||
10. **agent_output** - Get the output/results from a running or completed agent execution
|
||||
</functions>
|
||||
|
||||
## ALWAYS GET THE USER'S NAME
|
||||
|
||||
**This is critical:** If you don't know the user's name, ask for it in your first response. Use a friendly, natural approach:
|
||||
- "Hi! I'm Otto. What's your name?"
|
||||
- "Hey there! Before we dive in, what should I call you?"
|
||||
|
||||
Once you have their name, immediately save it with `add_understanding(user_name="...")` and use it throughout the conversation.
|
||||
|
||||
## BUILDING USER UNDERSTANDING
|
||||
|
||||
**If no User Business Context is provided below**, gather information naturally during conversation - don't interrogate them.
|
||||
|
||||
**Key information to gather (in priority order):**
|
||||
1. Their name (ALWAYS first if unknown)
|
||||
2. Their job title and role
|
||||
3. Their business/company and industry
|
||||
4. Pain points and what they want to automate
|
||||
5. Tools they currently use
|
||||
|
||||
**How to gather this information:**
|
||||
- Ask naturally as part of helping them (e.g., "What's your role?" or "What industry are you in?")
|
||||
- When they share information, immediately save it using `add_understanding`
|
||||
- Don't ask all questions at once - spread them across the conversation
|
||||
- Prioritize understanding their immediate problem first
|
||||
|
||||
**Example:**
|
||||
```
|
||||
User: "I need help automating my social media"
|
||||
Otto: I can help with that! I'm Otto - what's your name?
|
||||
User: "I'm Sarah"
|
||||
Otto: [calls add_understanding with user_name="Sarah"]
|
||||
Nice to meet you, Sarah! What's your role - are you a social media manager or business owner?
|
||||
User: "I'm the marketing director at a fintech startup"
|
||||
Otto: [calls add_understanding with job_title="Marketing Director", industry="fintech", business_size="startup"]
|
||||
Great! Let me find social media automation agents for you.
|
||||
[calls find_agent with query="social media automation marketing"]
|
||||
```
|
||||
|
||||
## WHEN TO USE WHICH TOOL
|
||||
|
||||
**Finding existing agents:**
|
||||
- `find_agent` - Search the marketplace for pre-built agents others have created
|
||||
- `find_library_agent` - Search agents the user has already saved to their library
|
||||
|
||||
**Creating/editing agents:**
|
||||
- `create_agent` - When user wants a custom agent that doesn't exist, or has specific requirements
|
||||
- `edit_agent` - When user wants to modify an existing agent (change inputs, add blocks, etc.)
|
||||
|
||||
**Running agents:**
|
||||
- `run_agent` - To execute an agent (handles credentials and inputs automatically)
|
||||
- `agent_output` - To check the results of a running or completed agent execution
|
||||
|
||||
**Direct execution:**
|
||||
- `run_block` - Run a single block directly without needing a full agent
|
||||
|
||||
## HOW run_agent WORKS
|
||||
|
||||
The `run_agent` tool automatically handles the entire setup flow:
|
||||
@@ -21,49 +89,61 @@ Parameters:
|
||||
- `use_defaults`: Set to `true` to run with default values (only after user confirms)
|
||||
- `schedule_name` + `cron`: For scheduled execution
|
||||
|
||||
## HOW create_agent WORKS
|
||||
|
||||
Use `create_agent` when the user wants to build a custom automation:
|
||||
- Describe what the agent should do
|
||||
- The tool will create the agent structure with appropriate blocks
|
||||
- Returns the agent ID for further editing or running
|
||||
|
||||
## HOW agent_output WORKS
|
||||
|
||||
Use `agent_output` to get results from agent executions:
|
||||
- Pass the execution_id from a run_agent response
|
||||
- Returns the current status and any outputs produced
|
||||
- Useful for checking if an agent has completed and what it produced
|
||||
|
||||
## WORKFLOW
|
||||
|
||||
1. **find_agent** - Search for agents that solve the user's problem
|
||||
2. **run_agent** (first call, no inputs) - Get available inputs for the agent
|
||||
3. **Ask user** what values they want to use OR if they want to use defaults
|
||||
4. **run_agent** (second call) - Either with `inputs={...}` or `use_defaults=true`
|
||||
1. **Get their name** - If unknown, ask for it first
|
||||
2. **Understand context** - Ask 1-2 questions about their problem while helping
|
||||
3. **Find or create** - Use find_agent for existing solutions, create_agent for custom needs
|
||||
4. **Set up and run** - Use run_agent to execute, agent_output to get results
|
||||
|
||||
## YOUR APPROACH
|
||||
|
||||
**Step 1: Understand the Problem**
|
||||
**Step 1: Greet and Identify**
|
||||
- If you don't know their name, ask for it
|
||||
- Be friendly and conversational
|
||||
|
||||
**Step 2: Understand the Problem**
|
||||
- Ask maximum 1-2 targeted questions
|
||||
- Focus on: What business problem are they solving?
|
||||
- Move quickly to searching for solutions
|
||||
- If they want to create/edit an agent, understand what it should do
|
||||
|
||||
**Step 2: Find Agents**
|
||||
- Use `find_agent` immediately with relevant keywords
|
||||
- Suggest the best option from search results
|
||||
- Explain briefly how it solves their problem
|
||||
**Step 3: Find or Create**
|
||||
- For existing solutions: Use `find_agent` with relevant keywords
|
||||
- For custom needs: Use `create_agent` with their requirements
|
||||
- For modifications: Use `edit_agent` on an existing agent
|
||||
|
||||
**Step 3: Get Agent Inputs**
|
||||
- Call `run_agent(username_agent_slug="creator/agent-name")` without inputs
|
||||
- This returns the available inputs (required and optional)
|
||||
- Present these to the user and ask what values they want
|
||||
**Step 4: Execute**
|
||||
- Call `run_agent` without inputs first to see what's available
|
||||
- Ask user what values they want or if defaults are okay
|
||||
- Call `run_agent` again with inputs or `use_defaults=true`
|
||||
- Use `agent_output` to check results when needed
|
||||
|
||||
**Step 4: Run with User's Choice**
|
||||
- If user provides values: `run_agent(username_agent_slug="...", inputs={...})`
|
||||
- If user says "use defaults": `run_agent(username_agent_slug="...", use_defaults=true)`
|
||||
- On success, share the agent link with the user
|
||||
## USING add_understanding
|
||||
|
||||
**For Scheduled Execution:**
|
||||
- Add `schedule_name` and `cron` parameters
|
||||
- Example: `run_agent(username_agent_slug="...", inputs={...}, schedule_name="Daily Report", cron="0 9 * * *")`
|
||||
Call `add_understanding` whenever you learn something about the user:
|
||||
|
||||
## FUNCTION CALL FORMAT
|
||||
**User info:** `user_name`, `job_title`
|
||||
**Business:** `business_name`, `industry`, `business_size` (1-10, 11-50, 51-200, 201-1000, 1000+), `user_role` (decision maker, implementer, end user)
|
||||
**Processes:** `key_workflows` (array), `daily_activities` (array)
|
||||
**Pain points:** `pain_points` (array), `bottlenecks` (array), `manual_tasks` (array), `automation_goals` (array)
|
||||
**Tools:** `current_software` (array), `existing_automation` (array)
|
||||
**Other:** `additional_notes`
|
||||
|
||||
To call a function, use this exact format:
|
||||
`<function_call>function_name(parameter="value")</function_call>`
|
||||
|
||||
Examples:
|
||||
- `<function_call>find_agent(query="social media automation")</function_call>`
|
||||
- `<function_call>run_agent(username_agent_slug="creator/agent-name")</function_call>` (get inputs)
|
||||
- `<function_call>run_agent(username_agent_slug="creator/agent-name", inputs={"topic": "AI news"})</function_call>`
|
||||
- `<function_call>run_agent(username_agent_slug="creator/agent-name", use_defaults=true)</function_call>`
|
||||
Example: `add_understanding(user_name="Sarah", job_title="Marketing Director", industry="fintech")`
|
||||
|
||||
## KEY RULES
|
||||
|
||||
@@ -73,8 +153,12 @@ Examples:
|
||||
- Don't run agents without first showing available inputs to the user
|
||||
- Don't use `use_defaults=true` without user explicitly confirming
|
||||
- Don't write responses longer than 3 sentences
|
||||
- Don't interrogate users with many questions - gather info naturally
|
||||
|
||||
**What You DO:**
|
||||
- ALWAYS ask for user's name if you don't have it
|
||||
- Save user information with `add_understanding` as you learn it
|
||||
- Use their name when addressing them
|
||||
- Always call run_agent first without inputs to see what's available
|
||||
- Ask user what values they want OR if they want to use defaults
|
||||
- Keep all responses to maximum 3 sentences
|
||||
@@ -87,18 +171,22 @@ Examples:
|
||||
## RESPONSE STRUCTURE
|
||||
|
||||
Before responding, wrap your analysis in <thinking> tags to systematically plan your approach:
|
||||
- Check if you know the user's name - if not, ask for it
|
||||
- Check if you have user context - if not, plan to gather some naturally
|
||||
- Extract the key business problem or request from the user's message
|
||||
- Determine what function call (if any) you need to make next
|
||||
- Plan your response to stay under the 3-sentence maximum
|
||||
|
||||
Example interaction:
|
||||
```
|
||||
User: "Run the AI news agent for me"
|
||||
Otto: <function_call>run_agent(username_agent_slug="autogpt/ai-news")</function_call>
|
||||
[Tool returns: Agent accepts inputs - Required: topic. Optional: num_articles (default: 5)]
|
||||
Otto: The AI News agent needs a topic. What topic would you like news about, or should I use the defaults?
|
||||
User: "Use defaults"
|
||||
Otto: <function_call>run_agent(username_agent_slug="autogpt/ai-news", use_defaults=true)</function_call>
|
||||
User: "Hi, I want to build an agent that monitors my competitors"
|
||||
Otto: <thinking>I don't know this user's name. I should ask for it while acknowledging their request.</thinking>
|
||||
Hi! I'm Otto and I'd love to help you build a competitor monitoring agent. What's your name?
|
||||
User: "I'm Mike"
|
||||
Otto: [calls add_understanding with user_name="Mike"]
|
||||
<thinking>Now I know Mike wants competitor monitoring. I should search for existing agents first.</thinking>
|
||||
Great to meet you, Mike! Let me search for competitor monitoring agents.
|
||||
[calls find_agent with query="competitor monitoring analysis"]
|
||||
```
|
||||
|
||||
KEEP ANSWERS TO 3 SENTENCES
|
||||
|
||||
@@ -0,0 +1,155 @@
|
||||
You are Otto, an AI Co-Pilot helping new users get started with AutoGPT, an AI Business Automation platform. Your mission is to welcome them, learn about their needs, and help them run their first successful agent.
|
||||
|
||||
Here are the functions available to you:
|
||||
|
||||
<functions>
|
||||
**Understanding & Discovery:**
|
||||
1. **add_understanding** - Save information about the user's business context (use this as you learn about them)
|
||||
2. **find_agent** - Search the marketplace for pre-built agents that solve the user's problem
|
||||
3. **find_library_agent** - Search the user's personal library of saved agents
|
||||
4. **find_block** - Search for individual blocks (building components for agents)
|
||||
5. **search_platform_docs** - Search AutoGPT documentation for help
|
||||
|
||||
**Agent Creation & Editing:**
|
||||
6. **create_agent** - Create a new custom agent from scratch based on user requirements
|
||||
7. **edit_agent** - Modify an existing agent (add/remove blocks, change configuration)
|
||||
|
||||
**Execution & Output:**
|
||||
8. **run_agent** - Run or schedule an agent (automatically handles setup)
|
||||
9. **run_block** - Run a single block directly without creating an agent
|
||||
10. **agent_output** - Get the output/results from a running or completed agent execution
|
||||
</functions>
|
||||
|
||||
## YOUR ONBOARDING MISSION
|
||||
|
||||
You are guiding a new user through their first experience with AutoGPT. Your goal is to:
|
||||
1. Welcome them warmly and get their name
|
||||
2. Learn about them and their business
|
||||
3. Find or create an agent that solves a real problem for them
|
||||
4. Get that agent running successfully
|
||||
5. Celebrate their success and point them to next steps
|
||||
|
||||
## PHASE 1: WELCOME & INTRODUCTION
|
||||
|
||||
**Start every conversation by:**
|
||||
- Giving a warm, friendly greeting
|
||||
- Introducing yourself as Otto, their AI assistant
|
||||
- Asking for their name immediately
|
||||
|
||||
**Example opening:**
|
||||
```
|
||||
Hi! I'm Otto, your AI assistant. Welcome to AutoGPT! I'm here to help you set up your first automation. What's your name?
|
||||
```
|
||||
|
||||
Once you have their name, save it immediately with `add_understanding(user_name="...")` and use it throughout.
|
||||
|
||||
## PHASE 2: DISCOVERY
|
||||
|
||||
**After getting their name, learn about them:**
|
||||
- What's their role/job title?
|
||||
- What industry/business are they in?
|
||||
- What's one thing they'd love to automate?
|
||||
|
||||
**Keep it conversational - don't interrogate. Example:**
|
||||
```
|
||||
Nice to meet you, Sarah! What do you do for work, and what's one task you wish you could automate?
|
||||
```
|
||||
|
||||
Save everything you learn with `add_understanding`.
|
||||
|
||||
## PHASE 3: FIND OR CREATE AN AGENT
|
||||
|
||||
**Once you understand their need:**
|
||||
- Search for existing agents with `find_agent`
|
||||
- Present the best match and explain how it helps them
|
||||
- If nothing fits, offer to create a custom agent with `create_agent`
|
||||
|
||||
**Be enthusiastic about the solution:**
|
||||
```
|
||||
I found a great agent for you! The "Social Media Scheduler" can automatically post to your accounts on a schedule. Want to try it?
|
||||
```
|
||||
|
||||
## PHASE 4: SETUP & RUN
|
||||
|
||||
**Guide them through running the agent:**
|
||||
1. Call `run_agent` without inputs first to see what's needed
|
||||
2. Explain each input in simple terms
|
||||
3. Ask what values they want to use
|
||||
4. Run the agent with their inputs or defaults
|
||||
|
||||
**Don't mention credentials** - the UI handles that automatically.
|
||||
|
||||
## PHASE 5: CELEBRATE & HANDOFF
|
||||
|
||||
**After successful execution:**
|
||||
- Congratulate them on their first automation!
|
||||
- Tell them where to find this agent (their Library)
|
||||
- Mention they can explore more agents in the Marketplace
|
||||
- Offer to help with anything else
|
||||
|
||||
**Example:**
|
||||
```
|
||||
You did it! Your first agent is running. You can find it anytime in your Library. Ready to explore more automations?
|
||||
```
|
||||
|
||||
## KEY RULES
|
||||
|
||||
**What You DON'T Do:**
|
||||
- Don't help with login (frontend handles this)
|
||||
- Don't mention credentials (UI handles automatically)
|
||||
- Don't run agents without showing inputs first
|
||||
- Don't use `use_defaults=true` without explicit confirmation
|
||||
- Don't write responses longer than 3 sentences
|
||||
- Don't overwhelm with too many questions at once
|
||||
|
||||
**What You DO:**
|
||||
- ALWAYS get the user's name first
|
||||
- Be warm, encouraging, and celebratory
|
||||
- Save info with `add_understanding` as you learn it
|
||||
- Use their name when addressing them
|
||||
- Keep responses to maximum 3 sentences
|
||||
- Make them feel successful at each step
|
||||
|
||||
## USING add_understanding
|
||||
|
||||
Save information as you learn it:
|
||||
|
||||
**User info:** `user_name`, `job_title`
|
||||
**Business:** `business_name`, `industry`, `business_size`, `user_role`
|
||||
**Pain points:** `pain_points`, `manual_tasks`, `automation_goals`
|
||||
**Tools:** `current_software`
|
||||
|
||||
Example: `add_understanding(user_name="Sarah", job_title="Marketing Manager", automation_goals=["social media scheduling"])`
|
||||
|
||||
## HOW run_agent WORKS
|
||||
|
||||
1. **First call** (no inputs) → Shows available inputs
|
||||
2. **Credentials** → UI handles automatically (don't mention)
|
||||
3. **Execution** → Run with `inputs={...}` or `use_defaults=true`
|
||||
|
||||
## RESPONSE STRUCTURE
|
||||
|
||||
Before responding, plan your approach in <thinking> tags:
|
||||
- What phase am I in? (Welcome/Discovery/Find/Setup/Celebrate)
|
||||
- Do I know their name? If not, ask for it
|
||||
- What's the next step to move them forward?
|
||||
- Keep response under 3 sentences
|
||||
|
||||
**Example flow:**
|
||||
```
|
||||
User: "Hi"
|
||||
Otto: <thinking>Phase 1 - I need to welcome them and get their name.</thinking>
|
||||
Hi! I'm Otto, welcome to AutoGPT! I'm here to help you set up your first automation - what's your name?
|
||||
|
||||
User: "I'm Alex"
|
||||
Otto: [calls add_understanding with user_name="Alex"]
|
||||
<thinking>Got their name. Phase 2 - learn about them.</thinking>
|
||||
Great to meet you, Alex! What do you do for work, and what's one task you'd love to automate?
|
||||
|
||||
User: "I run an e-commerce store and spend hours on customer support emails"
|
||||
Otto: [calls add_understanding with industry="e-commerce", pain_points=["customer support emails"]]
|
||||
<thinking>Phase 3 - search for agents.</thinking>
|
||||
[calls find_agent with query="customer support email automation"]
|
||||
```
|
||||
|
||||
KEEP ANSWERS TO 3 SENTENCES - Be warm, helpful, and focused on their success!
|
||||
@@ -26,6 +26,14 @@ router = APIRouter(
|
||||
# ========== Request/Response Models ==========
|
||||
|
||||
|
||||
class StreamChatRequest(BaseModel):
|
||||
"""Request model for streaming chat with optional context."""
|
||||
|
||||
message: str
|
||||
is_user_message: bool = True
|
||||
context: dict[str, str] | None = None # {url: str, content: str}
|
||||
|
||||
|
||||
class CreateSessionResponse(BaseModel):
|
||||
"""Response model containing information on a newly created chat session."""
|
||||
|
||||
@@ -44,9 +52,64 @@ class SessionDetailResponse(BaseModel):
|
||||
messages: list[dict]
|
||||
|
||||
|
||||
class SessionSummaryResponse(BaseModel):
|
||||
"""Response model for a session summary (without messages)."""
|
||||
|
||||
id: str
|
||||
created_at: str
|
||||
    updated_at: str
    title: str | None = None


class ListSessionsResponse(BaseModel):
    """Response model for listing chat sessions."""

    sessions: list[SessionSummaryResponse]
    total: int


# ========== Routes ==========


@router.get(
    "/sessions",
    dependencies=[Security(auth.requires_user)],
)
async def list_sessions(
    user_id: Annotated[str, Security(auth.get_user_id)],
    limit: int = Query(default=50, ge=1, le=100),
    offset: int = Query(default=0, ge=0),
) -> ListSessionsResponse:
    """
    List chat sessions for the authenticated user.

    Returns a paginated list of chat sessions belonging to the current user,
    ordered by most recently updated.

    Args:
        user_id: The authenticated user's ID.
        limit: Maximum number of sessions to return (1-100).
        offset: Number of sessions to skip for pagination.

    Returns:
        ListSessionsResponse: List of session summaries and total count.
    """
    sessions = await chat_service.get_user_sessions(user_id, limit, offset)

    return ListSessionsResponse(
        sessions=[
            SessionSummaryResponse(
                id=session.session_id,
                created_at=session.started_at.isoformat(),
                updated_at=session.updated_at.isoformat(),
                title=None,  # TODO: Add title support
            )
            for session in sessions
        ],
        total=len(sessions),
    )


@router.post(
    "/sessions",
)
@@ -102,26 +165,89 @@ async def get_session(
    session = await chat_service.get_session(session_id, user_id)
    if not session:
        raise NotFoundError(f"Session {session_id} not found")

    messages = [message.model_dump() for message in session.messages]
    logger.info(
        f"Returning session {session_id}: "
        f"message_count={len(messages)}, "
        f"roles={[m.get('role') for m in messages]}"
    )

    return SessionDetailResponse(
        id=session.session_id,
        created_at=session.started_at.isoformat(),
        updated_at=session.updated_at.isoformat(),
        user_id=session.user_id or None,
        messages=[message.model_dump() for message in session.messages],
        messages=messages,
    )


@router.post(
    "/sessions/{session_id}/stream",
)
async def stream_chat_post(
    session_id: str,
    request: StreamChatRequest,
    user_id: str | None = Depends(auth.get_user_id),
):
    """
    Stream chat responses for a session (POST with context support).

    Streams the AI/completion responses in real time over Server-Sent Events (SSE), including:
    - Text fragments as they are generated
    - Tool call UI elements (if invoked)
    - Tool execution results

    Args:
        session_id: The chat session identifier to associate with the streamed messages.
        request: Request body containing message, is_user_message, and optional context.
        user_id: Optional authenticated user ID.

    Returns:
        StreamingResponse: SSE-formatted response chunks.
    """
    # Validate session exists before starting the stream
    # This prevents errors after the response has already started
    session = await chat_service.get_session(session_id, user_id)

    if not session:
        raise NotFoundError(f"Session {session_id} not found.")
    if session.user_id is None and user_id is not None:
        session = await chat_service.assign_user_to_session(session_id, user_id)

    async def event_generator() -> AsyncGenerator[str, None]:
        async for chunk in chat_service.stream_chat_completion(
            session_id,
            request.message,
            is_user_message=request.is_user_message,
            user_id=user_id,
            session=session,  # Pass pre-fetched session to avoid double-fetch
            context=request.context,
        ):
            yield chunk.to_sse()

    return StreamingResponse(
        event_generator(),
        media_type="text/event-stream",
        headers={
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "X-Accel-Buffering": "no",  # Disable nginx buffering
        },
    )
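
Below is a minimal client sketch for consuming this endpoint as Server-Sent Events. It assumes a locally running backend at http://localhost:8006 and uses httpx for streaming; the base URL and the absence of auth headers are assumptions for illustration, not part of this diff.

# Illustrative sketch only: consume POST /sessions/{session_id}/stream as SSE.
# Base URL and missing auth are assumptions; the payload mirrors StreamChatRequest.
import asyncio

import httpx


async def consume_stream(session_id: str, message: str) -> None:
    payload = {"message": message, "is_user_message": True, "context": None}
    async with httpx.AsyncClient(base_url="http://localhost:8006") as client:
        async with client.stream(
            "POST", f"/sessions/{session_id}/stream", json=payload, timeout=None
        ) as response:
            async for line in response.aiter_lines():
                if line.startswith("data: "):
                    print(line.removeprefix("data: "))  # one SSE event per chunk


# asyncio.run(consume_stream("<session-id>", "Find me an agent for lead gen"))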


@router.get(
    "/sessions/{session_id}/stream",
)
async def stream_chat(
async def stream_chat_get(
    session_id: str,
    message: Annotated[str, Query(min_length=1, max_length=10000)],
    user_id: str | None = Depends(auth.get_user_id),
    is_user_message: bool = Query(default=True),
):
    """
    Stream chat responses for a session.
    Stream chat responses for a session (GET - legacy endpoint).

    Streams the AI/completion responses in real time over Server-Sent Events (SSE), including:
    - Text fragments as they are generated
@@ -193,6 +319,133 @@ async def session_assign_user(
    return {"status": "ok"}


# ========== Onboarding Routes ==========
# These routes use a specialized onboarding system prompt


@router.post(
    "/onboarding/sessions",
)
async def create_onboarding_session(
    user_id: Annotated[str | None, Depends(auth.get_user_id)],
) -> CreateSessionResponse:
    """
    Create a new onboarding chat session.

    Initiates a new chat session specifically for user onboarding,
    using a specialized prompt that guides users through their first
    experience with AutoGPT.

    Args:
        user_id: The optional authenticated user ID parsed from the JWT.

    Returns:
        CreateSessionResponse: Details of the created onboarding session.
    """
    logger.info(
        f"Creating onboarding session with user_id: "
        f"...{user_id[-8:] if user_id and len(user_id) > 8 else '<redacted>'}"
    )

    session = await chat_service.create_chat_session(user_id)

    return CreateSessionResponse(
        id=session.session_id,
        created_at=session.started_at.isoformat(),
        user_id=session.user_id or None,
    )


@router.get(
    "/onboarding/sessions/{session_id}",
)
async def get_onboarding_session(
    session_id: str,
    user_id: Annotated[str | None, Depends(auth.get_user_id)],
) -> SessionDetailResponse:
    """
    Retrieve the details of an onboarding chat session.

    Args:
        session_id: The unique identifier for the onboarding session.
        user_id: The optional authenticated user ID.

    Returns:
        SessionDetailResponse: Details for the requested session.
    """
    session = await chat_service.get_session(session_id, user_id)
    if not session:
        raise NotFoundError(f"Session {session_id} not found")

    messages = [message.model_dump() for message in session.messages]
    logger.info(
        f"Returning onboarding session {session_id}: "
        f"message_count={len(messages)}, "
        f"roles={[m.get('role') for m in messages]}"
    )

    return SessionDetailResponse(
        id=session.session_id,
        created_at=session.started_at.isoformat(),
        updated_at=session.updated_at.isoformat(),
        user_id=session.user_id or None,
        messages=messages,
    )


@router.post(
    "/onboarding/sessions/{session_id}/stream",
)
async def stream_onboarding_chat(
    session_id: str,
    request: StreamChatRequest,
    user_id: str | None = Depends(auth.get_user_id),
):
    """
    Stream onboarding chat responses for a session.

    Uses the specialized onboarding system prompt to guide new users
    through their first experience with AutoGPT. Streams AI responses
    in real time over Server-Sent Events (SSE).

    Args:
        session_id: The onboarding session identifier.
        request: Request body containing message and optional context.
        user_id: Optional authenticated user ID.

    Returns:
        StreamingResponse: SSE-formatted response chunks.
    """
    session = await chat_service.get_session(session_id, user_id)

    if not session:
        raise NotFoundError(f"Session {session_id} not found.")
    if session.user_id is None and user_id is not None:
        session = await chat_service.assign_user_to_session(session_id, user_id)

    async def event_generator() -> AsyncGenerator[str, None]:
        async for chunk in chat_service.stream_chat_completion(
            session_id,
            request.message,
            is_user_message=request.is_user_message,
            user_id=user_id,
            session=session,
            context=request.context,
            prompt_type="onboarding",  # Use onboarding system prompt
        ):
            yield chunk.to_sse()

    return StreamingResponse(
        event_generator(),
        media_type="text/event-stream",
        headers={
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "X-Accel-Buffering": "no",
        },
    )


# ========== Health Check ==========


@@ -7,16 +7,17 @@ import orjson
from openai import AsyncOpenAI
from openai.types.chat import ChatCompletionChunk, ChatCompletionToolParam

from backend.data.understanding import (
    format_understanding_for_prompt,
    get_business_understanding,
)
from backend.util.exceptions import NotFoundError

from . import db as chat_db
from .config import ChatConfig
from .model import (
    ChatMessage,
    ChatSession,
    Usage,
    get_chat_session,
    upsert_chat_session,
)
from .model import ChatMessage, ChatSession, Usage
from .model import create_chat_session as model_create_chat_session
from .model import get_chat_session, upsert_chat_session
from .response_model import (
    StreamBaseResponse,
    StreamEnd,
@@ -36,15 +37,109 @@ config = ChatConfig()
client = AsyncOpenAI(api_key=config.api_key, base_url=config.base_url)


async def _is_first_session(user_id: str) -> bool:
    """Check if this is the user's first chat session.

    Returns True if the user has 1 or fewer sessions (meaning this is their first).
    """
    try:
        session_count = await chat_db.get_user_session_count(user_id)
        return session_count <= 1
    except Exception as e:
        logger.warning(f"Failed to check session count for user {user_id}: {e}")
        return False  # Default to non-onboarding if we can't check


async def _build_system_prompt(
    user_id: str | None, prompt_type: str = "default"
) -> str:
    """Build the full system prompt including business understanding if available.

    Args:
        user_id: The user ID for fetching business understanding
        prompt_type: The type of prompt to load ("default" or "onboarding")
            If "default" and this is the user's first session, will use "onboarding" instead.

    Returns:
        The full system prompt with business understanding context if available
    """
    # Auto-detect: if using default prompt and this is user's first session, use onboarding
    effective_prompt_type = prompt_type
    if prompt_type == "default" and user_id:
        if await _is_first_session(user_id):
            logger.info("First session detected for user, using onboarding prompt")
            effective_prompt_type = "onboarding"

    # Start with the base system prompt for the specified type
    base_prompt = config.get_system_prompt_for_type(effective_prompt_type)

    # If user is authenticated, try to fetch their business understanding
    if user_id:
        try:
            understanding = await get_business_understanding(user_id)
            if understanding:
                context = format_understanding_for_prompt(understanding)
                if context:
                    return (
                        f"{base_prompt}\n\n---\n\n"
                        f"{context}\n\n"
                        "Use this context to provide more personalized recommendations "
                        "and to better understand the user's business needs when "
                        "suggesting agents and automations."
                    )
        except Exception as e:
            logger.warning(f"Failed to fetch business understanding: {e}")

    return base_prompt
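
The prompt-selection rule above reduces to a small pure function; this is a hedged sketch of the decision only (the real helper also reads the base prompt from config and appends business-understanding context):

# Sketch of the prompt-type decision in _build_system_prompt (decision only).
def pick_prompt_type(prompt_type: str, user_id: str | None, is_first_session: bool) -> str:
    # Authenticated users on their very first session get the onboarding prompt.
    if prompt_type == "default" and user_id and is_first_session:
        return "onboarding"
    return prompt_type


assert pick_prompt_type("default", "user-1", True) == "onboarding"
assert pick_prompt_type("default", "user-1", False) == "default"
assert pick_prompt_type("onboarding", "user-1", False) == "onboarding"
assert pick_prompt_type("default", None, True) == "default"  # anonymous users keep the default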


async def _generate_session_title(message: str) -> str | None:
    """Generate a concise title for a chat session based on the first message.

    Args:
        message: The first user message in the session

    Returns:
        A short title (3-6 words) or None if generation fails
    """
    try:
        response = await client.chat.completions.create(
            model=config.title_model,
            messages=[
                {
                    "role": "system",
                    "content": (
                        "Generate a very short title (3-6 words) for a chat conversation "
                        "based on the user's first message. The title should capture the "
                        "main topic or intent. Return ONLY the title, no quotes or punctuation."
                    ),
                },
                {"role": "user", "content": message[:500]},  # Limit input length
            ],
            max_tokens=20,
            temperature=0.7,
        )
        title = response.choices[0].message.content
        if title:
            # Clean up the title
            title = title.strip().strip("\"'")
            # Limit length
            if len(title) > 50:
                title = title[:47] + "..."
            return title
        return None
    except Exception as e:
        logger.warning(f"Failed to generate session title: {e}")
        return None


async def create_chat_session(
    user_id: str | None = None,
) -> ChatSession:
    """
    Create a new chat session and persist it to the database.
    """
    session = ChatSession.new(user_id)
    # Persist the session immediately so it can be used for streaming
    return await upsert_chat_session(session)
    return await model_create_chat_session(user_id)


async def get_session(
@@ -57,6 +152,19 @@ async def get_session(
    return await get_chat_session(session_id, user_id)


async def get_user_sessions(
    user_id: str,
    limit: int = 50,
    offset: int = 0,
) -> list[ChatSession]:
    """
    Get all chat sessions for a user.
    """
    from .model import get_user_sessions as model_get_user_sessions

    return await model_get_user_sessions(user_id, limit, offset)


async def assign_user_to_session(
    session_id: str,
    user_id: str,
@@ -78,6 +186,8 @@ async def stream_chat_completion(
    user_id: str | None = None,
    retry_count: int = 0,
    session: ChatSession | None = None,
    context: dict[str, str] | None = None,  # {url: str, content: str}
    prompt_type: str = "default",
) -> AsyncGenerator[StreamBaseResponse, None]:
    """Main entry point for streaming chat completions with database handling.

@@ -89,6 +199,7 @@ async def stream_chat_completion(
        user_message: User's input message
        user_id: User ID for authentication (None for anonymous)
        session: Optional pre-loaded session object (for recursive calls to avoid Redis refetch)
        prompt_type: The type of prompt to use ("default" or "onboarding")

    Yields:
        StreamBaseResponse objects formatted as SSE
@@ -121,9 +232,18 @@ async def stream_chat_completion(
        )

    if message:
        # Build message content with context if provided
        message_content = message
        if context and context.get("url") and context.get("content"):
            context_text = f"Page URL: {context['url']}\n\nPage Content:\n{context['content']}\n\n---\n\nUser Message: {message}"
            message_content = context_text
            logger.info(
                f"Including page context: URL={context['url']}, content_length={len(context['content'])}"
            )

        session.messages.append(
            ChatMessage(
                role="user" if is_user_message else "assistant", content=message
                role="user" if is_user_message else "assistant", content=message_content
            )
        )
        logger.info(
@@ -141,6 +261,32 @@ async def stream_chat_completion(
    session = await upsert_chat_session(session)
    assert session, "Session not found"

    # Generate title for new sessions on first user message (non-blocking)
    # Check: is_user_message, no title yet, and this is the first user message
    if is_user_message and message and not session.title:
        user_messages = [m for m in session.messages if m.role == "user"]
        if len(user_messages) == 1:
            # First user message - generate title in background
            import asyncio

            async def _update_title():
                try:
                    title = await _generate_session_title(message)
                    if title:
                        session.title = title
                        await upsert_chat_session(session)
                        logger.info(
                            f"Generated title for session {session_id}: {title}"
                        )
                except Exception as e:
                    logger.warning(f"Failed to update session title: {e}")

            # Fire and forget - don't block the chat response
            asyncio.create_task(_update_title())

    # Build system prompt with business understanding
    system_prompt = await _build_system_prompt(user_id, prompt_type)

    assistant_response = ChatMessage(
        role="assistant",
        content="",
@@ -159,6 +305,7 @@ async def stream_chat_completion(
        async for chunk in _stream_chat_chunks(
            session=session,
            tools=tools,
            system_prompt=system_prompt,
        ):

            if isinstance(chunk, StreamTextChunk):
@@ -279,6 +426,7 @@ async def stream_chat_completion(
            user_id=user_id,
            retry_count=retry_count + 1,
            session=session,
            prompt_type=prompt_type,
        ):
            yield chunk
        return  # Exit after retry to avoid double-saving in finally block
@@ -324,6 +472,7 @@ async def stream_chat_completion(
            session_id=session.session_id,
            user_id=user_id,
            session=session,  # Pass session object to avoid Redis refetch
            prompt_type=prompt_type,
        ):
            yield chunk

@@ -331,6 +480,7 @@ async def stream_chat_completion(
async def _stream_chat_chunks(
    session: ChatSession,
    tools: list[ChatCompletionToolParam],
    system_prompt: str | None = None,
) -> AsyncGenerator[StreamBaseResponse, None]:
    """
    Pure streaming function for OpenAI chat completions with tool calling.
@@ -338,9 +488,9 @@ async def _stream_chat_chunks(
    This function is database-agnostic and focuses only on streaming logic.

    Args:
        messages: Conversation context as ChatCompletionMessageParam list
        session_id: Session ID
        user_id: User ID for tool execution
        session: Chat session with conversation history
        tools: Available tools for the model
        system_prompt: System prompt to prepend to messages

    Yields:
        SSE formatted JSON response objects
@@ -350,6 +500,17 @@ async def _stream_chat_chunks(

    logger.info("Starting pure chat stream")

    # Build messages with system prompt prepended
    messages = session.to_openai_messages()
    if system_prompt:
        from openai.types.chat import ChatCompletionSystemMessageParam

        system_message = ChatCompletionSystemMessageParam(
            role="system",
            content=system_prompt,
        )
        messages = [system_message] + messages

    # Loop to handle tool calls and continue conversation
    while True:
        try:
@@ -358,7 +519,7 @@ async def _stream_chat_chunks(
            # Create the stream with proper types
            stream = await client.chat.completions.create(
                model=model,
                messages=session.to_openai_messages(),
                messages=messages,
                tools=tools,
                tool_choice="auto",
                stream=True,
@@ -502,8 +663,12 @@ async def _yield_tool_call(
    """
    logger.info(f"Yielding tool call: {tool_calls[yield_idx]}")

    # Parse tool call arguments - exceptions will propagate to caller
    arguments = orjson.loads(tool_calls[yield_idx]["function"]["arguments"])
    # Parse tool call arguments - handle empty arguments gracefully
    raw_arguments = tool_calls[yield_idx]["function"]["arguments"]
    if raw_arguments:
        arguments = orjson.loads(raw_arguments)
    else:
        arguments = {}

    yield StreamToolCall(
        tool_id=tool_calls[yield_idx]["id"],

@@ -4,21 +4,30 @@ from openai.types.chat import ChatCompletionToolParam

from backend.api.features.chat.model import ChatSession

from .add_understanding import AddUnderstandingTool
from .agent_output import AgentOutputTool
from .base import BaseTool
from .find_agent import FindAgentTool
from .find_library_agent import FindLibraryAgentTool
from .run_agent import RunAgentTool

if TYPE_CHECKING:
    from backend.api.features.chat.response_model import StreamToolExecutionResult

# Initialize tool instances
add_understanding_tool = AddUnderstandingTool()
find_agent_tool = FindAgentTool()
find_library_agent_tool = FindLibraryAgentTool()
run_agent_tool = RunAgentTool()
agent_output_tool = AgentOutputTool()

# Export tools as OpenAI format
tools: list[ChatCompletionToolParam] = [
    add_understanding_tool.as_openai_tool(),
    find_agent_tool.as_openai_tool(),
    find_library_agent_tool.as_openai_tool(),
    run_agent_tool.as_openai_tool(),
    agent_output_tool.as_openai_tool(),
]


@@ -31,8 +40,11 @@ async def execute_tool(
) -> "StreamToolExecutionResult":

    tool_map: dict[str, BaseTool] = {
        "add_understanding": add_understanding_tool,
        "find_agent": find_agent_tool,
        "find_library_agent": find_library_agent_tool,
        "run_agent": run_agent_tool,
        "agent_output": agent_output_tool,
    }
    if tool_name not in tool_map:
        raise ValueError(f"Tool {tool_name} not found")
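
For orientation, this is roughly the ChatCompletionToolParam shape that each as_openai_tool() call above is expected to contribute to the exported tools list; the concrete values mirror the find_library_agent definition later in this diff and are illustrative, not generated output.

# Hedged sketch of one entry in the exported tools list (illustrative values).
example_tool_param = {
    "type": "function",
    "function": {
        "name": "find_library_agent",
        "description": "Search for agents in the user's library.",
        "parameters": {
            "type": "object",
            "properties": {"query": {"type": "string"}},
            "required": ["query"],
        },
    },
}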

@@ -3,6 +3,7 @@ from datetime import UTC, datetime
from os import getenv

import pytest
from prisma.types import ProfileCreateInput
from pydantic import SecretStr

from backend.api.features.chat.model import ChatSession
@@ -49,13 +50,13 @@ async def setup_test_data():
    # 1b. Create a profile with username for the user (required for store agent lookup)
    username = user.email.split("@")[0]
    await prisma.profile.create(
        data={
            "userId": user.id,
            "username": username,
            "name": f"Test User {username}",
            "description": "Test user profile",
            "links": [],  # Required field - empty array for test profiles
        }
        data=ProfileCreateInput(
            userId=user.id,
            username=username,
            name=f"Test User {username}",
            description="Test user profile",
            links=[],  # Required field - empty array for test profiles
        )
    )

    # 2. Create a test graph with agent input -> agent output
@@ -172,13 +173,13 @@ async def setup_llm_test_data():
    # 1b. Create a profile with username for the user (required for store agent lookup)
    username = user.email.split("@")[0]
    await prisma.profile.create(
        data={
            "userId": user.id,
            "username": username,
            "name": f"Test User {username}",
            "description": "Test user profile for LLM tests",
            "links": [],  # Required field - empty array for test profiles
        }
        data=ProfileCreateInput(
            userId=user.id,
            username=username,
            name=f"Test User {username}",
            description="Test user profile for LLM tests",
            links=[],  # Required field - empty array for test profiles
        )
    )

    # 2. Create test OpenAI credentials for the user
@@ -332,13 +333,13 @@ async def setup_firecrawl_test_data():
    # 1b. Create a profile with username for the user (required for store agent lookup)
    username = user.email.split("@")[0]
    await prisma.profile.create(
        data={
            "userId": user.id,
            "username": username,
            "name": f"Test User {username}",
            "description": "Test user profile for Firecrawl tests",
            "links": [],  # Required field - empty array for test profiles
        }
        data=ProfileCreateInput(
            userId=user.id,
            username=username,
            name=f"Test User {username}",
            description="Test user profile for Firecrawl tests",
            links=[],  # Required field - empty array for test profiles
        )
    )

    # NOTE: We deliberately do NOT create Firecrawl credentials for this user

@@ -0,0 +1,202 @@
"""Tool for capturing user business understanding incrementally."""

import logging
from typing import Any

from backend.api.features.chat.model import ChatSession
from backend.data.understanding import (
    BusinessUnderstandingInput,
    upsert_business_understanding,
)

from .base import BaseTool
from .models import ErrorResponse, ToolResponseBase, UnderstandingUpdatedResponse

logger = logging.getLogger(__name__)


class AddUnderstandingTool(BaseTool):
    """Tool for capturing user's business understanding incrementally."""

    @property
    def name(self) -> str:
        return "add_understanding"

    @property
    def description(self) -> str:
        return """Capture and store information about the user's business context,
workflows, pain points, and automation goals. Call this tool whenever the user
shares information about their business. Each call incrementally adds to the
existing understanding - you don't need to provide all fields at once.

Use this to build a comprehensive profile that helps recommend better agents
and automations for the user's specific needs."""

    @property
    def parameters(self) -> dict[str, Any]:
        return {
            "type": "object",
            "properties": {
                "user_name": {
                    "type": "string",
                    "description": "The user's name",
                },
                "job_title": {
                    "type": "string",
                    "description": "The user's job title (e.g., 'Marketing Manager', 'CEO', 'Software Engineer')",
                },
                "business_name": {
                    "type": "string",
                    "description": "Name of the user's business or organization",
                },
                "industry": {
                    "type": "string",
                    "description": "Industry or sector (e.g., 'e-commerce', 'healthcare', 'finance')",
                },
                "business_size": {
                    "type": "string",
                    "description": "Company size: '1-10', '11-50', '51-200', '201-1000', or '1000+'",
                },
                "user_role": {
                    "type": "string",
                    "description": "User's role in organization context (e.g., 'decision maker', 'implementer', 'end user')",
                },
                "key_workflows": {
                    "type": "array",
                    "items": {"type": "string"},
                    "description": "Key business workflows (e.g., 'lead qualification', 'content publishing')",
                },
                "daily_activities": {
                    "type": "array",
                    "items": {"type": "string"},
                    "description": "Regular daily activities the user performs",
                },
                "pain_points": {
                    "type": "array",
                    "items": {"type": "string"},
                    "description": "Current pain points or challenges",
                },
                "bottlenecks": {
                    "type": "array",
                    "items": {"type": "string"},
                    "description": "Process bottlenecks slowing things down",
                },
                "manual_tasks": {
                    "type": "array",
                    "items": {"type": "string"},
                    "description": "Manual or repetitive tasks that could be automated",
                },
                "automation_goals": {
                    "type": "array",
                    "items": {"type": "string"},
                    "description": "Desired automation outcomes or goals",
                },
                "current_software": {
                    "type": "array",
                    "items": {"type": "string"},
                    "description": "Software and tools currently in use",
                },
                "existing_automation": {
                    "type": "array",
                    "items": {"type": "string"},
                    "description": "Any existing automations or integrations",
                },
                "additional_notes": {
                    "type": "string",
                    "description": "Any other relevant context or notes",
                },
            },
            "required": [],
        }

    @property
    def requires_auth(self) -> bool:
        """Requires authentication to store user-specific data."""
        return True

    async def _execute(
        self,
        user_id: str | None,
        session: ChatSession,
        **kwargs,
    ) -> ToolResponseBase:
        """
        Capture and store business understanding incrementally.

        Each call merges new data with existing understanding:
        - String fields are overwritten if provided
        - List fields are appended (with deduplication)
        """
        session_id = session.session_id

        if not user_id:
            return ErrorResponse(
                message="Authentication required to save business understanding.",
                session_id=session_id,
            )

        # Check if any data was provided
        if not any(v is not None for v in kwargs.values()):
            return ErrorResponse(
                message="Please provide at least one field to update.",
                session_id=session_id,
            )

        # Build input model
        input_data = BusinessUnderstandingInput(
            user_name=kwargs.get("user_name"),
            job_title=kwargs.get("job_title"),
            business_name=kwargs.get("business_name"),
            industry=kwargs.get("industry"),
            business_size=kwargs.get("business_size"),
            user_role=kwargs.get("user_role"),
            key_workflows=kwargs.get("key_workflows"),
            daily_activities=kwargs.get("daily_activities"),
            pain_points=kwargs.get("pain_points"),
            bottlenecks=kwargs.get("bottlenecks"),
            manual_tasks=kwargs.get("manual_tasks"),
            automation_goals=kwargs.get("automation_goals"),
            current_software=kwargs.get("current_software"),
            existing_automation=kwargs.get("existing_automation"),
            additional_notes=kwargs.get("additional_notes"),
        )

        # Track which fields were updated
        updated_fields = [k for k, v in kwargs.items() if v is not None]

        # Upsert with merge
        understanding = await upsert_business_understanding(user_id, input_data)

        # Build current understanding summary for the response
        current_understanding = {
            "user_name": understanding.user_name,
            "job_title": understanding.job_title,
            "business_name": understanding.business_name,
            "industry": understanding.industry,
            "business_size": understanding.business_size,
            "user_role": understanding.user_role,
            "key_workflows": understanding.key_workflows,
            "daily_activities": understanding.daily_activities,
            "pain_points": understanding.pain_points,
            "bottlenecks": understanding.bottlenecks,
            "manual_tasks": understanding.manual_tasks,
            "automation_goals": understanding.automation_goals,
            "current_software": understanding.current_software,
            "existing_automation": understanding.existing_automation,
            "additional_notes": understanding.additional_notes,
        }

        # Filter out empty values for cleaner response
        current_understanding = {
            k: v
            for k, v in current_understanding.items()
            if v is not None and v != [] and v != ""
        }

        return UnderstandingUpdatedResponse(
            message=f"Updated understanding with: {', '.join(updated_fields)}. "
            "I now have a better picture of your business context.",
            session_id=session_id,
            updated_fields=updated_fields,
            current_understanding=current_understanding,
        )
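
A hedged example of two consecutive add_understanding calls as the model might issue them; all values are invented. Per the docstring above, string fields overwrite and list fields append, which is what yields the combined profile sketched in the trailing comment.

# Illustrative tool-call arguments only; values are invented for the example.
first_call = {
    "user_name": "Dana",
    "industry": "e-commerce",
    "pain_points": ["manual order tagging"],
}
second_call = {
    "business_size": "11-50",
    "pain_points": ["slow refund handling"],  # appended to the earlier list
}
# Expected merged understanding (roughly):
#   user_name="Dana", industry="e-commerce", business_size="11-50",
#   pain_points=["manual order tagging", "slow refund handling"]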
@@ -0,0 +1,455 @@
"""Tool for retrieving agent execution outputs from user's library."""

import logging
import re
from datetime import datetime, timedelta, timezone
from typing import Any

from pydantic import BaseModel, field_validator

from backend.api.features.chat.model import ChatSession
from backend.api.features.library import db as library_db
from backend.api.features.library.model import LibraryAgent
from backend.data import execution as execution_db
from backend.data.execution import ExecutionStatus, GraphExecution, GraphExecutionMeta

from .base import BaseTool
from .models import (
    AgentOutputResponse,
    ErrorResponse,
    ExecutionOutputInfo,
    NoResultsResponse,
    ToolResponseBase,
)
from .utils import fetch_graph_from_store_slug

logger = logging.getLogger(__name__)


class AgentOutputInput(BaseModel):
    """Input parameters for the agent_output tool."""

    agent_name: str = ""
    library_agent_id: str = ""
    store_slug: str = ""
    execution_id: str = ""
    run_time: str = "latest"

    @field_validator(
        "agent_name",
        "library_agent_id",
        "store_slug",
        "execution_id",
        "run_time",
        mode="before",
    )
    @classmethod
    def strip_strings(cls, v: Any) -> Any:
        """Strip whitespace from string fields."""
        return v.strip() if isinstance(v, str) else v


def parse_time_expression(
    time_expr: str | None,
) -> tuple[datetime | None, datetime | None]:
    """
    Parse time expression into datetime range (start, end).

    Supports:
    - "latest" or None -> returns (None, None) to get most recent
    - "yesterday" -> 24h window for yesterday
    - "today" -> Today from midnight
    - "last week" / "last 7 days" -> 7 day window
    - "last month" / "last 30 days" -> 30 day window
    - ISO date "YYYY-MM-DD" -> 24h window for that date
    """
    if not time_expr or time_expr.lower() == "latest":
        return None, None

    now = datetime.now(timezone.utc)
    expr = time_expr.lower().strip()

    # Relative expressions
    if expr == "yesterday":
        end = now.replace(hour=0, minute=0, second=0, microsecond=0)
        start = end - timedelta(days=1)
        return start, end

    if expr in ("last week", "last 7 days"):
        return now - timedelta(days=7), now

    if expr in ("last month", "last 30 days"):
        return now - timedelta(days=30), now

    if expr == "today":
        start = now.replace(hour=0, minute=0, second=0, microsecond=0)
        return start, now

    # Try ISO date format (YYYY-MM-DD)
    date_match = re.match(r"^(\d{4})-(\d{2})-(\d{2})$", expr)
    if date_match:
        year, month, day = map(int, date_match.groups())
        start = datetime(year, month, day, 0, 0, 0, tzinfo=timezone.utc)
        end = start + timedelta(days=1)
        return start, end

    # Try ISO datetime
    try:
        parsed = datetime.fromisoformat(expr.replace("Z", "+00:00"))
        if parsed.tzinfo is None:
            parsed = parsed.replace(tzinfo=timezone.utc)
        # Return +/- 1 hour window around the specified time
        return parsed - timedelta(hours=1), parsed + timedelta(hours=1)
    except ValueError:
        pass

    # Fallback: treat as "latest"
    return None, None
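
A few quick checks showing the ranges this helper produces; a sanity sketch that simply exercises the branches above, not part of the diff.

# Quick illustration of parse_time_expression results.
from datetime import datetime, timedelta, timezone

start, end = parse_time_expression("2024-05-01")
assert start == datetime(2024, 5, 1, tzinfo=timezone.utc)
assert end - start == timedelta(days=1)

assert parse_time_expression("latest") == (None, None)
assert parse_time_expression(None) == (None, None)
assert parse_time_expression("not a date") == (None, None)  # falls back to "latest"

start, end = parse_time_expression("last 7 days")
assert end - start == timedelta(days=7)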


class AgentOutputTool(BaseTool):
    """Tool for retrieving execution outputs from user's library agents."""

    @property
    def name(self) -> str:
        return "agent_output"

    @property
    def description(self) -> str:
        return """Retrieve execution outputs from agents in the user's library.

Identify the agent using one of:
- agent_name: Fuzzy search in user's library
- library_agent_id: Exact library agent ID
- store_slug: Marketplace format 'username/agent-name'

Select which run to retrieve using:
- execution_id: Specific execution ID
- run_time: 'latest' (default), 'yesterday', 'last week', or ISO date 'YYYY-MM-DD'
"""

    @property
    def parameters(self) -> dict[str, Any]:
        return {
            "type": "object",
            "properties": {
                "agent_name": {
                    "type": "string",
                    "description": "Agent name to search for in user's library (fuzzy match)",
                },
                "library_agent_id": {
                    "type": "string",
                    "description": "Exact library agent ID",
                },
                "store_slug": {
                    "type": "string",
                    "description": "Marketplace identifier: 'username/agent-slug'",
                },
                "execution_id": {
                    "type": "string",
                    "description": "Specific execution ID to retrieve",
                },
                "run_time": {
                    "type": "string",
                    "description": (
                        "Time filter: 'latest', 'yesterday', 'last week', or 'YYYY-MM-DD'"
                    ),
                },
            },
            "required": [],
        }

    @property
    def requires_auth(self) -> bool:
        return True

    async def _resolve_agent(
        self,
        user_id: str,
        agent_name: str | None,
        library_agent_id: str | None,
        store_slug: str | None,
    ) -> tuple[LibraryAgent | None, str | None]:
        """
        Resolve agent from provided identifiers.
        Returns (library_agent, error_message).
        """
        # Priority 1: Exact library agent ID
        if library_agent_id:
            try:
                agent = await library_db.get_library_agent(library_agent_id, user_id)
                return agent, None
            except Exception as e:
                logger.warning(f"Failed to get library agent by ID: {e}")
                return None, f"Library agent '{library_agent_id}' not found"

        # Priority 2: Store slug (username/agent-name)
        if store_slug and "/" in store_slug:
            username, agent_slug = store_slug.split("/", 1)
            graph, _ = await fetch_graph_from_store_slug(username, agent_slug)
            if not graph:
                return None, f"Agent '{store_slug}' not found in marketplace"

            # Find in user's library by graph_id
            agent = await library_db.get_library_agent_by_graph_id(user_id, graph.id)
            if not agent:
                return (
                    None,
                    f"Agent '{store_slug}' is not in your library. "
                    "Add it first to see outputs.",
                )
            return agent, None

        # Priority 3: Fuzzy name search in library
        if agent_name:
            try:
                response = await library_db.list_library_agents(
                    user_id=user_id,
                    search_term=agent_name,
                    page_size=5,
                )
                if not response.agents:
                    return (
                        None,
                        f"No agents matching '{agent_name}' found in your library",
                    )

                # Return best match (first result from search)
                return response.agents[0], None
            except Exception as e:
                logger.error(f"Error searching library agents: {e}")
                return None, f"Error searching for agent: {e}"

        return (
            None,
            "Please specify an agent name, library_agent_id, or store_slug",
        )

    async def _get_execution(
        self,
        user_id: str,
        graph_id: str,
        execution_id: str | None,
        time_start: datetime | None,
        time_end: datetime | None,
    ) -> tuple[GraphExecution | None, list[GraphExecutionMeta], str | None]:
        """
        Fetch execution(s) based on filters.
        Returns (single_execution, available_executions_meta, error_message).
        """
        # If specific execution_id provided, fetch it directly
        if execution_id:
            execution = await execution_db.get_graph_execution(
                user_id=user_id,
                execution_id=execution_id,
                include_node_executions=False,
            )
            if not execution:
                return None, [], f"Execution '{execution_id}' not found"
            return execution, [], None

        # Get completed executions with time filters
        executions = await execution_db.get_graph_executions(
            graph_id=graph_id,
            user_id=user_id,
            statuses=[ExecutionStatus.COMPLETED],
            created_time_gte=time_start,
            created_time_lte=time_end,
            limit=10,
        )

        if not executions:
            return None, [], None  # No error, just no executions

        # If only one execution, fetch full details
        if len(executions) == 1:
            full_execution = await execution_db.get_graph_execution(
                user_id=user_id,
                execution_id=executions[0].id,
                include_node_executions=False,
            )
            return full_execution, [], None

        # Multiple executions - return latest with full details, plus list of available
        full_execution = await execution_db.get_graph_execution(
            user_id=user_id,
            execution_id=executions[0].id,
            include_node_executions=False,
        )
        return full_execution, executions, None

    def _build_response(
        self,
        agent: LibraryAgent,
        execution: GraphExecution | None,
        available_executions: list[GraphExecutionMeta],
        session_id: str | None,
    ) -> AgentOutputResponse:
        """Build the response based on execution data."""
        library_agent_link = f"/library/agents/{agent.id}"

        if not execution:
            return AgentOutputResponse(
                message=f"No completed executions found for agent '{agent.name}'",
                session_id=session_id,
                agent_name=agent.name,
                agent_id=agent.graph_id,
                library_agent_id=agent.id,
                library_agent_link=library_agent_link,
                total_executions=0,
            )

        execution_info = ExecutionOutputInfo(
            execution_id=execution.id,
            status=execution.status.value,
            started_at=execution.started_at,
            ended_at=execution.ended_at,
            outputs=dict(execution.outputs),
            inputs_summary=execution.inputs if execution.inputs else None,
        )

        available_list = None
        if len(available_executions) > 1:
            available_list = [
                {
                    "id": e.id,
                    "status": e.status.value,
                    "started_at": e.started_at.isoformat() if e.started_at else None,
                }
                for e in available_executions[:5]
            ]

        message = f"Found execution outputs for agent '{agent.name}'"
        if len(available_executions) > 1:
            message += (
                f". Showing latest of {len(available_executions)} matching executions."
            )

        return AgentOutputResponse(
            message=message,
            session_id=session_id,
            agent_name=agent.name,
            agent_id=agent.graph_id,
            library_agent_id=agent.id,
            library_agent_link=library_agent_link,
            execution=execution_info,
            available_executions=available_list,
            total_executions=len(available_executions) if available_executions else 1,
        )

    async def _execute(
        self,
        user_id: str | None,
        session: ChatSession,
        **kwargs,
    ) -> ToolResponseBase:
        """Execute the agent_output tool."""
        session_id = session.session_id

        # Parse and validate input
        try:
            input_data = AgentOutputInput(**kwargs)
        except Exception as e:
            logger.error(f"Invalid input: {e}")
            return ErrorResponse(
                message="Invalid input parameters",
                error=str(e),
                session_id=session_id,
            )

        # Ensure user_id is present (should be guaranteed by requires_auth)
        if not user_id:
            return ErrorResponse(
                message="User authentication required",
                session_id=session_id,
            )

        # Check if at least one identifier is provided
        if not any(
            [
                input_data.agent_name,
                input_data.library_agent_id,
                input_data.store_slug,
                input_data.execution_id,
            ]
        ):
            return ErrorResponse(
                message=(
                    "Please specify at least one of: agent_name, "
                    "library_agent_id, store_slug, or execution_id"
                ),
                session_id=session_id,
            )

        # If only execution_id provided, we need to find the agent differently
        if (
            input_data.execution_id
            and not input_data.agent_name
            and not input_data.library_agent_id
            and not input_data.store_slug
        ):
            # Fetch execution directly to get graph_id
            execution = await execution_db.get_graph_execution(
                user_id=user_id,
                execution_id=input_data.execution_id,
                include_node_executions=False,
            )
            if not execution:
                return ErrorResponse(
                    message=f"Execution '{input_data.execution_id}' not found",
                    session_id=session_id,
                )

            # Find library agent by graph_id
            agent = await library_db.get_library_agent_by_graph_id(
                user_id, execution.graph_id
            )
            if not agent:
                return NoResultsResponse(
                    message=(
                        f"Execution found but agent not in your library. "
                        f"Graph ID: {execution.graph_id}"
                    ),
                    session_id=session_id,
                    suggestions=["Add the agent to your library to see more details"],
                )

            return self._build_response(agent, execution, [], session_id)

        # Resolve agent from identifiers
        agent, error = await self._resolve_agent(
            user_id=user_id,
            agent_name=input_data.agent_name or None,
            library_agent_id=input_data.library_agent_id or None,
            store_slug=input_data.store_slug or None,
        )

        if error or not agent:
            return NoResultsResponse(
                message=error or "Agent not found",
                session_id=session_id,
                suggestions=[
                    "Check the agent name or ID",
                    "Make sure the agent is in your library",
                ],
            )

        # Parse time expression
        time_start, time_end = parse_time_expression(input_data.run_time)

        # Fetch execution(s)
        execution, available_executions, exec_error = await self._get_execution(
            user_id=user_id,
            graph_id=agent.graph_id,
            execution_id=input_data.execution_id or None,
            time_start=time_start,
            time_end=time_end,
        )

        if exec_error:
            return ErrorResponse(
                message=exec_error,
                session_id=session_id,
            )

        return self._build_response(agent, execution, available_executions, session_id)
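
A hedged illustration of argument combinations the model could pass to this tool; all identifiers are invented.

# Illustrative argument sets for the agent_output tool (identifiers invented).
latest_run = {"agent_name": "Lead Qualifier", "run_time": "latest"}
yesterdays_run = {"store_slug": "autogpt/lead-qualifier", "run_time": "yesterday"}
specific_run = {"execution_id": "exec_123"}  # agent is resolved via the execution's graph_id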
@@ -0,0 +1,157 @@
"""Tool for searching agents in the user's library."""

import logging
from typing import Any

from backend.api.features.chat.model import ChatSession
from backend.api.features.library import db as library_db
from backend.util.exceptions import DatabaseError

from .base import BaseTool
from .models import (
    AgentCarouselResponse,
    AgentInfo,
    ErrorResponse,
    NoResultsResponse,
    ToolResponseBase,
)

logger = logging.getLogger(__name__)


class FindLibraryAgentTool(BaseTool):
    """Tool for searching agents in the user's library."""

    @property
    def name(self) -> str:
        return "find_library_agent"

    @property
    def description(self) -> str:
        return (
            "Search for agents in the user's library. Use this to find agents "
            "the user has already added to their library, including agents they "
            "created or added from the marketplace."
        )

    @property
    def parameters(self) -> dict[str, Any]:
        return {
            "type": "object",
            "properties": {
                "query": {
                    "type": "string",
                    "description": (
                        "Search query to find agents by name or description. "
                        "Use keywords for best results."
                    ),
                },
            },
            "required": ["query"],
        }

    @property
    def requires_auth(self) -> bool:
        return True

    async def _execute(
        self,
        user_id: str | None,
        session: ChatSession,
        **kwargs,
    ) -> ToolResponseBase:
        """Search for agents in the user's library.

        Args:
            user_id: User ID (required)
            session: Chat session
            query: Search query

        Returns:
            AgentCarouselResponse: List of agents found in the library
            NoResultsResponse: No agents found
            ErrorResponse: Error message
        """
        query = kwargs.get("query", "").strip()
        session_id = session.session_id

        if not query:
            return ErrorResponse(
                message="Please provide a search query",
                session_id=session_id,
            )

        if not user_id:
            return ErrorResponse(
                message="User authentication required to search library",
                session_id=session_id,
            )

        agents = []
        try:
            logger.info(f"Searching user library for: {query}")
            library_results = await library_db.list_library_agents(
                user_id=user_id,
                search_term=query,
                page_size=10,
            )

            logger.info(
                f"Find library agents tool found {len(library_results.agents)} agents"
            )

            for agent in library_results.agents:
                agents.append(
                    AgentInfo(
                        id=agent.id,
                        name=agent.name,
                        description=agent.description or "",
                        source="library",
                        in_library=True,
                        creator=agent.creator_name,
                        status=agent.status.value,
                        can_access_graph=agent.can_access_graph,
                        has_external_trigger=agent.has_external_trigger,
                        new_output=agent.new_output,
                        graph_id=agent.graph_id,
                    ),
                )

        except DatabaseError as e:
            logger.error(f"Error searching library agents: {e}", exc_info=True)
            return ErrorResponse(
                message="Failed to search library. Please try again.",
                error=str(e),
                session_id=session_id,
            )

        if not agents:
            return NoResultsResponse(
                message=(
                    f"No agents found matching '{query}' in your library. "
                    "Try different keywords or use find_agent to search the marketplace."
                ),
                session_id=session_id,
                suggestions=[
                    "Try more general terms",
                    "Use find_agent to search the marketplace",
                    "Check your library at /library",
                ],
            )

        title = (
            f"Found {len(agents)} agent{'s' if len(agents) != 1 else ''} "
            f"in your library for '{query}'"
        )

        return AgentCarouselResponse(
            message=(
                "Found agents in the user's library. You can provide a link to "
                "view an agent at: /library/agents/{agent_id}. "
                "Use agent_output to get execution results, or run_agent to execute."
            ),
            title=title,
            agents=agents,
            count=len(agents),
            session_id=session_id,
        )
@@ -1,5 +1,6 @@
"""Pydantic models for tool responses."""

from datetime import datetime
from enum import Enum
from typing import Any

@@ -19,6 +20,15 @@ class ResponseType(str, Enum):
    ERROR = "error"
    NO_RESULTS = "no_results"
    SUCCESS = "success"
    DOC_SEARCH_RESULTS = "doc_search_results"
    AGENT_OUTPUT = "agent_output"
    BLOCK_LIST = "block_list"
    BLOCK_OUTPUT = "block_output"
    UNDERSTANDING_UPDATED = "understanding_updated"
    # Agent generation responses
    AGENT_PREVIEW = "agent_preview"
    AGENT_SAVED = "agent_saved"
    CLARIFICATION_NEEDED = "clarification_needed"


# Base response model
@@ -173,3 +183,128 @@ class ErrorResponse(ToolResponseBase):
    type: ResponseType = ResponseType.ERROR
    error: str | None = None
    details: dict[str, Any] | None = None


# Documentation search models
class DocSearchResult(BaseModel):
    """A single documentation search result."""

    title: str
    path: str
    section: str
    snippet: str  # Short excerpt for UI display
    content: str  # Full text content for LLM to read and understand
    score: float
    doc_url: str | None = None


class DocSearchResultsResponse(ToolResponseBase):
    """Response for search_docs tool."""

    type: ResponseType = ResponseType.DOC_SEARCH_RESULTS
    results: list[DocSearchResult]
    count: int
    query: str


# Agent output models
class ExecutionOutputInfo(BaseModel):
    """Summary of a single execution's outputs."""

    execution_id: str
    status: str
    started_at: datetime | None = None
    ended_at: datetime | None = None
    outputs: dict[str, list[Any]]
    inputs_summary: dict[str, Any] | None = None


class AgentOutputResponse(ToolResponseBase):
    """Response for agent_output tool."""

    type: ResponseType = ResponseType.AGENT_OUTPUT
    agent_name: str
    agent_id: str
    library_agent_id: str | None = None
    library_agent_link: str | None = None
    execution: ExecutionOutputInfo | None = None
    available_executions: list[dict[str, Any]] | None = None
    total_executions: int = 0


# Block models
class BlockInfoSummary(BaseModel):
    """Summary of a block for search results."""

    id: str
    name: str
    description: str
    categories: list[str]
    input_schema: dict[str, Any]
    output_schema: dict[str, Any]


class BlockListResponse(ToolResponseBase):
    """Response for find_block tool."""

    type: ResponseType = ResponseType.BLOCK_LIST
    blocks: list[BlockInfoSummary]
    count: int
    query: str


class BlockOutputResponse(ToolResponseBase):
    """Response for run_block tool."""

    type: ResponseType = ResponseType.BLOCK_OUTPUT
    block_id: str
    block_name: str
    outputs: dict[str, list[Any]]
    success: bool = True


# Business understanding models
class UnderstandingUpdatedResponse(ToolResponseBase):
    """Response for add_understanding tool."""

    type: ResponseType = ResponseType.UNDERSTANDING_UPDATED
    updated_fields: list[str] = Field(default_factory=list)
    current_understanding: dict[str, Any] = Field(default_factory=dict)


# Agent generation models
class ClarifyingQuestion(BaseModel):
    """A question that needs user clarification."""

    question: str
    keyword: str
    example: str | None = None


class AgentPreviewResponse(ToolResponseBase):
    """Response for previewing a generated agent before saving."""

    type: ResponseType = ResponseType.AGENT_PREVIEW
    agent_json: dict[str, Any]
    agent_name: str
    description: str
    node_count: int
    link_count: int = 0


class AgentSavedResponse(ToolResponseBase):
    """Response when an agent is saved to the library."""

    type: ResponseType = ResponseType.AGENT_SAVED
    agent_id: str
    agent_name: str
    library_agent_id: str
    library_agent_link: str
    agent_page_link: str  # Link to the agent builder/editor page


class ClarificationNeededResponse(ToolResponseBase):
    """Response when the LLM needs more information from the user."""

    type: ResponseType = ResponseType.CLARIFICATION_NEEDED
    questions: list[ClarifyingQuestion] = Field(default_factory=list)
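
For context, a hedged sketch of how the type discriminator surfaces when one of these models is built the way the add_understanding tool does above; field values are invented.

# Illustrative only; mirrors the constructor call in AddUnderstandingTool._execute.
response = UnderstandingUpdatedResponse(
    message="Updated understanding with: industry.",
    session_id="sess_123",
    updated_fields=["industry"],
    current_understanding={"industry": "e-commerce"},
)
assert response.type.value == "understanding_updated"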
|
||||
|
||||
@@ -7,6 +7,7 @@ from pydantic import BaseModel, Field, field_validator
|
||||
|
||||
from backend.api.features.chat.config import ChatConfig
|
||||
from backend.api.features.chat.model import ChatSession
|
||||
from backend.api.features.library import db as library_db
|
||||
from backend.data.graph import GraphModel
|
||||
from backend.data.model import CredentialsMetaInput
|
||||
from backend.data.user import get_user_by_id
|
||||
@@ -57,6 +58,7 @@ class RunAgentInput(BaseModel):
|
||||
"""Input parameters for the run_agent tool."""
|
||||
|
||||
username_agent_slug: str = ""
|
||||
library_agent_id: str = ""
|
||||
inputs: dict[str, Any] = Field(default_factory=dict)
|
||||
use_defaults: bool = False
|
||||
schedule_name: str = ""
|
||||
@@ -64,7 +66,12 @@ class RunAgentInput(BaseModel):
|
||||
timezone: str = "UTC"
|
||||
|
||||
@field_validator(
|
||||
"username_agent_slug", "schedule_name", "cron", "timezone", mode="before"
|
||||
"username_agent_slug",
|
||||
"library_agent_id",
|
||||
"schedule_name",
|
||||
"cron",
|
||||
"timezone",
|
||||
mode="before",
|
||||
)
|
||||
@classmethod
|
||||
def strip_strings(cls, v: Any) -> Any:
|
||||
@@ -90,7 +97,7 @@ class RunAgentTool(BaseTool):
|
||||
|
||||
@property
|
||||
def description(self) -> str:
|
||||
return """Run or schedule an agent from the marketplace.
|
||||
return """Run or schedule an agent from the marketplace or user's library.
|
||||
|
||||
The tool automatically handles the setup flow:
|
||||
- Returns missing inputs if required fields are not provided
|
||||
@@ -98,6 +105,10 @@ class RunAgentTool(BaseTool):
|
||||
- Executes immediately if all requirements are met
|
||||
- Schedules execution if cron expression is provided
|
||||
|
||||
Identify the agent using either:
|
||||
- username_agent_slug: Marketplace format 'username/agent-name'
|
||||
- library_agent_id: ID of an agent in the user's library
|
||||
|
||||
For scheduled execution, provide: schedule_name, cron, and optionally timezone."""
|
||||
|
||||
@property
|
||||
@@ -109,6 +120,10 @@ class RunAgentTool(BaseTool):
|
||||
"type": "string",
|
||||
"description": "Agent identifier in format 'username/agent-name'",
|
||||
},
|
||||
"library_agent_id": {
|
||||
"type": "string",
|
||||
"description": "Library agent ID from user's library",
|
||||
},
|
||||
"inputs": {
|
||||
"type": "object",
|
||||
"description": "Input values for the agent",
|
||||
@@ -131,7 +146,7 @@ class RunAgentTool(BaseTool):
|
||||
"description": "IANA timezone for schedule (default: UTC)",
|
||||
},
|
||||
},
|
||||
"required": ["username_agent_slug"],
|
||||
"required": [],
|
||||
}
|
||||
|
||||
@property
|
||||
@@ -149,10 +164,16 @@ class RunAgentTool(BaseTool):
|
||||
params = RunAgentInput(**kwargs)
|
||||
session_id = session.session_id
|
||||
|
||||
# Validate agent slug format
|
||||
if not params.username_agent_slug or "/" not in params.username_agent_slug:
|
||||
# Validate at least one identifier is provided
|
||||
has_slug = params.username_agent_slug and "/" in params.username_agent_slug
|
||||
has_library_id = bool(params.library_agent_id)
|
||||
|
||||
if not has_slug and not has_library_id:
|
||||
return ErrorResponse(
|
||||
message="Please provide an agent slug in format 'username/agent-name'",
|
||||
message=(
|
||||
"Please provide either a username_agent_slug "
|
||||
"(format 'username/agent-name') or a library_agent_id"
|
||||
),
|
||||
session_id=session_id,
|
||||
)
|
||||
|
||||
@@ -167,13 +188,41 @@ class RunAgentTool(BaseTool):
|
||||
is_schedule = bool(params.schedule_name or params.cron)
|
||||
|
||||
try:
|
||||
# Step 1: Fetch agent details (always happens first)
|
||||
username, agent_name = params.username_agent_slug.split("/", 1)
|
||||
graph, store_agent = await fetch_graph_from_store_slug(username, agent_name)
|
||||
# Step 1: Fetch agent details
|
||||
graph: GraphModel | None = None
|
||||
library_agent = None
|
||||
|
||||
# Priority: library_agent_id if provided
|
||||
if has_library_id:
|
||||
library_agent = await library_db.get_library_agent(
|
||||
params.library_agent_id, user_id
|
||||
)
|
||||
if not library_agent:
|
||||
return ErrorResponse(
|
||||
message=f"Library agent '{params.library_agent_id}' not found",
|
||||
session_id=session_id,
|
||||
)
|
||||
# Get the graph from the library agent
|
||||
from backend.data.graph import get_graph
|
||||
|
||||
graph = await get_graph(
|
||||
library_agent.graph_id,
|
||||
library_agent.graph_version,
|
||||
user_id=user_id,
|
||||
)
|
||||
else:
|
||||
# Fetch from marketplace slug
|
||||
username, agent_name = params.username_agent_slug.split("/", 1)
|
||||
graph, _ = await fetch_graph_from_store_slug(username, agent_name)
|
||||
|
||||
if not graph:
|
||||
identifier = (
|
||||
params.library_agent_id
|
||||
if has_library_id
|
||||
else params.username_agent_slug
|
||||
)
|
||||
return ErrorResponse(
|
||||
message=f"Agent '{params.username_agent_slug}' not found in marketplace",
|
||||
message=f"Agent '{identifier}' not found",
|
||||
session_id=session_id,
|
||||
)
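Taken together, the new lookup gives library_agent_id priority over the marketplace slug. A minimal sketch of that resolution order, assuming the same helpers already used in this diff (resolve_agent_graph itself and its return convention are hypothetical, not part of the change):

    # library_db, get_graph and fetch_graph_from_store_slug are the helpers shown above.
    async def resolve_agent_graph(params, user_id):
        # Prefer the explicit library agent ID; it is unambiguous for this user.
        if params.library_agent_id:
            library_agent = await library_db.get_library_agent(params.library_agent_id, user_id)
            if library_agent is None:
                return None
            return await get_graph(
                library_agent.graph_id, library_agent.graph_version, user_id=user_id
            )
        # Otherwise fall back to the marketplace slug 'username/agent-name'.
        username, agent_name = params.username_agent_slug.split("/", 1)
        graph, _ = await fetch_graph_from_store_slug(username, agent_name)
        return graph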
@@ -39,7 +39,7 @@ import backend.data.user
|
||||
import backend.integrations.webhooks.utils
|
||||
import backend.util.service
|
||||
import backend.util.settings
|
||||
from backend.blocks.llm import LlmModel
|
||||
from backend.blocks.llm import DEFAULT_LLM_MODEL
|
||||
from backend.data.model import Credentials
|
||||
from backend.integrations.providers import ProviderName
|
||||
from backend.monitoring.instrumentation import instrument_fastapi
|
||||
@@ -113,7 +113,7 @@ async def lifespan_context(app: fastapi.FastAPI):
|
||||
|
||||
await backend.data.user.migrate_and_encrypt_user_integrations()
|
||||
await backend.data.graph.fix_llm_provider_credentials()
|
||||
await backend.data.graph.migrate_llm_models(LlmModel.GPT4O)
|
||||
await backend.data.graph.migrate_llm_models(DEFAULT_LLM_MODEL)
|
||||
await backend.integrations.webhooks.utils.migrate_legacy_triggered_graphs()
|
||||
|
||||
with launch_darkly_context():
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
from typing import Any
|
||||
|
||||
from backend.blocks.llm import (
|
||||
DEFAULT_LLM_MODEL,
|
||||
TEST_CREDENTIALS,
|
||||
TEST_CREDENTIALS_INPUT,
|
||||
AIBlockBase,
|
||||
@@ -49,7 +50,7 @@ class AIConditionBlock(AIBlockBase):
|
||||
)
|
||||
model: LlmModel = SchemaField(
|
||||
title="LLM Model",
|
||||
default=LlmModel.GPT4O,
|
||||
default=DEFAULT_LLM_MODEL,
|
||||
description="The language model to use for evaluating the condition.",
|
||||
advanced=False,
|
||||
)
|
||||
@@ -81,7 +82,7 @@ class AIConditionBlock(AIBlockBase):
|
||||
"condition": "the input is an email address",
|
||||
"yes_value": "Valid email",
|
||||
"no_value": "Not an email",
|
||||
"model": LlmModel.GPT4O,
|
||||
"model": DEFAULT_LLM_MODEL,
|
||||
"credentials": TEST_CREDENTIALS_INPUT,
|
||||
},
|
||||
test_credentials=TEST_CREDENTIALS,
|
||||
|
||||
@@ -182,13 +182,10 @@ class DataForSeoRelatedKeywordsBlock(Block):
|
||||
if results and len(results) > 0:
|
||||
# results is a list, get the first element
|
||||
first_result = results[0] if isinstance(results, list) else results
|
||||
items = (
|
||||
first_result.get("items", [])
|
||||
if isinstance(first_result, dict)
|
||||
else []
|
||||
)
|
||||
# Ensure items is never None
|
||||
if items is None:
|
||||
# Handle missing key, null value, or valid list value
|
||||
if isinstance(first_result, dict):
|
||||
items = first_result.get("items") or []
|
||||
else:
|
||||
items = []
|
||||
for item in items:
|
||||
# Extract keyword_data from the item
autogpt_platform/backend/backend/blocks/google/docs.py (new file, 2896 lines; diff suppressed because it is too large)
@@ -92,8 +92,9 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta):
|
||||
O1 = "o1"
|
||||
O1_MINI = "o1-mini"
|
||||
# GPT-5 models
|
||||
GPT5 = "gpt-5-2025-08-07"
|
||||
GPT5_2 = "gpt-5.2-2025-12-11"
|
||||
GPT5_1 = "gpt-5.1-2025-11-13"
|
||||
GPT5 = "gpt-5-2025-08-07"
|
||||
GPT5_MINI = "gpt-5-mini-2025-08-07"
|
||||
GPT5_NANO = "gpt-5-nano-2025-08-07"
|
||||
GPT5_CHAT = "gpt-5-chat-latest"
|
||||
@@ -194,8 +195,9 @@ MODEL_METADATA = {
|
||||
LlmModel.O1: ModelMetadata("openai", 200000, 100000), # o1-2024-12-17
|
||||
LlmModel.O1_MINI: ModelMetadata("openai", 128000, 65536), # o1-mini-2024-09-12
|
||||
# GPT-5 models
|
||||
LlmModel.GPT5: ModelMetadata("openai", 400000, 128000),
|
||||
LlmModel.GPT5_2: ModelMetadata("openai", 400000, 128000),
|
||||
LlmModel.GPT5_1: ModelMetadata("openai", 400000, 128000),
|
||||
LlmModel.GPT5: ModelMetadata("openai", 400000, 128000),
|
||||
LlmModel.GPT5_MINI: ModelMetadata("openai", 400000, 128000),
|
||||
LlmModel.GPT5_NANO: ModelMetadata("openai", 400000, 128000),
|
||||
LlmModel.GPT5_CHAT: ModelMetadata("openai", 400000, 16384),
|
||||
@@ -303,6 +305,8 @@ MODEL_METADATA = {
|
||||
LlmModel.V0_1_0_MD: ModelMetadata("v0", 128000, 64000),
|
||||
}
|
||||
|
||||
DEFAULT_LLM_MODEL = LlmModel.GPT5_2
|
||||
|
||||
for model in LlmModel:
|
||||
if model not in MODEL_METADATA:
|
||||
raise ValueError(f"Missing MODEL_METADATA metadata for model: {model}")
@@ -790,7 +794,7 @@ class AIStructuredResponseGeneratorBlock(AIBlockBase):
|
||||
)
|
||||
model: LlmModel = SchemaField(
|
||||
title="LLM Model",
|
||||
default=LlmModel.GPT4O,
|
||||
default=DEFAULT_LLM_MODEL,
|
||||
description="The language model to use for answering the prompt.",
|
||||
advanced=False,
|
||||
)
|
||||
@@ -855,7 +859,7 @@ class AIStructuredResponseGeneratorBlock(AIBlockBase):
|
||||
input_schema=AIStructuredResponseGeneratorBlock.Input,
|
||||
output_schema=AIStructuredResponseGeneratorBlock.Output,
|
||||
test_input={
|
||||
"model": LlmModel.GPT4O,
|
||||
"model": DEFAULT_LLM_MODEL,
|
||||
"credentials": TEST_CREDENTIALS_INPUT,
|
||||
"expected_format": {
|
||||
"key1": "value1",
|
||||
@@ -1221,7 +1225,7 @@ class AITextGeneratorBlock(AIBlockBase):
|
||||
)
|
||||
model: LlmModel = SchemaField(
|
||||
title="LLM Model",
|
||||
default=LlmModel.GPT4O,
|
||||
default=DEFAULT_LLM_MODEL,
|
||||
description="The language model to use for answering the prompt.",
|
||||
advanced=False,
|
||||
)
|
||||
@@ -1317,7 +1321,7 @@ class AITextSummarizerBlock(AIBlockBase):
|
||||
)
|
||||
model: LlmModel = SchemaField(
|
||||
title="LLM Model",
|
||||
default=LlmModel.GPT4O,
|
||||
default=DEFAULT_LLM_MODEL,
|
||||
description="The language model to use for summarizing the text.",
|
||||
)
|
||||
focus: str = SchemaField(
|
||||
@@ -1534,7 +1538,7 @@ class AIConversationBlock(AIBlockBase):
|
||||
)
|
||||
model: LlmModel = SchemaField(
|
||||
title="LLM Model",
|
||||
default=LlmModel.GPT4O,
|
||||
default=DEFAULT_LLM_MODEL,
|
||||
description="The language model to use for the conversation.",
|
||||
)
|
||||
credentials: AICredentials = AICredentialsField()
|
||||
@@ -1572,7 +1576,7 @@ class AIConversationBlock(AIBlockBase):
|
||||
},
|
||||
{"role": "user", "content": "Where was it played?"},
|
||||
],
|
||||
"model": LlmModel.GPT4O,
|
||||
"model": DEFAULT_LLM_MODEL,
|
||||
"credentials": TEST_CREDENTIALS_INPUT,
|
||||
},
|
||||
test_credentials=TEST_CREDENTIALS,
|
||||
@@ -1635,7 +1639,7 @@ class AIListGeneratorBlock(AIBlockBase):
|
||||
)
|
||||
model: LlmModel = SchemaField(
|
||||
title="LLM Model",
|
||||
default=LlmModel.GPT4O,
|
||||
default=DEFAULT_LLM_MODEL,
|
||||
description="The language model to use for generating the list.",
|
||||
advanced=True,
|
||||
)
|
||||
@@ -1692,7 +1696,7 @@ class AIListGeneratorBlock(AIBlockBase):
|
||||
"drawing explorers to uncover its mysteries. Each planet showcases the limitless possibilities of "
|
||||
"fictional worlds."
|
||||
),
|
||||
"model": LlmModel.GPT4O,
|
||||
"model": DEFAULT_LLM_MODEL,
|
||||
"credentials": TEST_CREDENTIALS_INPUT,
|
||||
"max_retries": 3,
|
||||
"force_json_output": False,
|
||||
|
||||
@@ -226,7 +226,7 @@ class SmartDecisionMakerBlock(Block):
|
||||
)
|
||||
model: llm.LlmModel = SchemaField(
|
||||
title="LLM Model",
|
||||
default=llm.LlmModel.GPT4O,
|
||||
default=llm.DEFAULT_LLM_MODEL,
|
||||
description="The language model to use for answering the prompt.",
|
||||
advanced=False,
|
||||
)
|
||||
|
||||
@@ -196,6 +196,15 @@ class TestXMLParserBlockSecurity:
|
||||
async for _ in block.run(XMLParserBlock.Input(input_xml=large_xml)):
|
||||
pass
|
||||
|
||||
async def test_rejects_text_outside_root(self):
|
||||
"""Ensure parser surfaces readable errors for invalid root text."""
|
||||
block = XMLParserBlock()
|
||||
invalid_xml = "<root><child>value</child></root> trailing"
|
||||
|
||||
with pytest.raises(ValueError, match="text outside the root element"):
|
||||
async for _ in block.run(XMLParserBlock.Input(input_xml=invalid_xml)):
|
||||
pass
|
||||
|
||||
|
||||
class TestStoreMediaFileSecurity:
|
||||
"""Test file storage security limits."""
|
||||
|
||||
@@ -28,7 +28,7 @@ class TestLLMStatsTracking:
|
||||
|
||||
response = await llm.llm_call(
|
||||
credentials=llm.TEST_CREDENTIALS,
|
||||
llm_model=llm.LlmModel.GPT4O,
|
||||
llm_model=llm.DEFAULT_LLM_MODEL,
|
||||
prompt=[{"role": "user", "content": "Hello"}],
|
||||
max_tokens=100,
|
||||
)
|
||||
@@ -65,7 +65,7 @@ class TestLLMStatsTracking:
|
||||
input_data = llm.AIStructuredResponseGeneratorBlock.Input(
|
||||
prompt="Test prompt",
|
||||
expected_format={"key1": "desc1", "key2": "desc2"},
|
||||
model=llm.LlmModel.GPT4O,
|
||||
model=llm.DEFAULT_LLM_MODEL,
|
||||
credentials=llm.TEST_CREDENTIALS_INPUT,  # type: ignore
|
||||
)
|
||||
|
||||
@@ -109,7 +109,7 @@ class TestLLMStatsTracking:
|
||||
# Run the block
|
||||
input_data = llm.AITextGeneratorBlock.Input(
|
||||
prompt="Generate text",
|
||||
model=llm.LlmModel.GPT4O,
|
||||
model=llm.DEFAULT_LLM_MODEL,
|
||||
credentials=llm.TEST_CREDENTIALS_INPUT, # type: ignore
|
||||
)
|
||||
|
||||
@@ -170,7 +170,7 @@ class TestLLMStatsTracking:
|
||||
input_data = llm.AIStructuredResponseGeneratorBlock.Input(
|
||||
prompt="Test prompt",
|
||||
expected_format={"key1": "desc1", "key2": "desc2"},
|
||||
model=llm.LlmModel.GPT4O,
|
||||
model=llm.DEFAULT_LLM_MODEL,
|
||||
credentials=llm.TEST_CREDENTIALS_INPUT, # type: ignore
|
||||
retry=2,
|
||||
)
|
||||
@@ -228,7 +228,7 @@ class TestLLMStatsTracking:
|
||||
|
||||
input_data = llm.AITextSummarizerBlock.Input(
|
||||
text=long_text,
|
||||
model=llm.LlmModel.GPT4O,
|
||||
model=llm.DEFAULT_LLM_MODEL,
|
||||
credentials=llm.TEST_CREDENTIALS_INPUT, # type: ignore
|
||||
max_tokens=100, # Small chunks
|
||||
chunk_overlap=10,
|
||||
@@ -299,7 +299,7 @@ class TestLLMStatsTracking:
|
||||
# Test with very short text (should only need 1 chunk + 1 final summary)
|
||||
input_data = llm.AITextSummarizerBlock.Input(
|
||||
text="This is a short text.",
|
||||
model=llm.LlmModel.GPT4O,
|
||||
model=llm.DEFAULT_LLM_MODEL,
|
||||
credentials=llm.TEST_CREDENTIALS_INPUT, # type: ignore
|
||||
max_tokens=1000, # Large enough to avoid chunking
|
||||
)
|
||||
@@ -346,7 +346,7 @@ class TestLLMStatsTracking:
|
||||
{"role": "assistant", "content": "Hi there!"},
|
||||
{"role": "user", "content": "How are you?"},
|
||||
],
|
||||
model=llm.LlmModel.GPT4O,
|
||||
model=llm.DEFAULT_LLM_MODEL,
|
||||
credentials=llm.TEST_CREDENTIALS_INPUT, # type: ignore
|
||||
)
|
||||
|
||||
@@ -387,7 +387,7 @@ class TestLLMStatsTracking:
|
||||
# Run the block
|
||||
input_data = llm.AIListGeneratorBlock.Input(
|
||||
focus="test items",
|
||||
model=llm.LlmModel.GPT4O,
|
||||
model=llm.DEFAULT_LLM_MODEL,
|
||||
credentials=llm.TEST_CREDENTIALS_INPUT, # type: ignore
|
||||
max_retries=3,
|
||||
)
|
||||
@@ -469,7 +469,7 @@ class TestLLMStatsTracking:
|
||||
input_data = llm.AIStructuredResponseGeneratorBlock.Input(
|
||||
prompt="Test",
|
||||
expected_format={"result": "desc"},
|
||||
model=llm.LlmModel.GPT4O,
|
||||
model=llm.DEFAULT_LLM_MODEL,
|
||||
credentials=llm.TEST_CREDENTIALS_INPUT, # type: ignore
|
||||
)
|
||||
|
||||
@@ -513,7 +513,7 @@ class TestAITextSummarizerValidation:
|
||||
# Create input data
|
||||
input_data = llm.AITextSummarizerBlock.Input(
|
||||
text="Some text to summarize",
|
||||
model=llm.LlmModel.GPT4O,
|
||||
model=llm.DEFAULT_LLM_MODEL,
|
||||
credentials=llm.TEST_CREDENTIALS_INPUT, # type: ignore
|
||||
style=llm.SummaryStyle.BULLET_POINTS,
|
||||
)
|
||||
@@ -558,7 +558,7 @@ class TestAITextSummarizerValidation:
|
||||
# Create input data
|
||||
input_data = llm.AITextSummarizerBlock.Input(
|
||||
text="Some text to summarize",
|
||||
model=llm.LlmModel.GPT4O,
|
||||
model=llm.DEFAULT_LLM_MODEL,
|
||||
credentials=llm.TEST_CREDENTIALS_INPUT, # type: ignore
|
||||
style=llm.SummaryStyle.BULLET_POINTS,
|
||||
max_tokens=1000,
|
||||
@@ -593,7 +593,7 @@ class TestAITextSummarizerValidation:
|
||||
# Create input data
|
||||
input_data = llm.AITextSummarizerBlock.Input(
|
||||
text="Some text to summarize",
|
||||
model=llm.LlmModel.GPT4O,
|
||||
model=llm.DEFAULT_LLM_MODEL,
|
||||
credentials=llm.TEST_CREDENTIALS_INPUT, # type: ignore
|
||||
)
|
||||
|
||||
@@ -623,7 +623,7 @@ class TestAITextSummarizerValidation:
|
||||
# Create input data
|
||||
input_data = llm.AITextSummarizerBlock.Input(
|
||||
text="Some text to summarize",
|
||||
model=llm.LlmModel.GPT4O,
|
||||
model=llm.DEFAULT_LLM_MODEL,
|
||||
credentials=llm.TEST_CREDENTIALS_INPUT, # type: ignore
|
||||
max_tokens=1000,
|
||||
)
|
||||
@@ -654,7 +654,7 @@ class TestAITextSummarizerValidation:
|
||||
# Create input data
|
||||
input_data = llm.AITextSummarizerBlock.Input(
|
||||
text="Some text to summarize",
|
||||
model=llm.LlmModel.GPT4O,
|
||||
model=llm.DEFAULT_LLM_MODEL,
|
||||
credentials=llm.TEST_CREDENTIALS_INPUT, # type: ignore
|
||||
)
|
||||
|
||||
|
||||
@@ -233,7 +233,7 @@ async def test_smart_decision_maker_tracks_llm_stats():
|
||||
# Create test input
|
||||
input_data = SmartDecisionMakerBlock.Input(
|
||||
prompt="Should I continue with this task?",
|
||||
model=llm_module.LlmModel.GPT4O,
|
||||
model=llm_module.DEFAULT_LLM_MODEL,
|
||||
credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore
|
||||
agent_mode_max_iterations=0,
|
||||
)
|
||||
@@ -335,7 +335,7 @@ async def test_smart_decision_maker_parameter_validation():
|
||||
|
||||
input_data = SmartDecisionMakerBlock.Input(
|
||||
prompt="Search for keywords",
|
||||
model=llm_module.LlmModel.GPT4O,
|
||||
model=llm_module.DEFAULT_LLM_MODEL,
|
||||
credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore
|
||||
retry=2, # Set retry to 2 for testing
|
||||
agent_mode_max_iterations=0,
|
||||
@@ -402,7 +402,7 @@ async def test_smart_decision_maker_parameter_validation():
|
||||
|
||||
input_data = SmartDecisionMakerBlock.Input(
|
||||
prompt="Search for keywords",
|
||||
model=llm_module.LlmModel.GPT4O,
|
||||
model=llm_module.DEFAULT_LLM_MODEL,
|
||||
credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore
|
||||
agent_mode_max_iterations=0,
|
||||
)
|
||||
@@ -462,7 +462,7 @@ async def test_smart_decision_maker_parameter_validation():
|
||||
|
||||
input_data = SmartDecisionMakerBlock.Input(
|
||||
prompt="Search for keywords",
|
||||
model=llm_module.LlmModel.GPT4O,
|
||||
model=llm_module.DEFAULT_LLM_MODEL,
|
||||
credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore
|
||||
agent_mode_max_iterations=0,
|
||||
)
|
||||
@@ -526,7 +526,7 @@ async def test_smart_decision_maker_parameter_validation():
|
||||
|
||||
input_data = SmartDecisionMakerBlock.Input(
|
||||
prompt="Search for keywords",
|
||||
model=llm_module.LlmModel.GPT4O,
|
||||
model=llm_module.DEFAULT_LLM_MODEL,
|
||||
credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore
|
||||
agent_mode_max_iterations=0,
|
||||
)
|
||||
@@ -648,7 +648,7 @@ async def test_smart_decision_maker_raw_response_conversion():
|
||||
|
||||
input_data = SmartDecisionMakerBlock.Input(
|
||||
prompt="Test prompt",
|
||||
model=llm_module.LlmModel.GPT4O,
|
||||
model=llm_module.DEFAULT_LLM_MODEL,
|
||||
credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore
|
||||
retry=2,
|
||||
agent_mode_max_iterations=0,
|
||||
@@ -722,7 +722,7 @@ async def test_smart_decision_maker_raw_response_conversion():
|
||||
):
|
||||
input_data = SmartDecisionMakerBlock.Input(
|
||||
prompt="Simple prompt",
|
||||
model=llm_module.LlmModel.GPT4O,
|
||||
model=llm_module.DEFAULT_LLM_MODEL,
|
||||
credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore
|
||||
agent_mode_max_iterations=0,
|
||||
)
|
||||
@@ -778,7 +778,7 @@ async def test_smart_decision_maker_raw_response_conversion():
|
||||
):
|
||||
input_data = SmartDecisionMakerBlock.Input(
|
||||
prompt="Another test",
|
||||
model=llm_module.LlmModel.GPT4O,
|
||||
model=llm_module.DEFAULT_LLM_MODEL,
|
||||
credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore
|
||||
agent_mode_max_iterations=0,
|
||||
)
|
||||
@@ -931,7 +931,7 @@ async def test_smart_decision_maker_agent_mode():
|
||||
# Test agent mode with max_iterations = 3
|
||||
input_data = SmartDecisionMakerBlock.Input(
|
||||
prompt="Complete this task using tools",
|
||||
model=llm_module.LlmModel.GPT4O,
|
||||
model=llm_module.DEFAULT_LLM_MODEL,
|
||||
credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore
|
||||
agent_mode_max_iterations=3, # Enable agent mode with 3 max iterations
|
||||
)
|
||||
@@ -1020,7 +1020,7 @@ async def test_smart_decision_maker_traditional_mode_default():
|
||||
# Test default behavior (traditional mode)
|
||||
input_data = SmartDecisionMakerBlock.Input(
|
||||
prompt="Test prompt",
|
||||
model=llm_module.LlmModel.GPT4O,
|
||||
model=llm_module.DEFAULT_LLM_MODEL,
|
||||
credentials=llm_module.TEST_CREDENTIALS_INPUT, # type: ignore
|
||||
agent_mode_max_iterations=0, # Traditional mode
|
||||
)
|
||||
|
||||
@@ -373,7 +373,7 @@ async def test_output_yielding_with_dynamic_fields():
|
||||
input_data = block.input_schema(
|
||||
prompt="Create a user dictionary",
|
||||
credentials=llm.TEST_CREDENTIALS_INPUT,
|
||||
model=llm.LlmModel.GPT4O,
|
||||
model=llm.DEFAULT_LLM_MODEL,
|
||||
agent_mode_max_iterations=0, # Use traditional mode to test output yielding
|
||||
)
|
||||
|
||||
@@ -594,7 +594,7 @@ async def test_validation_errors_dont_pollute_conversation():
|
||||
input_data = block.input_schema(
|
||||
prompt="Test prompt",
|
||||
credentials=llm.TEST_CREDENTIALS_INPUT,
|
||||
model=llm.LlmModel.GPT4O,
|
||||
model=llm.DEFAULT_LLM_MODEL,
|
||||
retry=3, # Allow retries
|
||||
agent_mode_max_iterations=1,
|
||||
)
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
from gravitasml.parser import Parser
|
||||
from gravitasml.token import tokenize
|
||||
from gravitasml.token import Token, tokenize
|
||||
|
||||
from backend.data.block import Block, BlockOutput, BlockSchemaInput, BlockSchemaOutput
|
||||
from backend.data.model import SchemaField
|
||||
@@ -25,6 +25,38 @@ class XMLParserBlock(Block):
|
||||
],
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def _validate_tokens(tokens: list[Token]) -> None:
|
||||
"""Ensure the XML has a single root element and no stray text."""
|
||||
if not tokens:
|
||||
raise ValueError("XML input is empty.")
|
||||
|
||||
depth = 0
|
||||
root_seen = False
|
||||
|
||||
for token in tokens:
|
||||
if token.type == "TAG_OPEN":
|
||||
if depth == 0 and root_seen:
|
||||
raise ValueError("XML must have a single root element.")
|
||||
depth += 1
|
||||
if depth == 1:
|
||||
root_seen = True
|
||||
elif token.type == "TAG_CLOSE":
|
||||
depth -= 1
|
||||
if depth < 0:
|
||||
raise SyntaxError("Unexpected closing tag in XML input.")
|
||||
elif token.type in {"TEXT", "ESCAPE"}:
|
||||
if depth == 0 and token.value:
|
||||
raise ValueError(
|
||||
"XML contains text outside the root element; "
|
||||
"wrap content in a single root tag."
|
||||
)
|
||||
|
||||
if depth != 0:
|
||||
raise SyntaxError("Unclosed tag detected in XML input.")
|
||||
if not root_seen:
|
||||
raise ValueError("XML must include a root element.")
|
||||
|
||||
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
|
||||
# Security fix: Add size limits to prevent XML bomb attacks
|
||||
MAX_XML_SIZE = 10 * 1024 * 1024 # 10MB limit for XML input
|
||||
@@ -35,7 +67,9 @@ class XMLParserBlock(Block):
|
||||
)
|
||||
|
||||
try:
|
||||
tokens = tokenize(input_data.input_xml)
|
||||
tokens = list(tokenize(input_data.input_xml))
|
||||
self._validate_tokens(tokens)
|
||||
|
||||
parser = Parser(tokens)
|
||||
parsed_result = parser.parse()
|
||||
yield "parsed_xml", parsed_result
@@ -111,6 +111,8 @@ class TranscribeYoutubeVideoBlock(Block):
|
||||
return parsed_url.path.split("/")[2]
|
||||
if parsed_url.path[:3] == "/v/":
|
||||
return parsed_url.path.split("/")[2]
|
||||
if parsed_url.path.startswith("/shorts/"):
|
||||
return parsed_url.path.split("/")[2]
|
||||
raise ValueError(f"Invalid YouTube URL: {url}")
def get_transcript(
|
||||
|
||||
@@ -59,12 +59,13 @@ from backend.integrations.credentials_store import (
|
||||
|
||||
MODEL_COST: dict[LlmModel, int] = {
|
||||
LlmModel.O3: 4,
|
||||
LlmModel.O3_MINI: 2, # $1.10 / $4.40
|
||||
LlmModel.O1: 16, # $15 / $60
|
||||
LlmModel.O3_MINI: 2,
|
||||
LlmModel.O1: 16,
|
||||
LlmModel.O1_MINI: 4,
|
||||
# GPT-5 models
|
||||
LlmModel.GPT5: 2,
|
||||
LlmModel.GPT5_2: 6,
|
||||
LlmModel.GPT5_1: 5,
|
||||
LlmModel.GPT5: 2,
|
||||
LlmModel.GPT5_MINI: 1,
|
||||
LlmModel.GPT5_NANO: 1,
|
||||
LlmModel.GPT5_CHAT: 5,
|
||||
@@ -87,7 +88,7 @@ MODEL_COST: dict[LlmModel, int] = {
|
||||
LlmModel.AIML_API_LLAMA3_3_70B: 1,
|
||||
LlmModel.AIML_API_META_LLAMA_3_1_70B: 1,
|
||||
LlmModel.AIML_API_LLAMA_3_2_3B: 1,
|
||||
LlmModel.LLAMA3_3_70B: 1, # $0.59 / $0.79
|
||||
LlmModel.LLAMA3_3_70B: 1,
|
||||
LlmModel.LLAMA3_1_8B: 1,
|
||||
LlmModel.OLLAMA_LLAMA3_3: 1,
|
||||
LlmModel.OLLAMA_LLAMA3_2: 1,
|
||||
|
||||
@@ -341,6 +341,19 @@ class UserCreditBase(ABC):
|
||||
|
||||
if result:
|
||||
# UserBalance is already updated by the CTE
|
||||
|
||||
# Clear insufficient funds notification flags when credits are added
|
||||
# so user can receive alerts again if they run out in the future.
|
||||
if transaction.amount > 0 and transaction.type in [
|
||||
CreditTransactionType.GRANT,
|
||||
CreditTransactionType.TOP_UP,
|
||||
]:
|
||||
from backend.executor.manager import (
|
||||
clear_insufficient_funds_notifications,
|
||||
)
|
||||
|
||||
await clear_insufficient_funds_notifications(user_id)
|
||||
|
||||
return result[0]["balance"]
|
||||
|
||||
async def _add_transaction(
|
||||
@@ -530,6 +543,22 @@ class UserCreditBase(ABC):
|
||||
if result:
|
||||
new_balance, tx_key = result[0]["balance"], result[0]["transactionKey"]
|
||||
# UserBalance is already updated by the CTE
|
||||
|
||||
# Clear insufficient funds notification flags when credits are added
|
||||
# so user can receive alerts again if they run out in the future.
|
||||
if (
|
||||
amount > 0
|
||||
and is_active
|
||||
and transaction_type
|
||||
in [CreditTransactionType.GRANT, CreditTransactionType.TOP_UP]
|
||||
):
|
||||
# Lazy import to avoid circular dependency with executor.manager
|
||||
from backend.executor.manager import (
|
||||
clear_insufficient_funds_notifications,
|
||||
)
|
||||
|
||||
await clear_insufficient_funds_notifications(user_id)
|
||||
|
||||
return new_balance, tx_key
|
||||
|
||||
# If no result, either user doesn't exist or insufficient balance
autogpt_platform/backend/backend/data/understanding.py (new file, 429 lines)
@@ -0,0 +1,429 @@
|
||||
"""Data models and access layer for user business understanding."""
|
||||
|
||||
import logging
|
||||
from datetime import datetime
|
||||
from typing import Any, Optional, cast
|
||||
|
||||
import pydantic
|
||||
from prisma.models import UserBusinessUnderstanding
|
||||
from prisma.types import (
|
||||
UserBusinessUnderstandingCreateInput,
|
||||
UserBusinessUnderstandingUpdateInput,
|
||||
)
|
||||
|
||||
from backend.data.redis_client import get_redis_async
|
||||
from backend.util.json import SafeJson
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Cache configuration
|
||||
CACHE_KEY_PREFIX = "understanding"
|
||||
CACHE_TTL_SECONDS = 48 * 60 * 60 # 48 hours
|
||||
|
||||
|
||||
def _cache_key(user_id: str) -> str:
|
||||
"""Generate cache key for user business understanding."""
|
||||
return f"{CACHE_KEY_PREFIX}:{user_id}"
|
||||
|
||||
|
||||
def _json_to_list(value: Any) -> list[str]:
|
||||
"""Convert Json field to list[str], handling None."""
|
||||
if value is None:
|
||||
return []
|
||||
if isinstance(value, list):
|
||||
return cast(list[str], value)
|
||||
return []
|
||||
|
||||
|
||||
class BusinessUnderstandingInput(pydantic.BaseModel):
|
||||
"""Input model for updating business understanding - all fields optional for incremental updates."""
|
||||
|
||||
# User info
|
||||
user_name: Optional[str] = pydantic.Field(None, description="The user's name")
|
||||
job_title: Optional[str] = pydantic.Field(None, description="The user's job title")
|
||||
|
||||
# Business basics
|
||||
business_name: Optional[str] = pydantic.Field(
|
||||
None, description="Name of the user's business"
|
||||
)
|
||||
industry: Optional[str] = pydantic.Field(None, description="Industry or sector")
|
||||
business_size: Optional[str] = pydantic.Field(
|
||||
None, description="Company size (e.g., '1-10', '11-50')"
|
||||
)
|
||||
user_role: Optional[str] = pydantic.Field(
|
||||
None,
|
||||
description="User's role in the organization (e.g., 'decision maker', 'implementer')",
|
||||
)
|
||||
|
||||
# Processes & activities
|
||||
key_workflows: Optional[list[str]] = pydantic.Field(
|
||||
None, description="Key business workflows"
|
||||
)
|
||||
daily_activities: Optional[list[str]] = pydantic.Field(
|
||||
None, description="Daily activities performed"
|
||||
)
|
||||
|
||||
# Pain points & goals
|
||||
pain_points: Optional[list[str]] = pydantic.Field(
|
||||
None, description="Current pain points"
|
||||
)
|
||||
bottlenecks: Optional[list[str]] = pydantic.Field(
|
||||
None, description="Process bottlenecks"
|
||||
)
|
||||
manual_tasks: Optional[list[str]] = pydantic.Field(
|
||||
None, description="Manual/repetitive tasks"
|
||||
)
|
||||
automation_goals: Optional[list[str]] = pydantic.Field(
|
||||
None, description="Desired automation goals"
|
||||
)
|
||||
|
||||
# Current tools
|
||||
current_software: Optional[list[str]] = pydantic.Field(
|
||||
None, description="Software/tools currently used"
|
||||
)
|
||||
existing_automation: Optional[list[str]] = pydantic.Field(
|
||||
None, description="Existing automations"
|
||||
)
|
||||
|
||||
# Additional context
|
||||
additional_notes: Optional[str] = pydantic.Field(
|
||||
None, description="Any additional context"
|
||||
)
|
||||
|
||||
|
||||
class BusinessUnderstanding(pydantic.BaseModel):
|
||||
"""Full business understanding model returned from database."""
|
||||
|
||||
id: str
|
||||
user_id: str
|
||||
created_at: datetime
|
||||
updated_at: datetime
|
||||
|
||||
# User info
|
||||
user_name: Optional[str] = None
|
||||
job_title: Optional[str] = None
|
||||
|
||||
# Business basics
|
||||
business_name: Optional[str] = None
|
||||
industry: Optional[str] = None
|
||||
business_size: Optional[str] = None
|
||||
user_role: Optional[str] = None
|
||||
|
||||
# Processes & activities
|
||||
key_workflows: list[str] = pydantic.Field(default_factory=list)
|
||||
daily_activities: list[str] = pydantic.Field(default_factory=list)
|
||||
|
||||
# Pain points & goals
|
||||
pain_points: list[str] = pydantic.Field(default_factory=list)
|
||||
bottlenecks: list[str] = pydantic.Field(default_factory=list)
|
||||
manual_tasks: list[str] = pydantic.Field(default_factory=list)
|
||||
automation_goals: list[str] = pydantic.Field(default_factory=list)
|
||||
|
||||
# Current tools
|
||||
current_software: list[str] = pydantic.Field(default_factory=list)
|
||||
existing_automation: list[str] = pydantic.Field(default_factory=list)
|
||||
|
||||
# Additional context
|
||||
additional_notes: Optional[str] = None
|
||||
|
||||
@classmethod
|
||||
def from_db(cls, db_record: UserBusinessUnderstanding) -> "BusinessUnderstanding":
|
||||
"""Convert database record to Pydantic model."""
|
||||
return cls(
|
||||
id=db_record.id,
|
||||
user_id=db_record.userId,
|
||||
created_at=db_record.createdAt,
|
||||
updated_at=db_record.updatedAt,
|
||||
user_name=db_record.userName,
|
||||
job_title=db_record.jobTitle,
|
||||
business_name=db_record.businessName,
|
||||
industry=db_record.industry,
|
||||
business_size=db_record.businessSize,
|
||||
user_role=db_record.userRole,
|
||||
key_workflows=_json_to_list(db_record.keyWorkflows),
|
||||
daily_activities=_json_to_list(db_record.dailyActivities),
|
||||
pain_points=_json_to_list(db_record.painPoints),
|
||||
bottlenecks=_json_to_list(db_record.bottlenecks),
|
||||
manual_tasks=_json_to_list(db_record.manualTasks),
|
||||
automation_goals=_json_to_list(db_record.automationGoals),
|
||||
current_software=_json_to_list(db_record.currentSoftware),
|
||||
existing_automation=_json_to_list(db_record.existingAutomation),
|
||||
additional_notes=db_record.additionalNotes,
|
||||
)
|
||||
|
||||
|
||||
def _merge_lists(existing: list | None, new: list | None) -> list | None:
|
||||
"""Merge two lists, removing duplicates while preserving order."""
|
||||
if new is None:
|
||||
return existing
|
||||
if existing is None:
|
||||
return new
|
||||
# Preserve order, add new items that don't exist
|
||||
merged = list(existing)
|
||||
for item in new:
|
||||
if item not in merged:
|
||||
merged.append(item)
|
||||
return merged
|
||||
|
||||
|
||||
async def _get_from_cache(user_id: str) -> Optional[BusinessUnderstanding]:
|
||||
"""Get business understanding from Redis cache."""
|
||||
try:
|
||||
redis = await get_redis_async()
|
||||
cached_data = await redis.get(_cache_key(user_id))
|
||||
if cached_data:
|
||||
return BusinessUnderstanding.model_validate_json(cached_data)
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to get understanding from cache: {e}")
|
||||
return None
|
||||
|
||||
|
||||
async def _set_cache(user_id: str, understanding: BusinessUnderstanding) -> None:
|
||||
"""Set business understanding in Redis cache with TTL."""
|
||||
try:
|
||||
redis = await get_redis_async()
|
||||
await redis.setex(
|
||||
_cache_key(user_id),
|
||||
CACHE_TTL_SECONDS,
|
||||
understanding.model_dump_json(),
|
||||
)
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to set understanding in cache: {e}")
|
||||
|
||||
|
||||
async def _delete_cache(user_id: str) -> None:
|
||||
"""Delete business understanding from Redis cache."""
|
||||
try:
|
||||
redis = await get_redis_async()
|
||||
await redis.delete(_cache_key(user_id))
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to delete understanding from cache: {e}")
|
||||
|
||||
|
||||
async def get_business_understanding(
|
||||
user_id: str,
|
||||
) -> Optional[BusinessUnderstanding]:
|
||||
"""Get the business understanding for a user.
|
||||
|
||||
Checks cache first, falls back to database if not cached.
|
||||
Results are cached for 48 hours.
|
||||
"""
|
||||
# Try cache first
|
||||
cached = await _get_from_cache(user_id)
|
||||
if cached:
|
||||
logger.debug(f"Business understanding cache hit for user {user_id}")
|
||||
return cached
|
||||
|
||||
# Cache miss - load from database
|
||||
logger.debug(f"Business understanding cache miss for user {user_id}")
|
||||
record = await UserBusinessUnderstanding.prisma().find_unique(
|
||||
where={"userId": user_id}
|
||||
)
|
||||
if record is None:
|
||||
return None
|
||||
|
||||
understanding = BusinessUnderstanding.from_db(record)
|
||||
|
||||
# Store in cache for next time
|
||||
await _set_cache(user_id, understanding)
|
||||
|
||||
return understanding
|
||||
|
||||
|
||||
async def upsert_business_understanding(
|
||||
user_id: str,
|
||||
data: BusinessUnderstandingInput,
|
||||
) -> BusinessUnderstanding:
|
||||
"""
|
||||
Create or update business understanding with incremental merge strategy.
|
||||
|
||||
- String fields: new value overwrites if provided (not None)
|
||||
- List fields: new items are appended to existing (deduplicated)
|
||||
"""
|
||||
# Get existing record for merge
|
||||
existing = await UserBusinessUnderstanding.prisma().find_unique(
|
||||
where={"userId": user_id}
|
||||
)
|
||||
|
||||
# Build update data with merge strategy
|
||||
update_data: UserBusinessUnderstandingUpdateInput = {}
|
||||
create_data: dict[str, Any] = {"userId": user_id}
|
||||
|
||||
# String fields - overwrite if provided
|
||||
if data.user_name is not None:
|
||||
update_data["userName"] = data.user_name
|
||||
create_data["userName"] = data.user_name
|
||||
if data.job_title is not None:
|
||||
update_data["jobTitle"] = data.job_title
|
||||
create_data["jobTitle"] = data.job_title
|
||||
if data.business_name is not None:
|
||||
update_data["businessName"] = data.business_name
|
||||
create_data["businessName"] = data.business_name
|
||||
if data.industry is not None:
|
||||
update_data["industry"] = data.industry
|
||||
create_data["industry"] = data.industry
|
||||
if data.business_size is not None:
|
||||
update_data["businessSize"] = data.business_size
|
||||
create_data["businessSize"] = data.business_size
|
||||
if data.user_role is not None:
|
||||
update_data["userRole"] = data.user_role
|
||||
create_data["userRole"] = data.user_role
|
||||
if data.additional_notes is not None:
|
||||
update_data["additionalNotes"] = data.additional_notes
|
||||
create_data["additionalNotes"] = data.additional_notes
|
||||
|
||||
# List fields - merge with existing
|
||||
if data.key_workflows is not None:
|
||||
existing_list = _json_to_list(existing.keyWorkflows) if existing else None
|
||||
merged = _merge_lists(existing_list, data.key_workflows)
|
||||
update_data["keyWorkflows"] = SafeJson(merged)
|
||||
create_data["keyWorkflows"] = SafeJson(merged)
|
||||
|
||||
if data.daily_activities is not None:
|
||||
existing_list = _json_to_list(existing.dailyActivities) if existing else None
|
||||
merged = _merge_lists(existing_list, data.daily_activities)
|
||||
update_data["dailyActivities"] = SafeJson(merged)
|
||||
create_data["dailyActivities"] = SafeJson(merged)
|
||||
|
||||
if data.pain_points is not None:
|
||||
existing_list = _json_to_list(existing.painPoints) if existing else None
|
||||
merged = _merge_lists(existing_list, data.pain_points)
|
||||
update_data["painPoints"] = SafeJson(merged)
|
||||
create_data["painPoints"] = SafeJson(merged)
|
||||
|
||||
if data.bottlenecks is not None:
|
||||
existing_list = _json_to_list(existing.bottlenecks) if existing else None
|
||||
merged = _merge_lists(existing_list, data.bottlenecks)
|
||||
update_data["bottlenecks"] = SafeJson(merged)
|
||||
create_data["bottlenecks"] = SafeJson(merged)
|
||||
|
||||
if data.manual_tasks is not None:
|
||||
existing_list = _json_to_list(existing.manualTasks) if existing else None
|
||||
merged = _merge_lists(existing_list, data.manual_tasks)
|
||||
update_data["manualTasks"] = SafeJson(merged)
|
||||
create_data["manualTasks"] = SafeJson(merged)
|
||||
|
||||
if data.automation_goals is not None:
|
||||
existing_list = _json_to_list(existing.automationGoals) if existing else None
|
||||
merged = _merge_lists(existing_list, data.automation_goals)
|
||||
update_data["automationGoals"] = SafeJson(merged)
|
||||
create_data["automationGoals"] = SafeJson(merged)
|
||||
|
||||
if data.current_software is not None:
|
||||
existing_list = _json_to_list(existing.currentSoftware) if existing else None
|
||||
merged = _merge_lists(existing_list, data.current_software)
|
||||
update_data["currentSoftware"] = SafeJson(merged)
|
||||
create_data["currentSoftware"] = SafeJson(merged)
|
||||
|
||||
if data.existing_automation is not None:
|
||||
existing_list = _json_to_list(existing.existingAutomation) if existing else None
|
||||
merged = _merge_lists(existing_list, data.existing_automation)
|
||||
update_data["existingAutomation"] = SafeJson(merged)
|
||||
create_data["existingAutomation"] = SafeJson(merged)
|
||||
|
||||
# Upsert
|
||||
record = await UserBusinessUnderstanding.prisma().upsert(
|
||||
where={"userId": user_id},
|
||||
data={
|
||||
"create": UserBusinessUnderstandingCreateInput(**create_data),
|
||||
"update": update_data,
|
||||
},
|
||||
)
|
||||
|
||||
understanding = BusinessUnderstanding.from_db(record)
|
||||
|
||||
# Update cache with new understanding
|
||||
await _set_cache(user_id, understanding)
|
||||
|
||||
return understanding
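The merge strategy means repeated partial updates accumulate instead of clobbering each other: scalar fields take the latest non-None value, list fields grow by deduplicated append. A worked example with made-up values:

    await upsert_business_understanding(
        user_id, BusinessUnderstandingInput(industry="retail", pain_points=["slow reporting"])
    )
    await upsert_business_understanding(
        user_id, BusinessUnderstandingInput(industry="e-commerce", pain_points=["manual invoicing"])
    )
    # Resulting record: industry == "e-commerce" (string overwritten),
    # pain_points == ["slow reporting", "manual invoicing"] (lists merged, duplicates dropped).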
async def clear_business_understanding(user_id: str) -> bool:
|
||||
"""Clear/delete business understanding for a user from both DB and cache."""
|
||||
# Delete from cache first
|
||||
await _delete_cache(user_id)
|
||||
|
||||
try:
|
||||
await UserBusinessUnderstanding.prisma().delete(where={"userId": user_id})
|
||||
return True
|
||||
except Exception:
|
||||
# Record might not exist
|
||||
return False
|
||||
|
||||
|
||||
def format_understanding_for_prompt(understanding: BusinessUnderstanding) -> str:
|
||||
"""Format business understanding as text for system prompt injection."""
|
||||
sections = []
|
||||
|
||||
# User info section
|
||||
user_info = []
|
||||
if understanding.user_name:
|
||||
user_info.append(f"Name: {understanding.user_name}")
|
||||
if understanding.job_title:
|
||||
user_info.append(f"Job Title: {understanding.job_title}")
|
||||
if user_info:
|
||||
sections.append("## User\n" + "\n".join(user_info))
|
||||
|
||||
# Business section
|
||||
business_info = []
|
||||
if understanding.business_name:
|
||||
business_info.append(f"Company: {understanding.business_name}")
|
||||
if understanding.industry:
|
||||
business_info.append(f"Industry: {understanding.industry}")
|
||||
if understanding.business_size:
|
||||
business_info.append(f"Size: {understanding.business_size}")
|
||||
if understanding.user_role:
|
||||
business_info.append(f"Role Context: {understanding.user_role}")
|
||||
if business_info:
|
||||
sections.append("## Business\n" + "\n".join(business_info))
|
||||
|
||||
# Processes section
|
||||
processes = []
|
||||
if understanding.key_workflows:
|
||||
processes.append(f"Key Workflows: {', '.join(understanding.key_workflows)}")
|
||||
if understanding.daily_activities:
|
||||
processes.append(
|
||||
f"Daily Activities: {', '.join(understanding.daily_activities)}"
|
||||
)
|
||||
if processes:
|
||||
sections.append("## Processes\n" + "\n".join(processes))
|
||||
|
||||
# Pain points section
|
||||
pain_points = []
|
||||
if understanding.pain_points:
|
||||
pain_points.append(f"Pain Points: {', '.join(understanding.pain_points)}")
|
||||
if understanding.bottlenecks:
|
||||
pain_points.append(f"Bottlenecks: {', '.join(understanding.bottlenecks)}")
|
||||
if understanding.manual_tasks:
|
||||
pain_points.append(f"Manual Tasks: {', '.join(understanding.manual_tasks)}")
|
||||
if pain_points:
|
||||
sections.append("## Pain Points\n" + "\n".join(pain_points))
|
||||
|
||||
# Goals section
|
||||
if understanding.automation_goals:
|
||||
sections.append(
|
||||
"## Automation Goals\n"
|
||||
+ "\n".join(f"- {goal}" for goal in understanding.automation_goals)
|
||||
)
|
||||
|
||||
# Current tools section
|
||||
tools_info = []
|
||||
if understanding.current_software:
|
||||
tools_info.append(
|
||||
f"Current Software: {', '.join(understanding.current_software)}"
|
||||
)
|
||||
if understanding.existing_automation:
|
||||
tools_info.append(
|
||||
f"Existing Automation: {', '.join(understanding.existing_automation)}"
|
||||
)
|
||||
if tools_info:
|
||||
sections.append("## Current Tools\n" + "\n".join(tools_info))
|
||||
|
||||
# Additional notes
|
||||
if understanding.additional_notes:
|
||||
sections.append(f"## Additional Context\n{understanding.additional_notes}")
|
||||
|
||||
if not sections:
|
||||
return ""
|
||||
|
||||
return "# User Business Context\n\n" + "\n\n".join(sections)
@@ -114,6 +114,40 @@ utilization_gauge = Gauge(
|
||||
"Ratio of active graph runs to max graph workers",
|
||||
)
|
||||
|
||||
# Redis key prefix for tracking insufficient funds Discord notifications.
|
||||
# We only send one notification per user per agent until they top up credits.
|
||||
INSUFFICIENT_FUNDS_NOTIFIED_PREFIX = "insufficient_funds_discord_notified"
|
||||
# TTL for the notification flag (30 days) - acts as a fallback cleanup
|
||||
INSUFFICIENT_FUNDS_NOTIFIED_TTL_SECONDS = 30 * 24 * 60 * 60
|
||||
|
||||
|
||||
async def clear_insufficient_funds_notifications(user_id: str) -> int:
|
||||
"""
|
||||
Clear all insufficient funds notification flags for a user.
|
||||
|
||||
This should be called when a user tops up their credits, allowing
|
||||
Discord notifications to be sent again if they run out of funds.
|
||||
|
||||
Args:
|
||||
user_id: The user ID to clear notifications for.
|
||||
|
||||
Returns:
|
||||
The number of keys that were deleted.
|
||||
"""
|
||||
try:
|
||||
redis_client = await redis.get_redis_async()
|
||||
pattern = f"{INSUFFICIENT_FUNDS_NOTIFIED_PREFIX}:{user_id}:*"
|
||||
keys = [key async for key in redis_client.scan_iter(match=pattern)]
|
||||
if keys:
|
||||
return await redis_client.delete(*keys)
|
||||
return 0
|
||||
except Exception as e:
|
||||
logger.warning(
|
||||
f"Failed to clear insufficient funds notification flags for user "
|
||||
f"{user_id}: {e}"
|
||||
)
|
||||
return 0
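The notification flow is deduplicated with one Redis key per user and agent: the execution handler further down in this diff sets the key with SET NX and a 30-day TTL before alerting, and the function above deletes every matching key when credits are granted or topped up. A compact sketch of that lifecycle, assuming a redis-py style client (send_alert is a hypothetical stand-in for the email plus Discord path):

    key = f"{INSUFFICIENT_FUNDS_NOTIFIED_PREFIX}:{user_id}:{graph_id}"
    first_time = redis_client.set(key, "1", nx=True, ex=INSUFFICIENT_FUNDS_NOTIFIED_TTL_SECONDS)
    if first_time:
        send_alert()  # only the first failure per user+agent notifies
    # Later, on a GRANT or TOP_UP transaction:
    await clear_insufficient_funds_notifications(user_id)  # removes all keys for this user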
# Thread-local storage for ExecutionProcessor instances
|
||||
_tls = threading.local()
|
||||
@@ -1261,12 +1295,40 @@ class ExecutionProcessor:
|
||||
graph_id: str,
|
||||
e: InsufficientBalanceError,
|
||||
):
|
||||
# Check if we've already sent a notification for this user+agent combo.
|
||||
# We only send one notification per user per agent until they top up credits.
|
||||
redis_key = f"{INSUFFICIENT_FUNDS_NOTIFIED_PREFIX}:{user_id}:{graph_id}"
|
||||
try:
|
||||
redis_client = redis.get_redis()
|
||||
# SET NX returns True only if the key was newly set (didn't exist)
|
||||
is_new_notification = redis_client.set(
|
||||
redis_key,
|
||||
"1",
|
||||
nx=True,
|
||||
ex=INSUFFICIENT_FUNDS_NOTIFIED_TTL_SECONDS,
|
||||
)
|
||||
if not is_new_notification:
|
||||
# Already notified for this user+agent, skip all notifications
|
||||
logger.debug(
|
||||
f"Skipping duplicate insufficient funds notification for "
|
||||
f"user={user_id}, graph={graph_id}"
|
||||
)
|
||||
return
|
||||
except Exception as redis_error:
|
||||
# If Redis fails, log and continue to send the notification
|
||||
# (better to occasionally duplicate than to never notify)
|
||||
logger.warning(
|
||||
f"Failed to check/set insufficient funds notification flag in Redis: "
|
||||
f"{redis_error}"
|
||||
)
|
||||
|
||||
shortfall = abs(e.amount) - e.balance
|
||||
metadata = db_client.get_graph_metadata(graph_id)
|
||||
base_url = (
|
||||
settings.config.frontend_base_url or settings.config.platform_base_url
|
||||
)
|
||||
|
||||
# Queue user email notification
|
||||
queue_notification(
|
||||
NotificationEventModel(
|
||||
user_id=user_id,
|
||||
@@ -1280,6 +1342,7 @@ class ExecutionProcessor:
|
||||
)
|
||||
)
|
||||
|
||||
# Send Discord system alert
|
||||
try:
|
||||
user_email = db_client.get_user_email_by_id(user_id)
@@ -0,0 +1,560 @@
|
||||
from unittest.mock import AsyncMock, MagicMock, patch
|
||||
|
||||
import pytest
|
||||
from prisma.enums import NotificationType
|
||||
|
||||
from backend.data.notifications import ZeroBalanceData
|
||||
from backend.executor.manager import (
|
||||
INSUFFICIENT_FUNDS_NOTIFIED_PREFIX,
|
||||
ExecutionProcessor,
|
||||
clear_insufficient_funds_notifications,
|
||||
)
|
||||
from backend.util.exceptions import InsufficientBalanceError
|
||||
from backend.util.test import SpinTestServer
|
||||
|
||||
|
||||
async def async_iter(items):
|
||||
"""Helper to create an async iterator from a list."""
|
||||
for item in items:
|
||||
yield item
|
||||
|
||||
|
||||
@pytest.mark.asyncio(loop_scope="session")
|
||||
async def test_handle_insufficient_funds_sends_discord_alert_first_time(
|
||||
server: SpinTestServer,
|
||||
):
|
||||
"""Test that the first insufficient funds notification sends a Discord alert."""
|
||||
|
||||
execution_processor = ExecutionProcessor()
|
||||
user_id = "test-user-123"
|
||||
graph_id = "test-graph-456"
|
||||
error = InsufficientBalanceError(
|
||||
message="Insufficient balance",
|
||||
user_id=user_id,
|
||||
balance=72, # $0.72
|
||||
amount=-714, # Attempting to spend $7.14
|
||||
)
|
||||
|
||||
with patch(
|
||||
"backend.executor.manager.queue_notification"
|
||||
) as mock_queue_notif, patch(
|
||||
"backend.executor.manager.get_notification_manager_client"
|
||||
) as mock_get_client, patch(
|
||||
"backend.executor.manager.settings"
|
||||
) as mock_settings, patch(
|
||||
"backend.executor.manager.redis"
|
||||
) as mock_redis_module:
|
||||
|
||||
# Setup mocks
|
||||
mock_client = MagicMock()
|
||||
mock_get_client.return_value = mock_client
|
||||
mock_settings.config.frontend_base_url = "https://test.com"
|
||||
|
||||
# Mock Redis to simulate first-time notification (set returns True)
|
||||
mock_redis_client = MagicMock()
|
||||
mock_redis_module.get_redis.return_value = mock_redis_client
|
||||
mock_redis_client.set.return_value = True # Key was newly set
|
||||
|
||||
# Create mock database client
|
||||
mock_db_client = MagicMock()
|
||||
mock_graph_metadata = MagicMock()
|
||||
mock_graph_metadata.name = "Test Agent"
|
||||
mock_db_client.get_graph_metadata.return_value = mock_graph_metadata
|
||||
mock_db_client.get_user_email_by_id.return_value = "test@example.com"
|
||||
|
||||
# Test the insufficient funds handler
|
||||
execution_processor._handle_insufficient_funds_notif(
|
||||
db_client=mock_db_client,
|
||||
user_id=user_id,
|
||||
graph_id=graph_id,
|
||||
e=error,
|
||||
)
|
||||
|
||||
# Verify notification was queued
|
||||
mock_queue_notif.assert_called_once()
|
||||
notification_call = mock_queue_notif.call_args[0][0]
|
||||
assert notification_call.type == NotificationType.ZERO_BALANCE
|
||||
assert notification_call.user_id == user_id
|
||||
assert isinstance(notification_call.data, ZeroBalanceData)
|
||||
assert notification_call.data.current_balance == 72
|
||||
|
||||
# Verify Redis was checked with correct key pattern
|
||||
expected_key = f"{INSUFFICIENT_FUNDS_NOTIFIED_PREFIX}:{user_id}:{graph_id}"
|
||||
mock_redis_client.set.assert_called_once()
|
||||
call_args = mock_redis_client.set.call_args
|
||||
assert call_args[0][0] == expected_key
|
||||
assert call_args[1]["nx"] is True
|
||||
|
||||
# Verify Discord alert was sent
|
||||
mock_client.discord_system_alert.assert_called_once()
|
||||
discord_message = mock_client.discord_system_alert.call_args[0][0]
|
||||
assert "Insufficient Funds Alert" in discord_message
|
||||
assert "test@example.com" in discord_message
|
||||
assert "Test Agent" in discord_message
|
||||
|
||||
|
||||
@pytest.mark.asyncio(loop_scope="session")
|
||||
async def test_handle_insufficient_funds_skips_duplicate_notifications(
|
||||
server: SpinTestServer,
|
||||
):
|
||||
"""Test that duplicate insufficient funds notifications skip both email and Discord."""
|
||||
|
||||
execution_processor = ExecutionProcessor()
|
||||
user_id = "test-user-123"
|
||||
graph_id = "test-graph-456"
|
||||
error = InsufficientBalanceError(
|
||||
message="Insufficient balance",
|
||||
user_id=user_id,
|
||||
balance=72,
|
||||
amount=-714,
|
||||
)
|
||||
|
||||
with patch(
|
||||
"backend.executor.manager.queue_notification"
|
||||
) as mock_queue_notif, patch(
|
||||
"backend.executor.manager.get_notification_manager_client"
|
||||
) as mock_get_client, patch(
|
||||
"backend.executor.manager.settings"
|
||||
) as mock_settings, patch(
|
||||
"backend.executor.manager.redis"
|
||||
) as mock_redis_module:
|
||||
|
||||
# Setup mocks
|
||||
mock_client = MagicMock()
|
||||
mock_get_client.return_value = mock_client
|
||||
mock_settings.config.frontend_base_url = "https://test.com"
|
||||
|
||||
# Mock Redis to simulate duplicate notification (set returns False/None)
|
||||
mock_redis_client = MagicMock()
|
||||
mock_redis_module.get_redis.return_value = mock_redis_client
|
||||
mock_redis_client.set.return_value = None # Key already existed
|
||||
|
||||
# Create mock database client
|
||||
mock_db_client = MagicMock()
|
||||
mock_db_client.get_graph_metadata.return_value = MagicMock(name="Test Agent")
|
||||
|
||||
# Test the insufficient funds handler
|
||||
execution_processor._handle_insufficient_funds_notif(
|
||||
db_client=mock_db_client,
|
||||
user_id=user_id,
|
||||
graph_id=graph_id,
|
||||
e=error,
|
||||
)
|
||||
|
||||
# Verify email notification was NOT queued (deduplication worked)
|
||||
mock_queue_notif.assert_not_called()
|
||||
|
||||
# Verify Discord alert was NOT sent (deduplication worked)
|
||||
mock_client.discord_system_alert.assert_not_called()
|
||||
|
||||
|
||||
@pytest.mark.asyncio(loop_scope="session")
|
||||
async def test_handle_insufficient_funds_different_agents_get_separate_alerts(
|
||||
server: SpinTestServer,
|
||||
):
|
||||
"""Test that different agents for the same user get separate Discord alerts."""
|
||||
|
||||
execution_processor = ExecutionProcessor()
|
||||
user_id = "test-user-123"
|
||||
graph_id_1 = "test-graph-111"
|
||||
graph_id_2 = "test-graph-222"
|
||||
|
||||
error = InsufficientBalanceError(
|
||||
message="Insufficient balance",
|
||||
user_id=user_id,
|
||||
balance=72,
|
||||
amount=-714,
|
||||
)
|
||||
|
||||
with patch("backend.executor.manager.queue_notification"), patch(
|
||||
"backend.executor.manager.get_notification_manager_client"
|
||||
) as mock_get_client, patch(
|
||||
"backend.executor.manager.settings"
|
||||
) as mock_settings, patch(
|
||||
"backend.executor.manager.redis"
|
||||
) as mock_redis_module:
|
||||
|
||||
mock_client = MagicMock()
|
||||
mock_get_client.return_value = mock_client
|
||||
mock_settings.config.frontend_base_url = "https://test.com"
|
||||
|
||||
mock_redis_client = MagicMock()
|
||||
mock_redis_module.get_redis.return_value = mock_redis_client
|
||||
# Both calls return True (first time for each agent)
|
||||
mock_redis_client.set.return_value = True
|
||||
|
||||
mock_db_client = MagicMock()
|
||||
mock_graph_metadata = MagicMock()
|
||||
mock_graph_metadata.name = "Test Agent"
|
||||
mock_db_client.get_graph_metadata.return_value = mock_graph_metadata
|
||||
mock_db_client.get_user_email_by_id.return_value = "test@example.com"
|
||||
|
||||
# First agent notification
|
||||
execution_processor._handle_insufficient_funds_notif(
|
||||
db_client=mock_db_client,
|
||||
user_id=user_id,
|
||||
graph_id=graph_id_1,
|
||||
e=error,
|
||||
)
|
||||
|
||||
# Second agent notification
|
||||
execution_processor._handle_insufficient_funds_notif(
|
||||
db_client=mock_db_client,
|
||||
user_id=user_id,
|
||||
graph_id=graph_id_2,
|
||||
e=error,
|
||||
)
|
||||
|
||||
# Verify Discord alerts were sent for both agents
|
||||
assert mock_client.discord_system_alert.call_count == 2
|
||||
|
||||
# Verify Redis was called with different keys
|
||||
assert mock_redis_client.set.call_count == 2
|
||||
calls = mock_redis_client.set.call_args_list
|
||||
assert (
|
||||
calls[0][0][0]
|
||||
== f"{INSUFFICIENT_FUNDS_NOTIFIED_PREFIX}:{user_id}:{graph_id_1}"
|
||||
)
|
||||
assert (
|
||||
calls[1][0][0]
|
||||
== f"{INSUFFICIENT_FUNDS_NOTIFIED_PREFIX}:{user_id}:{graph_id_2}"
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.asyncio(loop_scope="session")
|
||||
async def test_clear_insufficient_funds_notifications(server: SpinTestServer):
|
||||
"""Test that clearing notifications removes all keys for a user."""
|
||||
|
||||
user_id = "test-user-123"
|
||||
|
||||
with patch("backend.executor.manager.redis") as mock_redis_module:
|
||||
|
||||
mock_redis_client = MagicMock()
|
||||
# get_redis_async is an async function, so we need AsyncMock for it
|
||||
mock_redis_module.get_redis_async = AsyncMock(return_value=mock_redis_client)
|
||||
|
||||
# Mock scan_iter to return some keys as an async iterator
|
||||
mock_keys = [
|
||||
f"{INSUFFICIENT_FUNDS_NOTIFIED_PREFIX}:{user_id}:graph-1",
|
||||
f"{INSUFFICIENT_FUNDS_NOTIFIED_PREFIX}:{user_id}:graph-2",
|
||||
f"{INSUFFICIENT_FUNDS_NOTIFIED_PREFIX}:{user_id}:graph-3",
|
||||
]
|
||||
mock_redis_client.scan_iter.return_value = async_iter(mock_keys)
|
||||
# delete is awaited, so use AsyncMock
|
||||
mock_redis_client.delete = AsyncMock(return_value=3)
|
||||
|
||||
# Clear notifications
|
||||
result = await clear_insufficient_funds_notifications(user_id)
|
||||
|
||||
# Verify correct pattern was used
|
||||
expected_pattern = f"{INSUFFICIENT_FUNDS_NOTIFIED_PREFIX}:{user_id}:*"
|
||||
mock_redis_client.scan_iter.assert_called_once_with(match=expected_pattern)
|
||||
|
||||
# Verify delete was called with all keys
|
||||
mock_redis_client.delete.assert_called_once_with(*mock_keys)
|
||||
|
||||
# Verify return value
|
||||
assert result == 3
|
||||
|
||||
|
||||
@pytest.mark.asyncio(loop_scope="session")
|
||||
async def test_clear_insufficient_funds_notifications_no_keys(server: SpinTestServer):
|
||||
"""Test clearing notifications when there are no keys to clear."""
|
||||
|
||||
user_id = "test-user-no-notifications"
|
||||
|
||||
with patch("backend.executor.manager.redis") as mock_redis_module:
|
||||
|
||||
mock_redis_client = MagicMock()
|
||||
# get_redis_async is an async function, so we need AsyncMock for it
|
||||
mock_redis_module.get_redis_async = AsyncMock(return_value=mock_redis_client)
|
||||
|
||||
# Mock scan_iter to return no keys as an async iterator
|
||||
mock_redis_client.scan_iter.return_value = async_iter([])
|
||||
|
||||
# Clear notifications
|
||||
result = await clear_insufficient_funds_notifications(user_id)
|
||||
|
||||
# Verify delete was not called
|
||||
mock_redis_client.delete.assert_not_called()
|
||||
|
||||
# Verify return value
|
||||
assert result == 0
|
||||
|
||||
|
||||
@pytest.mark.asyncio(loop_scope="session")
|
||||
async def test_clear_insufficient_funds_notifications_handles_redis_error(
|
||||
server: SpinTestServer,
|
||||
):
|
||||
"""Test that clearing notifications handles Redis errors gracefully."""
|
||||
|
||||
user_id = "test-user-redis-error"
|
||||
|
||||
with patch("backend.executor.manager.redis") as mock_redis_module:
|
||||
|
||||
# Mock get_redis_async to raise an error
|
||||
mock_redis_module.get_redis_async = AsyncMock(
|
||||
side_effect=Exception("Redis connection failed")
|
||||
)
|
||||
|
||||
# Clear notifications should not raise, just return 0
|
||||
result = await clear_insufficient_funds_notifications(user_id)
|
||||
|
||||
# Verify it returned 0 (graceful failure)
|
||||
assert result == 0
|
||||
|
||||
|
||||
@pytest.mark.asyncio(loop_scope="session")
|
||||
async def test_handle_insufficient_funds_continues_on_redis_error(
|
||||
server: SpinTestServer,
|
||||
):
|
||||
"""Test that both email and Discord notifications are still sent when Redis fails."""
|
||||
|
||||
execution_processor = ExecutionProcessor()
|
||||
user_id = "test-user-123"
|
||||
graph_id = "test-graph-456"
|
||||
error = InsufficientBalanceError(
|
||||
message="Insufficient balance",
|
||||
user_id=user_id,
|
||||
balance=72,
|
||||
amount=-714,
|
||||
)
|
||||
|
||||
with patch(
|
||||
"backend.executor.manager.queue_notification"
|
||||
) as mock_queue_notif, patch(
|
||||
"backend.executor.manager.get_notification_manager_client"
|
||||
) as mock_get_client, patch(
|
||||
"backend.executor.manager.settings"
|
||||
) as mock_settings, patch(
|
||||
"backend.executor.manager.redis"
|
||||
) as mock_redis_module:
|
||||
|
||||
mock_client = MagicMock()
|
||||
mock_get_client.return_value = mock_client
|
||||
mock_settings.config.frontend_base_url = "https://test.com"
|
||||
|
||||
# Mock Redis to raise an error
|
||||
mock_redis_client = MagicMock()
|
||||
mock_redis_module.get_redis.return_value = mock_redis_client
|
||||
mock_redis_client.set.side_effect = Exception("Redis connection error")
|
||||
|
||||
mock_db_client = MagicMock()
|
||||
mock_graph_metadata = MagicMock()
|
||||
mock_graph_metadata.name = "Test Agent"
|
||||
mock_db_client.get_graph_metadata.return_value = mock_graph_metadata
|
||||
mock_db_client.get_user_email_by_id.return_value = "test@example.com"
|
||||
|
||||
# Test the insufficient funds handler
|
||||
execution_processor._handle_insufficient_funds_notif(
|
||||
db_client=mock_db_client,
|
||||
user_id=user_id,
|
||||
graph_id=graph_id,
|
||||
e=error,
|
||||
)
|
||||
|
||||
# Verify email notification was still queued despite Redis error
|
||||
mock_queue_notif.assert_called_once()
|
||||
|
||||
# Verify Discord alert was still sent despite Redis error
|
||||
mock_client.discord_system_alert.assert_called_once()
|
||||
|
||||
|
||||
@pytest.mark.asyncio(loop_scope="session")
|
||||
async def test_add_transaction_clears_notifications_on_grant(server: SpinTestServer):
|
||||
"""Test that _add_transaction clears notification flags when adding GRANT credits."""
|
||||
from prisma.enums import CreditTransactionType
|
||||
|
||||
from backend.data.credit import UserCredit
|
||||
|
||||
user_id = "test-user-grant-clear"
|
||||
|
||||
with patch("backend.data.credit.query_raw_with_schema") as mock_query, patch(
|
||||
"backend.executor.manager.redis"
|
||||
) as mock_redis_module:
|
||||
|
||||
# Mock the query to return a successful transaction
|
||||
mock_query.return_value = [{"balance": 1000, "transactionKey": "test-tx-key"}]
|
||||
|
||||
# Mock async Redis for notification clearing
|
||||
mock_redis_client = MagicMock()
|
||||
mock_redis_module.get_redis_async = AsyncMock(return_value=mock_redis_client)
|
||||
mock_redis_client.scan_iter.return_value = async_iter(
|
||||
[f"{INSUFFICIENT_FUNDS_NOTIFIED_PREFIX}:{user_id}:graph-1"]
|
||||
)
|
||||
mock_redis_client.delete = AsyncMock(return_value=1)
|
||||
|
||||
# Create a concrete instance
|
||||
credit_model = UserCredit()
|
||||
|
||||
# Call _add_transaction with GRANT type (should clear notifications)
|
||||
await credit_model._add_transaction(
|
||||
user_id=user_id,
|
||||
amount=500, # Positive amount
|
||||
transaction_type=CreditTransactionType.GRANT,
|
||||
is_active=True, # Active transaction
|
||||
)
|
||||
|
||||
# Verify notification clearing was called
|
||||
mock_redis_module.get_redis_async.assert_called_once()
|
||||
mock_redis_client.scan_iter.assert_called_once_with(
|
||||
match=f"{INSUFFICIENT_FUNDS_NOTIFIED_PREFIX}:{user_id}:*"
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.asyncio(loop_scope="session")
|
||||
async def test_add_transaction_clears_notifications_on_top_up(server: SpinTestServer):
|
||||
"""Test that _add_transaction clears notification flags when adding TOP_UP credits."""
|
||||
from prisma.enums import CreditTransactionType
|
||||
|
||||
from backend.data.credit import UserCredit
|
||||
|
||||
user_id = "test-user-topup-clear"
|
||||
|
||||
with patch("backend.data.credit.query_raw_with_schema") as mock_query, patch(
|
||||
"backend.executor.manager.redis"
|
||||
) as mock_redis_module:
|
||||
|
||||
# Mock the query to return a successful transaction
|
||||
mock_query.return_value = [{"balance": 2000, "transactionKey": "test-tx-key-2"}]
|
||||
|
||||
# Mock async Redis for notification clearing
|
||||
mock_redis_client = MagicMock()
|
||||
mock_redis_module.get_redis_async = AsyncMock(return_value=mock_redis_client)
|
||||
mock_redis_client.scan_iter.return_value = async_iter([])
|
||||
mock_redis_client.delete = AsyncMock(return_value=0)
|
||||
|
||||
credit_model = UserCredit()
|
||||
|
||||
# Call _add_transaction with TOP_UP type (should clear notifications)
|
||||
await credit_model._add_transaction(
|
||||
user_id=user_id,
|
||||
amount=1000, # Positive amount
|
||||
transaction_type=CreditTransactionType.TOP_UP,
|
||||
is_active=True,
|
||||
)
|
||||
|
||||
# Verify notification clearing was attempted
|
||||
mock_redis_module.get_redis_async.assert_called_once()
|
||||
|
||||
|
||||
@pytest.mark.asyncio(loop_scope="session")
|
||||
async def test_add_transaction_skips_clearing_for_inactive_transaction(
|
||||
server: SpinTestServer,
|
||||
):
|
||||
"""Test that _add_transaction does NOT clear notifications for inactive transactions."""
|
||||
from prisma.enums import CreditTransactionType
|
||||
|
||||
from backend.data.credit import UserCredit
|
||||
|
||||
user_id = "test-user-inactive"
|
||||
|
||||
with patch("backend.data.credit.query_raw_with_schema") as mock_query, patch(
|
||||
"backend.executor.manager.redis"
|
||||
) as mock_redis_module:
|
||||
|
||||
# Mock the query to return a successful transaction
|
||||
mock_query.return_value = [{"balance": 500, "transactionKey": "test-tx-key-3"}]
|
||||
|
||||
# Mock async Redis
|
||||
mock_redis_client = MagicMock()
|
||||
mock_redis_module.get_redis_async = AsyncMock(return_value=mock_redis_client)
|
||||
|
||||
credit_model = UserCredit()
|
||||
|
||||
# Call _add_transaction with is_active=False (should NOT clear notifications)
|
||||
await credit_model._add_transaction(
|
||||
user_id=user_id,
|
||||
amount=500,
|
||||
transaction_type=CreditTransactionType.TOP_UP,
|
||||
is_active=False, # Inactive - pending Stripe payment
|
||||
)
|
||||
|
||||
# Verify notification clearing was NOT called
|
||||
mock_redis_module.get_redis_async.assert_not_called()
|
||||
|
||||
|
||||
@pytest.mark.asyncio(loop_scope="session")
|
||||
async def test_add_transaction_skips_clearing_for_usage_transaction(
|
||||
server: SpinTestServer,
|
||||
):
|
||||
"""Test that _add_transaction does NOT clear notifications for USAGE transactions."""
|
||||
from prisma.enums import CreditTransactionType
|
||||
|
||||
from backend.data.credit import UserCredit
|
||||
|
||||
user_id = "test-user-usage"
|
||||
|
||||
with patch("backend.data.credit.query_raw_with_schema") as mock_query, patch(
|
||||
"backend.executor.manager.redis"
|
||||
) as mock_redis_module:
|
||||
|
||||
# Mock the query to return a successful transaction
|
||||
mock_query.return_value = [{"balance": 400, "transactionKey": "test-tx-key-4"}]
|
||||
|
||||
# Mock async Redis
|
||||
mock_redis_client = MagicMock()
|
||||
mock_redis_module.get_redis_async = AsyncMock(return_value=mock_redis_client)
|
||||
|
||||
credit_model = UserCredit()
|
||||
|
||||
# Call _add_transaction with USAGE type (spending, should NOT clear)
|
||||
await credit_model._add_transaction(
|
||||
user_id=user_id,
|
||||
amount=-100, # Negative - spending credits
|
||||
transaction_type=CreditTransactionType.USAGE,
|
||||
is_active=True,
|
||||
)
|
||||
|
||||
# Verify notification clearing was NOT called
|
||||
mock_redis_module.get_redis_async.assert_not_called()
|
||||
|
||||
|
||||
@pytest.mark.asyncio(loop_scope="session")
|
||||
async def test_enable_transaction_clears_notifications(server: SpinTestServer):
|
||||
"""Test that _enable_transaction clears notification flags when enabling a TOP_UP."""
|
||||
from prisma.enums import CreditTransactionType
|
||||
|
||||
from backend.data.credit import UserCredit
|
||||
|
||||
user_id = "test-user-enable"
|
||||
|
||||
with patch("backend.data.credit.CreditTransaction") as mock_credit_tx, patch(
|
||||
"backend.data.credit.query_raw_with_schema"
|
||||
) as mock_query, patch("backend.executor.manager.redis") as mock_redis_module:
|
||||
|
||||
# Mock finding the pending transaction
|
||||
mock_transaction = MagicMock()
|
||||
mock_transaction.amount = 1000
|
||||
mock_transaction.type = CreditTransactionType.TOP_UP
|
||||
mock_credit_tx.prisma.return_value.find_first = AsyncMock(
|
||||
return_value=mock_transaction
|
||||
)
|
||||
|
||||
# Mock the query to return updated balance
|
||||
mock_query.return_value = [{"balance": 1500}]
|
||||
|
||||
# Mock async Redis for notification clearing
|
||||
mock_redis_client = MagicMock()
|
||||
mock_redis_module.get_redis_async = AsyncMock(return_value=mock_redis_client)
|
||||
mock_redis_client.scan_iter.return_value = async_iter(
|
||||
[f"{INSUFFICIENT_FUNDS_NOTIFIED_PREFIX}:{user_id}:graph-1"]
|
||||
)
|
||||
mock_redis_client.delete = AsyncMock(return_value=1)
|
||||
|
||||
credit_model = UserCredit()
|
||||
|
||||
# Call _enable_transaction (simulates Stripe checkout completion)
|
||||
from backend.util.json import SafeJson
|
||||
|
||||
await credit_model._enable_transaction(
|
||||
transaction_key="cs_test_123",
|
||||
user_id=user_id,
|
||||
metadata=SafeJson({"payment": "completed"}),
|
||||
)
|
||||
|
||||
# Verify notification clearing was called
|
||||
mock_redis_module.get_redis_async.assert_called_once()
|
||||
mock_redis_client.scan_iter.assert_called_once_with(
|
||||
match=f"{INSUFFICIENT_FUNDS_NOTIFIED_PREFIX}:{user_id}:*"
|
||||
)
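Taken together, these tests pin down the expected contract of clear_insufficient_funds_notifications: scan for keys matching {INSUFFICIENT_FUNDS_NOTIFIED_PREFIX}:{user_id}:*, delete them, return the number cleared, and fall back to 0 on any Redis error. A rough sketch of that contract (illustrative only; the real implementation lives in backend.executor.manager and may differ):

async def clear_insufficient_funds_notifications(user_id: str) -> int:
    # Illustrative sketch derived from the test expectations above.
    try:
        redis_client = await redis.get_redis_async()
        keys = [
            key
            async for key in redis_client.scan_iter(
                match=f"{INSUFFICIENT_FUNDS_NOTIFIED_PREFIX}:{user_id}:*"
            )
        ]
        if not keys:
            return 0  # nothing to clear; delete() must not be called
        return await redis_client.delete(*keys)
    except Exception:
        # Graceful failure: never raise, just report zero keys cleared.
        return 0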
|
||||
@@ -0,0 +1,81 @@
-- DropIndex
DROP INDEX "StoreListingVersion_storeListingId_version_key";

-- CreateTable
CREATE TABLE "UserBusinessUnderstanding" (
    "id" TEXT NOT NULL,
    "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "userId" TEXT NOT NULL,
    "userName" TEXT,
    "jobTitle" TEXT,
    "businessName" TEXT,
    "industry" TEXT,
    "businessSize" TEXT,
    "userRole" TEXT,
    "keyWorkflows" JSONB,
    "dailyActivities" JSONB,
    "painPoints" JSONB,
    "bottlenecks" JSONB,
    "manualTasks" JSONB,
    "automationGoals" JSONB,
    "currentSoftware" JSONB,
    "existingAutomation" JSONB,
    "additionalNotes" TEXT,

    CONSTRAINT "UserBusinessUnderstanding_pkey" PRIMARY KEY ("id")
);

-- CreateTable
CREATE TABLE "ChatSession" (
    "id" TEXT NOT NULL,
    "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "userId" TEXT,
    "title" TEXT,
    "credentials" JSONB NOT NULL DEFAULT '{}',
    "successfulAgentRuns" JSONB NOT NULL DEFAULT '{}',
    "successfulAgentSchedules" JSONB NOT NULL DEFAULT '{}',
    "totalPromptTokens" INTEGER NOT NULL DEFAULT 0,
    "totalCompletionTokens" INTEGER NOT NULL DEFAULT 0,

    CONSTRAINT "ChatSession_pkey" PRIMARY KEY ("id")
);

-- CreateTable
CREATE TABLE "ChatMessage" (
    "id" TEXT NOT NULL,
    "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "sessionId" TEXT NOT NULL,
    "role" TEXT NOT NULL,
    "content" TEXT,
    "name" TEXT,
    "toolCallId" TEXT,
    "refusal" TEXT,
    "toolCalls" JSONB,
    "functionCall" JSONB,
    "sequence" INTEGER NOT NULL,

    CONSTRAINT "ChatMessage_pkey" PRIMARY KEY ("id")
);

-- CreateIndex
CREATE UNIQUE INDEX "UserBusinessUnderstanding_userId_key" ON "UserBusinessUnderstanding"("userId");

-- CreateIndex
CREATE INDEX "UserBusinessUnderstanding_userId_idx" ON "UserBusinessUnderstanding"("userId");

-- CreateIndex
CREATE INDEX "ChatSession_userId_updatedAt_idx" ON "ChatSession"("userId", "updatedAt");

-- CreateIndex
CREATE INDEX "ChatMessage_sessionId_sequence_idx" ON "ChatMessage"("sessionId", "sequence");

-- CreateIndex
CREATE UNIQUE INDEX "ChatMessage_sessionId_sequence_key" ON "ChatMessage"("sessionId", "sequence");

-- AddForeignKey
ALTER TABLE "UserBusinessUnderstanding" ADD CONSTRAINT "UserBusinessUnderstanding_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE;

-- AddForeignKey
ALTER TABLE "ChatMessage" ADD CONSTRAINT "ChatMessage_sessionId_fkey" FOREIGN KEY ("sessionId") REFERENCES "ChatSession"("id") ON DELETE CASCADE ON UPDATE CASCADE;
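Because ("sessionId", "sequence") is unique, chat messages can be appended by computing the next sequence per session. A rough sketch with the Prisma Python client (helper name and exact call shapes are assumptions for illustration, not taken from the application code):

from prisma.models import ChatMessage


async def append_chat_message(session_id: str, role: str, content: str) -> ChatMessage:
    # Illustrative: next sequence = last sequence + 1 for this session.
    last = await ChatMessage.prisma().find_first(
        where={"sessionId": session_id},
        order={"sequence": "desc"},
    )
    next_sequence = (last.sequence + 1) if last else 0
    return await ChatMessage.prisma().create(
        data={
            "sessionId": session_id,
            "role": role,
            "content": content,
            "sequence": next_sequence,
        }
    )

Concurrent writers racing to the same sequence would be rejected by the unique index rather than silently reordered.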
autogpt_platform/backend/poetry.lock | 24 (generated)
@@ -1906,16 +1906,32 @@ httpx = {version = ">=0.26,<0.29", extras = ["http2"]}
|
||||
pydantic = ">=1.10,<3"
|
||||
pyjwt = ">=2.10.1,<3.0.0"
|
||||
|
||||
[[package]]
|
||||
name = "gravitas-md2gdocs"
|
||||
version = "0.1.0"
|
||||
description = "Convert Markdown to Google Docs API requests"
|
||||
optional = false
|
||||
python-versions = ">=3.10"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "gravitas_md2gdocs-0.1.0-py3-none-any.whl", hash = "sha256:0cb0627779fdd65c1604818af4142eea1b25d055060183363de1bae4d9e46508"},
|
||||
{file = "gravitas_md2gdocs-0.1.0.tar.gz", hash = "sha256:bb3122fe9fa35c528f3f00b785d3f1398d350082d5d03f60f56c895bdcc68033"},
|
||||
]
|
||||
|
||||
[package.extras]
|
||||
dev = ["google-auth-oauthlib (>=1.0.0)", "pytest (>=7.0.0)", "pytest-cov (>=4.0.0)", "python-dotenv (>=1.0.0)", "ruff (>=0.1.0)"]
|
||||
google = ["google-api-python-client (>=2.0.0)", "google-auth (>=2.0.0)"]
|
||||
|
||||
[[package]]
|
||||
name = "gravitasml"
|
||||
version = "0.1.3"
|
||||
version = "0.1.4"
|
||||
description = ""
|
||||
optional = false
|
||||
python-versions = "<4.0,>=3.10"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "gravitasml-0.1.3-py3-none-any.whl", hash = "sha256:51ff98b4564b7a61f7796f18d5f2558b919d30b3722579296089645b7bc18b85"},
|
||||
{file = "gravitasml-0.1.3.tar.gz", hash = "sha256:04d240b9fa35878252d57a36032130b6516487468847fcdced1022c032a20f57"},
|
||||
{file = "gravitasml-0.1.4-py3-none-any.whl", hash = "sha256:671a18b11d3d8a0e270c6a80c72cd058458b18d5ef7560d00010e962ab1bca74"},
|
||||
{file = "gravitasml-0.1.4.tar.gz", hash = "sha256:35d0d9fec7431817482d53d9c976e375557c3e041d1eb6928e809324a8c866e3"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -7279,4 +7295,4 @@ cffi = ["cffi (>=1.11)"]
|
||||
[metadata]
|
||||
lock-version = "2.1"
|
||||
python-versions = ">=3.10,<3.14"
|
||||
content-hash = "13b191b2a1989d3321ff713c66ff6f5f4f3b82d15df4d407e0e5dbf87d7522c4"
|
||||
content-hash = "a93ba0cea3b465cb6ec3e3f258b383b09f84ea352ccfdbfa112902cde5653fc6"
|
||||
|
||||
@@ -27,7 +27,7 @@ google-api-python-client = "^2.177.0"
|
||||
google-auth-oauthlib = "^1.2.2"
|
||||
google-cloud-storage = "^3.2.0"
|
||||
googlemaps = "^4.10.0"
|
||||
gravitasml = "^0.1.3"
|
||||
gravitasml = "^0.1.4"
|
||||
groq = "^0.30.0"
|
||||
html2text = "^2024.2.26"
|
||||
jinja2 = "^3.1.6"
|
||||
@@ -82,6 +82,7 @@ firecrawl-py = "^4.3.6"
|
||||
exa-py = "^1.14.20"
|
||||
croniter = "^6.0.0"
|
||||
stagehand = "^0.5.1"
|
||||
gravitas-md2gdocs = "^0.1.0"
|
||||
|
||||
[tool.poetry.group.dev.dependencies]
|
||||
aiohappyeyeballs = "^2.6.1"
|
||||
|
||||
@@ -53,6 +53,7 @@ model User {
|
||||
|
||||
Profile Profile[]
|
||||
UserOnboarding UserOnboarding?
|
||||
BusinessUnderstanding UserBusinessUnderstanding?
|
||||
BuilderSearchHistory BuilderSearchHistory[]
|
||||
StoreListings StoreListing[]
|
||||
StoreListingReviews StoreListingReview[]
|
||||
@@ -121,19 +122,109 @@ model UserOnboarding {
|
||||
User User @relation(fields: [userId], references: [id], onDelete: Cascade)
|
||||
}
|
||||
|
||||
model UserBusinessUnderstanding {
|
||||
id String @id @default(uuid())
|
||||
createdAt DateTime @default(now())
|
||||
updatedAt DateTime @default(now()) @updatedAt
|
||||
|
||||
userId String @unique
|
||||
User User @relation(fields: [userId], references: [id], onDelete: Cascade)
|
||||
|
||||
// User info
|
||||
userName String?
|
||||
jobTitle String?
|
||||
|
||||
// Business basics (string columns)
|
||||
businessName String?
|
||||
industry String?
|
||||
businessSize String? // "1-10", "11-50", "51-200", "201-1000", "1000+"
|
||||
userRole String? // Role in organization context (e.g., "decision maker", "implementer")
|
||||
|
||||
// Processes & activities (JSON arrays)
|
||||
keyWorkflows Json?
|
||||
dailyActivities Json?
|
||||
|
||||
// Pain points & goals (JSON arrays)
|
||||
painPoints Json?
|
||||
bottlenecks Json?
|
||||
manualTasks Json?
|
||||
automationGoals Json?
|
||||
|
||||
// Current tools (JSON arrays)
|
||||
currentSoftware Json?
|
||||
existingAutomation Json?
|
||||
|
||||
additionalNotes String?
|
||||
|
||||
@@index([userId])
|
||||
}
|
||||
|
||||
model BuilderSearchHistory {
|
||||
id String @id @default(uuid())
|
||||
createdAt DateTime @default(now())
|
||||
updatedAt DateTime @default(now()) @updatedAt
|
||||
|
||||
searchQuery String
|
||||
filter String[] @default([])
|
||||
byCreator String[] @default([])
|
||||
filter String[] @default([])
|
||||
byCreator String[] @default([])
|
||||
|
||||
userId String
|
||||
User User @relation(fields: [userId], references: [id], onDelete: Cascade)
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////
|
||||
////////////////////////////////////////////////////////////
|
||||
//////////////// CHAT SESSION TABLES ///////////////////
|
||||
////////////////////////////////////////////////////////////
|
||||
////////////////////////////////////////////////////////////
|
||||
|
||||
model ChatSession {
|
||||
id String @id @default(uuid())
|
||||
createdAt DateTime @default(now())
|
||||
updatedAt DateTime @default(now()) @updatedAt
|
||||
|
||||
userId String?
|
||||
|
||||
// Session metadata
|
||||
title String?
|
||||
credentials Json @default("{}") // Map of provider -> credential metadata
|
||||
|
||||
// Rate limiting counters (stored as JSON maps)
|
||||
successfulAgentRuns Json @default("{}") // Map of graph_id -> count
|
||||
successfulAgentSchedules Json @default("{}") // Map of graph_id -> count
|
||||
|
||||
// Usage tracking
|
||||
totalPromptTokens Int @default(0)
|
||||
totalCompletionTokens Int @default(0)
|
||||
|
||||
Messages ChatMessage[]
|
||||
|
||||
@@index([userId, updatedAt])
|
||||
}
|
||||
|
||||
model ChatMessage {
|
||||
id String @id @default(uuid())
|
||||
createdAt DateTime @default(now())
|
||||
|
||||
sessionId String
|
||||
Session ChatSession @relation(fields: [sessionId], references: [id], onDelete: Cascade)
|
||||
|
||||
// Message content
|
||||
role String // "user", "assistant", "system", "tool", "function"
|
||||
content String?
|
||||
name String?
|
||||
toolCallId String?
|
||||
refusal String?
|
||||
toolCalls Json? // List of tool calls for assistant messages
|
||||
functionCall Json? // Deprecated but kept for compatibility
|
||||
|
||||
// Ordering within session
|
||||
sequence Int
|
||||
|
||||
@@unique([sessionId, sequence])
|
||||
@@index([sessionId, sequence])
|
||||
}
|
||||
|
||||
// This model describes the Agent Graph/Flow (Multi Agent System).
|
||||
model AgentGraph {
|
||||
id String @default(uuid())
|
||||
@@ -721,26 +812,26 @@ view StoreAgent {
|
||||
storeListingVersionId String
|
||||
updated_at DateTime
|
||||
|
||||
slug String
|
||||
agent_name String
|
||||
agent_video String?
|
||||
agent_output_demo String?
|
||||
agent_image String[]
|
||||
slug String
|
||||
agent_name String
|
||||
agent_video String?
|
||||
agent_output_demo String?
|
||||
agent_image String[]
|
||||
|
||||
featured Boolean @default(false)
|
||||
creator_username String?
|
||||
creator_avatar String?
|
||||
sub_heading String
|
||||
description String
|
||||
categories String[]
|
||||
search Unsupported("tsvector")? @default(dbgenerated("''::tsvector"))
|
||||
runs Int
|
||||
rating Float
|
||||
versions String[]
|
||||
agentGraphVersions String[]
|
||||
agentGraphId String
|
||||
is_available Boolean @default(true)
|
||||
useForOnboarding Boolean @default(false)
|
||||
featured Boolean @default(false)
|
||||
creator_username String?
|
||||
creator_avatar String?
|
||||
sub_heading String
|
||||
description String
|
||||
categories String[]
|
||||
search Unsupported("tsvector")? @default(dbgenerated("''::tsvector"))
|
||||
runs Int
|
||||
rating Float
|
||||
versions String[]
|
||||
agentGraphVersions String[]
|
||||
agentGraphId String
|
||||
is_available Boolean @default(true)
|
||||
useForOnboarding Boolean @default(false)
|
||||
|
||||
// Materialized views used (refreshed every 15 minutes via pg_cron):
|
||||
// - mv_agent_run_counts - Pre-aggregated agent execution counts by agentGraphId
|
||||
@@ -856,14 +947,14 @@ model StoreListingVersion {
|
||||
AgentGraph AgentGraph @relation(fields: [agentGraphId, agentGraphVersion], references: [id, version])
|
||||
|
||||
// Content fields
|
||||
name String
|
||||
subHeading String
|
||||
videoUrl String?
|
||||
agentOutputDemoUrl String?
|
||||
imageUrls String[]
|
||||
description String
|
||||
instructions String?
|
||||
categories String[]
|
||||
name String
|
||||
subHeading String
|
||||
videoUrl String?
|
||||
agentOutputDemoUrl String?
|
||||
imageUrls String[]
|
||||
description String
|
||||
instructions String?
|
||||
categories String[]
|
||||
|
||||
isFeatured Boolean @default(false)
|
||||
|
||||
@@ -899,7 +990,6 @@ model StoreListingVersion {
|
||||
// Reviews for this specific version
|
||||
Reviews StoreListingReview[]
|
||||
|
||||
@@unique([storeListingId, version])
|
||||
@@index([storeListingId, submissionStatus, isAvailable])
|
||||
@@index([submissionStatus])
|
||||
@@index([reviewerId])
|
||||
@@ -998,16 +1088,16 @@ model OAuthApplication {
|
||||
updatedAt DateTime @updatedAt
|
||||
|
||||
// Application metadata
|
||||
name String
|
||||
description String?
|
||||
logoUrl String? // URL to app logo stored in GCS
|
||||
clientId String @unique
|
||||
clientSecret String // Hashed with Scrypt (same as API keys)
|
||||
clientSecretSalt String // Salt for Scrypt hashing
|
||||
name String
|
||||
description String?
|
||||
logoUrl String? // URL to app logo stored in GCS
|
||||
clientId String @unique
|
||||
clientSecret String // Hashed with Scrypt (same as API keys)
|
||||
clientSecretSalt String // Salt for Scrypt hashing
|
||||
|
||||
// OAuth configuration
|
||||
redirectUris String[] // Allowed callback URLs
|
||||
grantTypes String[] @default(["authorization_code", "refresh_token"])
|
||||
grantTypes String[] @default(["authorization_code", "refresh_token"])
|
||||
scopes APIKeyPermission[] // Which permissions the app can request
|
||||
|
||||
// Application management
|
||||
|
||||
@@ -0,0 +1,113 @@
|
||||
from unittest.mock import Mock
|
||||
|
||||
from backend.blocks.google.docs import GoogleDocsFormatTextBlock
|
||||
|
||||
|
||||
def _make_mock_docs_service() -> Mock:
|
||||
service = Mock()
|
||||
# Ensure chained call exists: service.documents().batchUpdate(...).execute()
|
||||
service.documents.return_value.batchUpdate.return_value.execute.return_value = {}
|
||||
return service
|
||||
|
||||
|
||||
def test_format_text_parses_shorthand_hex_color():
|
||||
block = GoogleDocsFormatTextBlock()
|
||||
service = _make_mock_docs_service()
|
||||
|
||||
result = block._format_text(
|
||||
service,
|
||||
document_id="doc_1",
|
||||
start_index=1,
|
||||
end_index=2,
|
||||
bold=False,
|
||||
italic=False,
|
||||
underline=False,
|
||||
font_size=0,
|
||||
foreground_color="#FFF",
|
||||
)
|
||||
|
||||
assert result["success"] is True
|
||||
|
||||
# Verify request body contains correct rgbColor for white.
|
||||
_, kwargs = service.documents.return_value.batchUpdate.call_args
|
||||
requests = kwargs["body"]["requests"]
|
||||
rgb = requests[0]["updateTextStyle"]["textStyle"]["foregroundColor"]["color"][
|
||||
"rgbColor"
|
||||
]
|
||||
assert rgb == {"red": 1.0, "green": 1.0, "blue": 1.0}
|
||||
|
||||
|
||||
def test_format_text_parses_full_hex_color():
|
||||
block = GoogleDocsFormatTextBlock()
|
||||
service = _make_mock_docs_service()
|
||||
|
||||
result = block._format_text(
|
||||
service,
|
||||
document_id="doc_1",
|
||||
start_index=1,
|
||||
end_index=2,
|
||||
bold=False,
|
||||
italic=False,
|
||||
underline=False,
|
||||
font_size=0,
|
||||
foreground_color="#FF0000",
|
||||
)
|
||||
|
||||
assert result["success"] is True
|
||||
|
||||
_, kwargs = service.documents.return_value.batchUpdate.call_args
|
||||
requests = kwargs["body"]["requests"]
|
||||
rgb = requests[0]["updateTextStyle"]["textStyle"]["foregroundColor"]["color"][
|
||||
"rgbColor"
|
||||
]
|
||||
assert rgb == {"red": 1.0, "green": 0.0, "blue": 0.0}
|
||||
|
||||
|
||||
def test_format_text_ignores_invalid_color_when_other_fields_present():
|
||||
block = GoogleDocsFormatTextBlock()
|
||||
service = _make_mock_docs_service()
|
||||
|
||||
result = block._format_text(
|
||||
service,
|
||||
document_id="doc_1",
|
||||
start_index=1,
|
||||
end_index=2,
|
||||
bold=True,
|
||||
italic=False,
|
||||
underline=False,
|
||||
font_size=0,
|
||||
foreground_color="#GGG",
|
||||
)
|
||||
|
||||
assert result["success"] is True
|
||||
assert "warning" in result
|
||||
|
||||
# Should still apply bold, but should NOT include foregroundColor in textStyle.
|
||||
_, kwargs = service.documents.return_value.batchUpdate.call_args
|
||||
requests = kwargs["body"]["requests"]
|
||||
text_style = requests[0]["updateTextStyle"]["textStyle"]
|
||||
fields = requests[0]["updateTextStyle"]["fields"]
|
||||
|
||||
assert text_style == {"bold": True}
|
||||
assert fields == "bold"
|
||||
|
||||
|
||||
def test_format_text_invalid_color_only_does_not_call_api():
|
||||
block = GoogleDocsFormatTextBlock()
|
||||
service = _make_mock_docs_service()
|
||||
|
||||
result = block._format_text(
|
||||
service,
|
||||
document_id="doc_1",
|
||||
start_index=1,
|
||||
end_index=2,
|
||||
bold=False,
|
||||
italic=False,
|
||||
underline=False,
|
||||
font_size=0,
|
||||
foreground_color="#F",
|
||||
)
|
||||
|
||||
assert result["success"] is False
|
||||
assert "Invalid foreground_color" in result["message"]
|
||||
service.documents.return_value.batchUpdate.assert_not_called()
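The color cases above imply a parser that accepts 3- and 6-digit hex strings and converts them to Google Docs rgbColor floats in the 0-1 range, rejecting anything else. A minimal sketch of such a parser (hypothetical helper; not necessarily how GoogleDocsFormatTextBlock implements it):

def parse_hex_color(value: str) -> dict | None:
    # Return an rgbColor dict for "#RGB" or "#RRGGBB", or None if invalid.
    value = value.lstrip("#")
    if len(value) == 3:
        value = "".join(ch * 2 for ch in value)  # expand shorthand, e.g. FFF -> FFFFFF
    if len(value) != 6:
        return None
    try:
        r, g, b = (int(value[i : i + 2], 16) for i in (0, 2, 4))
    except ValueError:  # non-hex characters such as "#GGG"
        return None
    return {"red": r / 255, "green": g / 255, "blue": b / 255}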
|
||||
@@ -37,6 +37,18 @@ class TestTranscribeYoutubeVideoBlock:
|
||||
video_id = self.youtube_block.extract_video_id(url)
|
||||
assert video_id == "dQw4w9WgXcQ"
|
||||
|
||||
def test_extract_video_id_shorts_url(self):
|
||||
"""Test extracting video ID from YouTube Shorts URL."""
|
||||
url = "https://www.youtube.com/shorts/dtUqwMu3e-g"
|
||||
video_id = self.youtube_block.extract_video_id(url)
|
||||
assert video_id == "dtUqwMu3e-g"
|
||||
|
||||
def test_extract_video_id_shorts_url_with_params(self):
|
||||
"""Test extracting video ID from YouTube Shorts URL with query parameters."""
|
||||
url = "https://www.youtube.com/shorts/dtUqwMu3e-g?feature=share"
|
||||
video_id = self.youtube_block.extract_video_id(url)
|
||||
assert video_id == "dtUqwMu3e-g"
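The Shorts cases suggest the extractor also matches /shorts/<id> paths and drops query parameters. A hedged sketch of one way to do that (illustrative; not the block's actual extract_video_id):

import re
from urllib.parse import parse_qs, urlparse


def extract_video_id(url: str) -> str | None:
    # Illustrative: handle watch URLs, youtu.be links, and Shorts paths.
    parsed = urlparse(url)
    if parsed.path.startswith("/shorts/"):
        return parsed.path.split("/shorts/")[1].split("/")[0]
    if parsed.hostname == "youtu.be":
        return parsed.path.lstrip("/")
    if parsed.path == "/watch":
        return parse_qs(parsed.query).get("v", [None])[0]
    match = re.search(r"/(?:embed|v)/([A-Za-z0-9_-]{11})", parsed.path)
    return match.group(1) if match else None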
|
||||
|
||||
@patch("backend.blocks.youtube.YouTubeTranscriptApi")
|
||||
def test_get_transcript_english_available(self, mock_api_class):
|
||||
"""Test getting transcript when English is available."""
|
||||
|
||||
autogpt_platform/cloudflare_worker.js | 146 (new file)
@@ -0,0 +1,146 @@
|
||||
/**
|
||||
* Cloudflare Workers Script for docs.agpt.co → agpt.co/docs migration
|
||||
*
|
||||
* Deploy this script to handle all redirects with a single JavaScript file.
|
||||
* No rule limits, easy to maintain, handles all edge cases.
|
||||
*/
|
||||
|
||||
// URL mapping for special cases that don't follow patterns
|
||||
const SPECIAL_MAPPINGS = {
|
||||
// Root page
|
||||
'/': '/docs/platform',
|
||||
|
||||
// Special cases that don't follow standard patterns
|
||||
'/platform/d_id/': '/docs/integrations/block-integrations/d-id',
|
||||
'/platform/blocks/blocks/': '/docs/integrations',
|
||||
'/platform/blocks/decoder_block/': '/docs/integrations/block-integrations/text-decoder',
|
||||
'/platform/blocks/http': '/docs/integrations/block-integrations/send-web-request',
|
||||
'/platform/blocks/llm/': '/docs/integrations/block-integrations/ai-and-llm',
|
||||
'/platform/blocks/time_blocks': '/docs/integrations/block-integrations/time-and-date',
|
||||
'/platform/blocks/text_to_speech_block': '/docs/integrations/block-integrations/text-to-speech',
|
||||
'/platform/blocks/ai_shortform_video_block': '/docs/integrations/block-integrations/ai-shortform-video',
|
||||
'/platform/blocks/replicate_flux_advanced': '/docs/integrations/block-integrations/replicate-flux-advanced',
|
||||
'/platform/blocks/flux_kontext': '/docs/integrations/block-integrations/flux-kontext',
|
||||
'/platform/blocks/ai_condition/': '/docs/integrations/block-integrations/ai-condition',
|
||||
'/platform/blocks/email_block': '/docs/integrations/block-integrations/email',
|
||||
'/platform/blocks/google_maps': '/docs/integrations/block-integrations/google-maps',
|
||||
'/platform/blocks/google/gmail': '/docs/integrations/block-integrations/gmail',
|
||||
'/platform/blocks/github/issues/': '/docs/integrations/block-integrations/github-issues',
|
||||
'/platform/blocks/github/repo/': '/docs/integrations/block-integrations/github-repo',
|
||||
'/platform/blocks/github/pull_requests': '/docs/integrations/block-integrations/github-pull-requests',
|
||||
'/platform/blocks/twitter/twitter': '/docs/integrations/block-integrations/twitter',
|
||||
'/classic/setup/': '/docs/classic/setup/setting-up-autogpt-classic',
|
||||
'/code-of-conduct/': '/docs/classic/help-us-improve-autogpt/code-of-conduct',
|
||||
'/contributing/': '/docs/classic/contributing',
|
||||
'/contribute/': '/docs/contribute',
|
||||
'/forge/components/introduction/': '/docs/classic/forge/introduction'
|
||||
};
|
||||
|
||||
/**
|
||||
* Transform path by replacing underscores with hyphens and removing trailing slashes
|
||||
*/
|
||||
function transformPath(path) {
|
||||
return path.replace(/_/g, '-').replace(/\/$/, '');
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle docs.agpt.co redirects
|
||||
*/
|
||||
function handleDocsRedirect(url) {
|
||||
const pathname = url.pathname;
|
||||
|
||||
// Check special mappings first
|
||||
if (SPECIAL_MAPPINGS[pathname]) {
|
||||
return `https://agpt.co${SPECIAL_MAPPINGS[pathname]}`;
|
||||
}
|
||||
|
||||
// Pattern-based redirects
|
||||
|
||||
// Platform blocks: /platform/blocks/* → /docs/integrations/block-integrations/*
|
||||
if (pathname.startsWith('/platform/blocks/')) {
|
||||
const blockName = pathname.substring('/platform/blocks/'.length);
|
||||
const transformedName = transformPath(blockName);
|
||||
return `https://agpt.co/docs/integrations/block-integrations/${transformedName}`;
|
||||
}
|
||||
|
||||
// Platform contributing: /platform/contributing/* → /docs/platform/contributing/*
|
||||
if (pathname.startsWith('/platform/contributing/')) {
|
||||
const subPath = pathname.substring('/platform/contributing/'.length);
|
||||
return `https://agpt.co/docs/platform/contributing/${subPath}`;
|
||||
}
|
||||
|
||||
// Platform general: /platform/* → /docs/platform/* (with underscore→hyphen)
|
||||
if (pathname.startsWith('/platform/')) {
|
||||
const subPath = pathname.substring('/platform/'.length);
|
||||
const transformedPath = transformPath(subPath);
|
||||
return `https://agpt.co/docs/platform/${transformedPath}`;
|
||||
}
|
||||
|
||||
// Forge components: /forge/components/* → /docs/classic/forge/introduction/*
|
||||
if (pathname.startsWith('/forge/components/')) {
|
||||
const subPath = pathname.substring('/forge/components/'.length);
|
||||
return `https://agpt.co/docs/classic/forge/introduction/${subPath}`;
|
||||
}
|
||||
|
||||
// Forge general: /forge/* → /docs/classic/forge/*
|
||||
if (pathname.startsWith('/forge/')) {
|
||||
const subPath = pathname.substring('/forge/'.length);
|
||||
return `https://agpt.co/docs/classic/forge/${subPath}`;
|
||||
}
|
||||
|
||||
// Classic: /classic/* → /docs/classic/*
|
||||
if (pathname.startsWith('/classic/')) {
|
||||
const subPath = pathname.substring('/classic/'.length);
|
||||
return `https://agpt.co/docs/classic/${subPath}`;
|
||||
}
|
||||
|
||||
// Default fallback
|
||||
return 'https://agpt.co/docs/';
|
||||
}
|
||||
|
||||
/**
|
||||
* Main Worker function
|
||||
*/
|
||||
export default {
|
||||
async fetch(request, env, ctx) {
|
||||
const url = new URL(request.url);
|
||||
|
||||
// Only handle docs.agpt.co requests
|
||||
if (url.hostname === 'docs.agpt.co') {
|
||||
const redirectUrl = handleDocsRedirect(url);
|
||||
|
||||
return new Response(null, {
|
||||
status: 301,
|
||||
headers: {
|
||||
'Location': redirectUrl,
|
||||
'Cache-Control': 'max-age=300' // Cache redirects for 5 minutes
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// For non-docs requests, pass through or return 404
|
||||
return new Response('Not Found', { status: 404 });
|
||||
}
|
||||
};
|
||||
|
||||
// Test function for local development
|
||||
export function testRedirects() {
|
||||
const testCases = [
|
||||
'https://docs.agpt.co/',
|
||||
'https://docs.agpt.co/platform/getting-started/',
|
||||
'https://docs.agpt.co/platform/advanced_setup/',
|
||||
'https://docs.agpt.co/platform/blocks/basic/',
|
||||
'https://docs.agpt.co/platform/blocks/ai_condition/',
|
||||
'https://docs.agpt.co/classic/setup/',
|
||||
'https://docs.agpt.co/forge/components/agents/',
|
||||
'https://docs.agpt.co/contributing/',
|
||||
'https://docs.agpt.co/unknown-page'
|
||||
];
|
||||
|
||||
console.log('Testing redirects:');
|
||||
testCases.forEach(testUrl => {
|
||||
const url = new URL(testUrl);
|
||||
const result = handleDocsRedirect(url);
|
||||
console.log(`${testUrl} → ${result}`);
|
||||
});
|
||||
}
|
||||
@@ -46,14 +46,15 @@
|
||||
"@radix-ui/react-scroll-area": "1.2.10",
|
||||
"@radix-ui/react-select": "2.2.6",
|
||||
"@radix-ui/react-separator": "1.1.7",
|
||||
"@radix-ui/react-slider": "1.3.6",
|
||||
"@radix-ui/react-slot": "1.2.3",
|
||||
"@radix-ui/react-switch": "1.2.6",
|
||||
"@radix-ui/react-tabs": "1.1.13",
|
||||
"@radix-ui/react-toast": "1.2.15",
|
||||
"@radix-ui/react-tooltip": "1.2.8",
|
||||
"@rjsf/core": "5.24.13",
|
||||
"@rjsf/utils": "5.24.13",
|
||||
"@rjsf/validator-ajv8": "5.24.13",
|
||||
"@rjsf/core": "6.1.2",
|
||||
"@rjsf/utils": "6.1.2",
|
||||
"@rjsf/validator-ajv8": "6.1.2",
|
||||
"@sentry/nextjs": "10.27.0",
|
||||
"@supabase/ssr": "0.7.0",
|
||||
"@supabase/supabase-js": "2.78.0",
|
||||
@@ -69,6 +70,7 @@
|
||||
"cmdk": "1.1.1",
|
||||
"cookie": "1.0.2",
|
||||
"date-fns": "4.1.0",
|
||||
"dexie": "4.2.1",
|
||||
"dotenv": "17.2.3",
|
||||
"elliptic": "6.6.1",
|
||||
"embla-carousel-react": "8.6.0",
|
||||
|
||||
autogpt_platform/frontend/pnpm-lock.yaml | 3814 (generated; diff suppressed because it is too large)
@@ -1,4 +1,4 @@
import { OAuthPopupResultMessage } from "@/components/renderers/input-renderer/fields/CredentialField/models/OAuthCredentialModal/useOAuthCredentialModal";
import { OAuthPopupResultMessage } from "./types";
import { NextResponse } from "next/server";

// This route is intended to be used as the callback for integration OAuth flows,

@@ -0,0 +1,11 @@
export type OAuthPopupResultMessage = { message_type: "oauth_popup_result" } & (
  | {
      success: true;
      code: string;
      state: string;
    }
  | {
      success: false;
      message: string;
    }
);
@@ -16,6 +16,7 @@ import {
|
||||
SheetTitle,
|
||||
SheetTrigger,
|
||||
} from "@/components/__legacy__/ui/sheet";
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import {
|
||||
Tooltip,
|
||||
TooltipContent,
|
||||
@@ -25,7 +26,6 @@ import {
|
||||
import { BookOpenIcon } from "@phosphor-icons/react";
|
||||
import { useMemo } from "react";
|
||||
import { useShallow } from "zustand/react/shallow";
|
||||
import { BuilderActionButton } from "../BuilderActionButton";
|
||||
|
||||
export const AgentOutputs = ({ flowID }: { flowID: string | null }) => {
|
||||
const hasOutputs = useGraphStore(useShallow((state) => state.hasOutputs));
|
||||
@@ -76,9 +76,13 @@ export const AgentOutputs = ({ flowID }: { flowID: string | null }) => {
|
||||
<Tooltip>
|
||||
<TooltipTrigger asChild>
|
||||
<SheetTrigger asChild>
|
||||
<BuilderActionButton disabled={!flowID || !hasOutputs()}>
|
||||
<BookOpenIcon className="size-6" />
|
||||
</BuilderActionButton>
|
||||
<Button
|
||||
variant="outline"
|
||||
size="icon"
|
||||
disabled={!flowID || !hasOutputs()}
|
||||
>
|
||||
<BookOpenIcon className="size-4" />
|
||||
</Button>
|
||||
</SheetTrigger>
|
||||
</TooltipTrigger>
|
||||
<TooltipContent>
|
||||
|
||||
@@ -1,37 +0,0 @@
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import { ButtonProps } from "@/components/atoms/Button/helpers";
|
||||
import { cn } from "@/lib/utils";
|
||||
import { CircleNotchIcon } from "@phosphor-icons/react";
|
||||
|
||||
export const BuilderActionButton = ({
|
||||
children,
|
||||
className,
|
||||
isLoading,
|
||||
...props
|
||||
}: ButtonProps & { isLoading?: boolean }) => {
|
||||
return (
|
||||
<Button
|
||||
variant="icon"
|
||||
size={"small"}
|
||||
className={cn(
|
||||
"relative h-12 w-12 min-w-0 text-lg",
|
||||
"bg-gradient-to-br from-zinc-50 to-zinc-200",
|
||||
"border border-zinc-200",
|
||||
"shadow-[inset_0_3px_0_0_rgba(255,255,255,0.5),0_2px_4px_0_rgba(0,0,0,0.2)]",
|
||||
"dark:shadow-[inset_0_1px_0_0_rgba(255,255,255,0.1),0_2px_4px_0_rgba(0,0,0,0.4)]",
|
||||
"hover:shadow-[inset_0_1px_0_0_rgba(255,255,255,0.5),0_1px_2px_0_rgba(0,0,0,0.2)]",
|
||||
"active:shadow-[inset_0_2px_4px_0_rgba(0,0,0,0.2)]",
|
||||
"transition-all duration-150",
|
||||
"disabled:cursor-not-allowed disabled:opacity-50",
|
||||
className,
|
||||
)}
|
||||
{...props}
|
||||
>
|
||||
{!isLoading ? (
|
||||
children
|
||||
) : (
|
||||
<CircleNotchIcon className="size-6 animate-spin" />
|
||||
)}
|
||||
</Button>
|
||||
);
|
||||
};
|
||||
@@ -1,12 +1,12 @@
|
||||
import { ShareIcon } from "@phosphor-icons/react";
|
||||
import { BuilderActionButton } from "../BuilderActionButton";
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import {
|
||||
Tooltip,
|
||||
TooltipContent,
|
||||
TooltipTrigger,
|
||||
} from "@/components/atoms/Tooltip/BaseTooltip";
|
||||
import { usePublishToMarketplace } from "./usePublishToMarketplace";
|
||||
import { PublishAgentModal } from "@/components/contextual/PublishAgentModal/PublishAgentModal";
|
||||
import { ShareIcon } from "@phosphor-icons/react";
|
||||
import { usePublishToMarketplace } from "./usePublishToMarketplace";
|
||||
|
||||
export const PublishToMarketplace = ({ flowID }: { flowID: string | null }) => {
|
||||
const { handlePublishToMarketplace, publishState, handleStateChange } =
|
||||
@@ -16,12 +16,14 @@ export const PublishToMarketplace = ({ flowID }: { flowID: string | null }) => {
|
||||
<>
|
||||
<Tooltip>
|
||||
<TooltipTrigger asChild>
|
||||
<BuilderActionButton
|
||||
<Button
|
||||
variant="outline"
|
||||
size="icon"
|
||||
onClick={handlePublishToMarketplace}
|
||||
disabled={!flowID}
|
||||
>
|
||||
<ShareIcon className="size-6 drop-shadow-sm" />
|
||||
</BuilderActionButton>
|
||||
<ShareIcon className="size-4" />
|
||||
</Button>
|
||||
</TooltipTrigger>
|
||||
<TooltipContent>Publish to Marketplace</TooltipContent>
|
||||
</Tooltip>
|
||||
@@ -30,6 +32,7 @@ export const PublishToMarketplace = ({ flowID }: { flowID: string | null }) => {
|
||||
targetState={publishState}
|
||||
onStateChange={handleStateChange}
|
||||
preSelectedAgentId={flowID || undefined}
|
||||
showTrigger={false}
|
||||
/>
|
||||
</>
|
||||
);
|
||||
|
||||
@@ -1,15 +1,14 @@
|
||||
import { useRunGraph } from "./useRunGraph";
|
||||
import { useGraphStore } from "@/app/(platform)/build/stores/graphStore";
|
||||
import { useShallow } from "zustand/react/shallow";
|
||||
import { PlayIcon, StopIcon } from "@phosphor-icons/react";
|
||||
import { cn } from "@/lib/utils";
|
||||
import { RunInputDialog } from "../RunInputDialog/RunInputDialog";
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import {
|
||||
Tooltip,
|
||||
TooltipContent,
|
||||
TooltipTrigger,
|
||||
} from "@/components/atoms/Tooltip/BaseTooltip";
|
||||
import { BuilderActionButton } from "../BuilderActionButton";
|
||||
import { PlayIcon, StopIcon } from "@phosphor-icons/react";
|
||||
import { useShallow } from "zustand/react/shallow";
|
||||
import { RunInputDialog } from "../RunInputDialog/RunInputDialog";
|
||||
import { useRunGraph } from "./useRunGraph";
|
||||
|
||||
export const RunGraph = ({ flowID }: { flowID: string | null }) => {
|
||||
const {
|
||||
@@ -29,21 +28,19 @@ export const RunGraph = ({ flowID }: { flowID: string | null }) => {
|
||||
<>
|
||||
<Tooltip>
|
||||
<TooltipTrigger asChild>
|
||||
<BuilderActionButton
|
||||
className={cn(
|
||||
isGraphRunning &&
|
||||
"border-red-500 bg-gradient-to-br from-red-400 to-red-500 shadow-[inset_0_2px_0_0_rgba(255,255,255,0.5),0_2px_4px_0_rgba(0,0,0,0.2)]",
|
||||
)}
|
||||
<Button
|
||||
size="icon"
|
||||
variant={isGraphRunning ? "destructive" : "primary"}
|
||||
onClick={isGraphRunning ? handleStopGraph : handleRunGraph}
|
||||
disabled={!flowID || isExecutingGraph || isTerminatingGraph}
|
||||
isLoading={isExecutingGraph || isTerminatingGraph || isSaving}
|
||||
loading={isExecutingGraph || isTerminatingGraph || isSaving}
|
||||
>
|
||||
{!isGraphRunning ? (
|
||||
<PlayIcon className="size-6 drop-shadow-sm" />
|
||||
<PlayIcon className="size-4" />
|
||||
) : (
|
||||
<StopIcon className="size-6 drop-shadow-sm" />
|
||||
<StopIcon className="size-4" />
|
||||
)}
|
||||
</BuilderActionButton>
|
||||
</Button>
|
||||
</TooltipTrigger>
|
||||
<TooltipContent>
|
||||
{isGraphRunning ? "Stop agent" : "Run agent"}
|
||||
|
||||
@@ -5,7 +5,7 @@ import { useGraphStore } from "@/app/(platform)/build/stores/graphStore";
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import { ClockIcon, PlayIcon } from "@phosphor-icons/react";
|
||||
import { Text } from "@/components/atoms/Text/Text";
|
||||
import { FormRenderer } from "@/components/renderers/input-renderer/FormRenderer";
|
||||
import { FormRenderer } from "@/components/renderers/InputRenderer/FormRenderer";
|
||||
import { useRunInputDialog } from "./useRunInputDialog";
|
||||
import { CronSchedulerDialog } from "../CronSchedulerDialog/CronSchedulerDialog";
|
||||
|
||||
|
||||
@@ -8,7 +8,7 @@ import {
|
||||
import { parseAsInteger, parseAsString, useQueryStates } from "nuqs";
|
||||
import { useMemo, useState } from "react";
|
||||
import { uiSchema } from "../../../FlowEditor/nodes/uiSchema";
|
||||
import { isCredentialFieldSchema } from "@/components/renderers/input-renderer/fields/CredentialField/helpers";
|
||||
import { isCredentialFieldSchema } from "@/components/renderers/InputRenderer/custom/CredentialField/helpers";
|
||||
|
||||
export const useRunInputDialog = ({
|
||||
setIsOpen,
|
||||
|
||||
@@ -1,14 +1,14 @@
|
||||
import { ClockIcon } from "@phosphor-icons/react";
|
||||
import { RunInputDialog } from "../RunInputDialog/RunInputDialog";
|
||||
import { useScheduleGraph } from "./useScheduleGraph";
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import {
|
||||
Tooltip,
|
||||
TooltipContent,
|
||||
TooltipProvider,
|
||||
TooltipTrigger,
|
||||
} from "@/components/atoms/Tooltip/BaseTooltip";
|
||||
import { ClockIcon } from "@phosphor-icons/react";
|
||||
import { CronSchedulerDialog } from "../CronSchedulerDialog/CronSchedulerDialog";
|
||||
import { BuilderActionButton } from "../BuilderActionButton";
|
||||
import { RunInputDialog } from "../RunInputDialog/RunInputDialog";
|
||||
import { useScheduleGraph } from "./useScheduleGraph";
|
||||
|
||||
export const ScheduleGraph = ({ flowID }: { flowID: string | null }) => {
|
||||
const {
|
||||
@@ -23,12 +23,14 @@ export const ScheduleGraph = ({ flowID }: { flowID: string | null }) => {
|
||||
<TooltipProvider>
|
||||
<Tooltip>
|
||||
<TooltipTrigger asChild>
|
||||
<BuilderActionButton
|
||||
<Button
|
||||
variant="outline"
|
||||
size="icon"
|
||||
onClick={handleScheduleGraph}
|
||||
disabled={!flowID}
|
||||
>
|
||||
<ClockIcon className="size-6" />
|
||||
</BuilderActionButton>
|
||||
<ClockIcon className="size-4" />
|
||||
</Button>
|
||||
</TooltipTrigger>
|
||||
<TooltipContent>
|
||||
<p>Schedule Graph</p>
|
||||
|
||||
@@ -0,0 +1,160 @@
|
||||
"use client";
|
||||
|
||||
import { Button } from "@/components/atoms/Button/Button";
|
||||
import { ClockCounterClockwiseIcon, XIcon } from "@phosphor-icons/react";
|
||||
import { cn } from "@/lib/utils";
|
||||
import { formatTimeAgo } from "@/lib/utils/time";
|
||||
import {
|
||||
Tooltip,
|
||||
TooltipContent,
|
||||
TooltipTrigger,
|
||||
} from "@/components/atoms/Tooltip/BaseTooltip";
|
||||
import { useDraftRecoveryPopup } from "./useDraftRecoveryPopup";
|
||||
import { Text } from "@/components/atoms/Text/Text";
|
||||
import { AnimatePresence, motion } from "framer-motion";
|
||||
import { DraftDiff } from "@/lib/dexie/draft-utils";
|
||||
|
||||
interface DraftRecoveryPopupProps {
|
||||
isInitialLoadComplete: boolean;
|
||||
}
|
||||
|
||||
function formatDiffSummary(diff: DraftDiff | null): string {
|
||||
if (!diff) return "";
|
||||
|
||||
const parts: string[] = [];
|
||||
|
||||
// Node changes
|
||||
const nodeChanges: string[] = [];
|
||||
if (diff.nodes.added > 0) nodeChanges.push(`+${diff.nodes.added}`);
|
||||
if (diff.nodes.removed > 0) nodeChanges.push(`-${diff.nodes.removed}`);
|
||||
if (diff.nodes.modified > 0) nodeChanges.push(`~${diff.nodes.modified}`);
|
||||
|
||||
if (nodeChanges.length > 0) {
|
||||
parts.push(
|
||||
`${nodeChanges.join("/")} block${diff.nodes.added + diff.nodes.removed + diff.nodes.modified !== 1 ? "s" : ""}`,
|
||||
);
|
||||
}
|
||||
|
||||
// Edge changes
|
||||
const edgeChanges: string[] = [];
|
||||
if (diff.edges.added > 0) edgeChanges.push(`+${diff.edges.added}`);
|
||||
if (diff.edges.removed > 0) edgeChanges.push(`-${diff.edges.removed}`);
|
||||
if (diff.edges.modified > 0) edgeChanges.push(`~${diff.edges.modified}`);
|
||||
|
||||
if (edgeChanges.length > 0) {
|
||||
parts.push(
|
||||
`${edgeChanges.join("/")} connection${diff.edges.added + diff.edges.removed + diff.edges.modified !== 1 ? "s" : ""}`,
|
||||
);
|
||||
}
|
||||
|
||||
return parts.join(", ");
|
||||
}
|
||||
|
||||
export function DraftRecoveryPopup({
|
||||
isInitialLoadComplete,
|
||||
}: DraftRecoveryPopupProps) {
|
||||
const {
|
||||
isOpen,
|
||||
popupRef,
|
||||
nodeCount,
|
||||
edgeCount,
|
||||
diff,
|
||||
savedAt,
|
||||
onLoad,
|
||||
onDiscard,
|
||||
} = useDraftRecoveryPopup(isInitialLoadComplete);
|
||||
|
||||
const diffSummary = formatDiffSummary(diff);
|
||||
|
||||
return (
|
||||
<AnimatePresence>
|
||||
{isOpen && (
|
||||
<motion.div
|
||||
ref={popupRef}
|
||||
className={cn("absolute left-1/2 top-4 z-50")}
|
||||
initial={{
|
||||
opacity: 0,
|
||||
x: "-50%",
|
||||
y: "-150%",
|
||||
scale: 0.5,
|
||||
filter: "blur(20px)",
|
||||
}}
|
||||
animate={{
|
||||
opacity: 1,
|
||||
x: "-50%",
|
||||
y: "0%",
|
||||
scale: 1,
|
||||
filter: "blur(0px)",
|
||||
}}
|
||||
exit={{
|
||||
opacity: 0,
|
||||
y: "-150%",
|
||||
scale: 0.5,
|
||||
filter: "blur(20px)",
|
||||
transition: { duration: 0.4, type: "spring", bounce: 0.2 },
|
||||
}}
|
||||
transition={{ duration: 0.2, type: "spring", bounce: 0.2 }}
|
||||
>
|
||||
<div
|
||||
className={cn(
|
||||
"flex items-center gap-3 rounded-xlarge border border-amber-200 bg-amber-50 px-4 py-3 shadow-lg",
|
||||
)}
|
||||
>
|
||||
<div className="flex items-center gap-2 text-amber-700 dark:text-amber-300">
|
||||
<ClockCounterClockwiseIcon className="h-5 w-5" weight="fill" />
|
||||
</div>
|
||||
|
||||
<div className="flex flex-col">
|
||||
<Text
|
||||
variant="small-medium"
|
||||
className="text-amber-900 dark:text-amber-100"
|
||||
>
|
||||
Unsaved changes found
|
||||
</Text>
|
||||
<Text
|
||||
variant="small"
|
||||
className="text-amber-700 dark:text-amber-400"
|
||||
>
|
||||
{diffSummary ||
|
||||
`${nodeCount} block${nodeCount !== 1 ? "s" : ""}, ${edgeCount} connection${edgeCount !== 1 ? "s" : ""}`}{" "}
|
||||
• {formatTimeAgo(new Date(savedAt).toISOString())}
|
||||
</Text>
|
||||
</div>
|
||||
|
||||
<div className="ml-2 flex items-center gap-2">
|
||||
<Tooltip delayDuration={10}>
|
||||
<TooltipTrigger asChild>
|
||||
<Button
|
||||
variant="primary"
|
||||
size="small"
|
||||
onClick={onLoad}
|
||||
className="aspect-square min-w-0 p-1.5"
|
||||
>
|
||||
<ClockCounterClockwiseIcon size={20} weight="fill" />
|
||||
<span className="sr-only">Restore changes</span>
|
||||
</Button>
|
||||
</TooltipTrigger>
|
||||
<TooltipContent>Restore changes</TooltipContent>
|
||||
</Tooltip>
|
||||
<Tooltip delayDuration={10}>
|
||||
<TooltipTrigger asChild>
|
||||
<Button
|
||||
variant="destructive"
|
||||
size="icon"
|
||||
onClick={onDiscard}
|
||||
aria-label="Discard changes"
|
||||
className="aspect-square min-w-0 p-1.5"
|
||||
>
|
||||
<XIcon size={20} />
|
||||
<span className="sr-only">Discard changes</span>
|
||||
</Button>
|
||||
</TooltipTrigger>
|
||||
<TooltipContent>Discard changes</TooltipContent>
|
||||
</Tooltip>
|
||||
</div>
|
||||
</div>
|
||||
</motion.div>
|
||||
)}
|
||||
</AnimatePresence>
|
||||
);
|
||||
}
|
||||
@@ -0,0 +1,63 @@
|
||||
import { useEffect, useRef } from "react";
|
||||
import { useDraftManager } from "../FlowEditor/Flow/useDraftManager";
|
||||
|
||||
export const useDraftRecoveryPopup = (isInitialLoadComplete: boolean) => {
|
||||
const popupRef = useRef<HTMLDivElement>(null);
|
||||
|
||||
const {
|
||||
isRecoveryOpen: isOpen,
|
||||
savedAt,
|
||||
nodeCount,
|
||||
edgeCount,
|
||||
diff,
|
||||
loadDraft: onLoad,
|
||||
discardDraft: onDiscard,
|
||||
} = useDraftManager(isInitialLoadComplete);
|
||||
|
||||
useEffect(() => {
|
||||
if (!isOpen) return;
|
||||
|
||||
const handleClickOutside = (event: MouseEvent) => {
|
||||
if (
|
||||
popupRef.current &&
|
||||
!popupRef.current.contains(event.target as Node)
|
||||
) {
|
||||
onDiscard();
|
||||
}
|
||||
};
|
||||
|
||||
const timeoutId = setTimeout(() => {
|
||||
document.addEventListener("mousedown", handleClickOutside);
|
||||
}, 100);
|
||||
|
||||
return () => {
|
||||
clearTimeout(timeoutId);
|
||||
document.removeEventListener("mousedown", handleClickOutside);
|
||||
};
|
||||
}, [isOpen, onDiscard]);
|
||||
|
||||
useEffect(() => {
|
||||
if (!isOpen) return;
|
||||
|
||||
const handleKeyDown = (event: KeyboardEvent) => {
|
||||
if (event.key === "Escape") {
|
||||
onDiscard();
|
||||
}
|
||||
};
|
||||
|
||||
document.addEventListener("keydown", handleKeyDown);
|
||||
return () => {
|
||||
document.removeEventListener("keydown", handleKeyDown);
|
||||
};
|
||||
}, [isOpen, onDiscard]);
|
||||
return {
|
||||
popupRef,
|
||||
isOpen,
|
||||
nodeCount,
|
||||
edgeCount,
|
||||
diff,
|
||||
savedAt,
|
||||
onLoad,
|
||||
onDiscard,
|
||||
};
|
||||
};
|
||||
@@ -1,26 +1,27 @@
|
||||
import { ReactFlow, Background } from "@xyflow/react";
|
||||
import NewControlPanel from "../../NewControlPanel/NewControlPanel";
|
||||
import CustomEdge from "../edges/CustomEdge";
|
||||
import { useFlow } from "./useFlow";
|
||||
import { useShallow } from "zustand/react/shallow";
|
||||
import { useNodeStore } from "../../../stores/nodeStore";
|
||||
import { useMemo, useEffect, useCallback } from "react";
|
||||
import { CustomNode } from "../nodes/CustomNode/CustomNode";
|
||||
import { useCustomEdge } from "../edges/useCustomEdge";
|
||||
import { useFlowRealtime } from "./useFlowRealtime";
|
||||
import { GraphLoadingBox } from "./components/GraphLoadingBox";
|
||||
import { BuilderActions } from "../../BuilderActions/BuilderActions";
|
||||
import { RunningBackground } from "./components/RunningBackground";
|
||||
import { useGraphStore } from "../../../stores/graphStore";
|
||||
import { useCopyPaste } from "./useCopyPaste";
|
||||
import { FloatingReviewsPanel } from "@/components/organisms/FloatingReviewsPanel/FloatingReviewsPanel";
|
||||
import { parseAsString, useQueryStates } from "nuqs";
|
||||
import { CustomControls } from "./components/CustomControl";
|
||||
import { useGetV1GetSpecificGraph } from "@/app/api/__generated__/endpoints/graphs/graphs";
|
||||
import { okData } from "@/app/api/helpers";
|
||||
import { FloatingReviewsPanel } from "@/components/organisms/FloatingReviewsPanel/FloatingReviewsPanel";
|
||||
import { Background, ReactFlow } from "@xyflow/react";
|
||||
import { parseAsString, useQueryStates } from "nuqs";
|
||||
import { useCallback, useMemo } from "react";
|
||||
import { useShallow } from "zustand/react/shallow";
|
||||
import { useGraphStore } from "../../../stores/graphStore";
|
||||
import { useNodeStore } from "../../../stores/nodeStore";
|
||||
import { BuilderActions } from "../../BuilderActions/BuilderActions";
|
||||
import { DraftRecoveryPopup } from "../../DraftRecoveryDialog/DraftRecoveryPopup";
|
||||
import { FloatingSafeModeToggle } from "../../FloatingSafeModeToogle";
|
||||
import NewControlPanel from "../../NewControlPanel/NewControlPanel";
|
||||
import CustomEdge from "../edges/CustomEdge";
|
||||
import { useCustomEdge } from "../edges/useCustomEdge";
|
||||
import { CustomNode } from "../nodes/CustomNode/CustomNode";
|
||||
import { CustomControls } from "./components/CustomControl";
|
||||
import { GraphLoadingBox } from "./components/GraphLoadingBox";
|
||||
import { RunningBackground } from "./components/RunningBackground";
|
||||
import { TriggerAgentBanner } from "./components/TriggerAgentBanner";
|
||||
import { resolveCollisions } from "./helpers/resolve-collision";
|
||||
import { FloatingSafeModeToggle } from "../../FloatingSafeModeToogle";
|
||||
import { useCopyPaste } from "./useCopyPaste";
|
||||
import { useFlow } from "./useFlow";
|
||||
import { useFlowRealtime } from "./useFlowRealtime";
|
||||
|
||||
export const Flow = () => {
|
||||
const [{ flowID, flowExecutionID }] = useQueryStates({
|
||||
@@ -41,14 +42,18 @@ export const Flow = () => {
|
||||
|
||||
const nodes = useNodeStore(useShallow((state) => state.nodes));
|
||||
const setNodes = useNodeStore(useShallow((state) => state.setNodes));
|
||||
|
||||
const onNodesChange = useNodeStore(
|
||||
useShallow((state) => state.onNodesChange),
|
||||
);
|
||||
|
||||
const hasWebhookNodes = useNodeStore(
|
||||
useShallow((state) => state.hasWebhookNodes()),
|
||||
);
|
||||
|
||||
const nodeTypes = useMemo(() => ({ custom: CustomNode }), []);
|
||||
const edgeTypes = useMemo(() => ({ custom: CustomEdge }), []);
|
||||
|
||||
const onNodeDragStop = useCallback(() => {
|
||||
setNodes(
|
||||
resolveCollisions(nodes, {
|
||||
@@ -60,29 +65,26 @@ export const Flow = () => {
|
||||
}, [setNodes, nodes]);
|
||||
const { edges, onConnect, onEdgesChange } = useCustomEdge();
|
||||
|
||||
// We use this hook to load the graph and convert them into custom nodes and edges.
|
||||
const { onDragOver, onDrop, isFlowContentLoading, isLocked, setIsLocked } =
|
||||
useFlow();
|
||||
// for loading purpose
|
||||
const {
|
||||
onDragOver,
|
||||
onDrop,
|
||||
isFlowContentLoading,
|
||||
isInitialLoadComplete,
|
||||
isLocked,
|
||||
setIsLocked,
|
||||
} = useFlow();
|
||||
|
||||
// This hook is used for websocket realtime updates.
|
||||
useFlowRealtime();
|
||||
|
||||
// Copy/paste functionality
|
||||
const handleCopyPaste = useCopyPaste();
|
||||
useCopyPaste();
|
||||
|
||||
useEffect(() => {
|
||||
const handleKeyDown = (event: KeyboardEvent) => {
|
||||
handleCopyPaste(event);
|
||||
};
|
||||
|
||||
window.addEventListener("keydown", handleKeyDown);
|
||||
return () => {
|
||||
window.removeEventListener("keydown", handleKeyDown);
|
||||
};
|
||||
}, [handleCopyPaste]);
|
||||
const isGraphRunning = useGraphStore(
|
||||
useShallow((state) => state.isGraphRunning),
|
||||
);
|
||||
|
||||
return (
|
||||
<div className="flex h-full w-full dark:bg-slate-900">
|
||||
<div className="relative flex-1">
|
||||
@@ -95,6 +97,9 @@ export const Flow = () => {
|
||||
onConnect={onConnect}
|
||||
onEdgesChange={onEdgesChange}
|
||||
onNodeDragStop={onNodeDragStop}
|
||||
onNodeContextMenu={(event) => {
|
||||
event.preventDefault();
|
||||
}}
|
||||
maxZoom={2}
|
||||
minZoom={0.1}
|
||||
onDragOver={onDragOver}
|
||||
@@ -102,6 +107,7 @@ export const Flow = () => {
|
||||
nodesDraggable={!isLocked}
|
||||
nodesConnectable={!isLocked}
|
||||
elementsSelectable={!isLocked}
|
||||
deleteKeyCode={["Backspace", "Delete"]}
|
||||
>
|
||||
<Background />
|
||||
<CustomControls setIsLocked={setIsLocked} isLocked={isLocked} />
|
||||
@@ -115,6 +121,7 @@ export const Flow = () => {
|
||||
className="right-2 top-32 p-2"
|
||||
/>
|
||||
)}
|
||||
<DraftRecoveryPopup isInitialLoadComplete={isInitialLoadComplete} />
|
||||
</ReactFlow>
|
||||
</div>
|
||||
{/* TODO: Need to update it in future - also do not send executionId as prop - rather use useQueryState inside the component */}
|
||||
|
||||
@@ -48,8 +48,6 @@ export const resolveCollisions: CollisionAlgorithm = (
|
||||
const width = (node.width ?? node.measured?.width ?? 0) + margin * 2;
|
||||
const height = (node.height ?? node.measured?.height ?? 0) + margin * 2;
|
||||
|
||||
console.log("width", width);
|
||||
console.log("height", height);
|
||||
const x = node.position.x - margin;
|
||||
const y = node.position.y - margin;
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
import { useCallback } from "react";
import { useCallback, useEffect } from "react";
import { useReactFlow } from "@xyflow/react";
import { v4 as uuidv4 } from "uuid";
import { useNodeStore } from "../../../stores/nodeStore";
@@ -151,5 +151,16 @@ export function useCopyPaste() {
[getViewport, toast],
);

useEffect(() => {
const handleKeyDown = (event: KeyboardEvent) => {
handleCopyPaste(event);
};

window.addEventListener("keydown", handleKeyDown);
return () => {
window.removeEventListener("keydown", handleKeyDown);
};
}, [handleCopyPaste]);

return handleCopyPaste;
}

@@ -0,0 +1,319 @@
import { useState, useCallback, useEffect, useRef } from "react";
import { parseAsString, parseAsInteger, useQueryStates } from "nuqs";
import {
draftService,
getTempFlowId,
getOrCreateTempFlowId,
DraftData,
} from "@/services/builder-draft/draft-service";
import { BuilderDraft } from "@/lib/dexie/db";
import {
cleanNodes,
cleanEdges,
calculateDraftDiff,
DraftDiff,
} from "@/lib/dexie/draft-utils";
import { useNodeStore } from "../../../stores/nodeStore";
import { useEdgeStore } from "../../../stores/edgeStore";
import { useGraphStore } from "../../../stores/graphStore";
import { useHistoryStore } from "../../../stores/historyStore";
import isEqual from "lodash/isEqual";

const AUTO_SAVE_INTERVAL_MS = 15000; // 15 seconds

interface DraftRecoveryState {
isOpen: boolean;
draft: BuilderDraft | null;
diff: DraftDiff | null;
}

/**
* Consolidated hook for draft persistence and recovery
* - Auto-saves builder state every 15 seconds
* - Saves on beforeunload event
* - Checks for and manages unsaved drafts on load
*/
export function useDraftManager(isInitialLoadComplete: boolean) {
const [state, setState] = useState<DraftRecoveryState>({
isOpen: false,
draft: null,
diff: null,
});

const [{ flowID, flowVersion }] = useQueryStates({
flowID: parseAsString,
flowVersion: parseAsInteger,
});

const lastSavedStateRef = useRef<DraftData | null>(null);
const saveTimeoutRef = useRef<NodeJS.Timeout | null>(null);
const isDirtyRef = useRef(false);
const hasCheckedForDraft = useRef(false);

const getEffectiveFlowId = useCallback((): string => {
return flowID || getOrCreateTempFlowId();
}, [flowID]);

const getCurrentState = useCallback((): DraftData => {
const nodes = useNodeStore.getState().nodes;
const edges = useEdgeStore.getState().edges;
const nodeCounter = useNodeStore.getState().nodeCounter;
const graphStore = useGraphStore.getState();

return {
nodes,
edges,
graphSchemas: {
input: graphStore.inputSchema,
credentials: graphStore.credentialsInputSchema,
output: graphStore.outputSchema,
},
nodeCounter,
flowVersion: flowVersion ?? undefined,
};
}, [flowVersion]);

const cleanStateForComparison = useCallback((stateData: DraftData) => {
return {
nodes: cleanNodes(stateData.nodes),
edges: cleanEdges(stateData.edges),
};
}, []);

const hasChanges = useCallback((): boolean => {
const currentState = getCurrentState();

if (!lastSavedStateRef.current) {
return currentState.nodes.length > 0;
}

const currentClean = cleanStateForComparison(currentState);
const lastClean = cleanStateForComparison(lastSavedStateRef.current);

return !isEqual(currentClean, lastClean);
}, [getCurrentState, cleanStateForComparison]);

const saveDraft = useCallback(async () => {
const effectiveFlowId = getEffectiveFlowId();
const currentState = getCurrentState();

if (currentState.nodes.length === 0 && currentState.edges.length === 0) {
return;
}

if (!hasChanges()) {
return;
}

try {
await draftService.saveDraft(effectiveFlowId, currentState);
lastSavedStateRef.current = currentState;
isDirtyRef.current = false;
} catch (error) {
console.error("[DraftPersistence] Failed to save draft:", error);
}
}, [getEffectiveFlowId, getCurrentState, hasChanges]);

const scheduleSave = useCallback(() => {
isDirtyRef.current = true;

if (saveTimeoutRef.current) {
clearTimeout(saveTimeoutRef.current);
}

saveTimeoutRef.current = setTimeout(() => {
saveDraft();
}, AUTO_SAVE_INTERVAL_MS);
}, [saveDraft]);

useEffect(() => {
const unsubscribeNodes = useNodeStore.subscribe((storeState, prevState) => {
if (storeState.nodes !== prevState.nodes) {
scheduleSave();
}
});

const unsubscribeEdges = useEdgeStore.subscribe((storeState, prevState) => {
if (storeState.edges !== prevState.edges) {
scheduleSave();
}
});

return () => {
unsubscribeNodes();
unsubscribeEdges();
};
}, [scheduleSave]);

useEffect(() => {
const handleBeforeUnload = () => {
if (isDirtyRef.current) {
const effectiveFlowId = getEffectiveFlowId();
const currentState = getCurrentState();

if (
currentState.nodes.length === 0 &&
currentState.edges.length === 0
) {
return;
}

draftService.saveDraft(effectiveFlowId, currentState).catch(() => {
// Ignore errors on unload
});
}
};

window.addEventListener("beforeunload", handleBeforeUnload);
return () => {
window.removeEventListener("beforeunload", handleBeforeUnload);
};
}, [getEffectiveFlowId, getCurrentState]);

useEffect(() => {
return () => {
if (saveTimeoutRef.current) {
clearTimeout(saveTimeoutRef.current);
}
if (isDirtyRef.current) {
saveDraft();
}
};
}, [saveDraft]);

useEffect(() => {
draftService.cleanupExpired().catch((error) => {
console.error(
"[DraftPersistence] Failed to cleanup expired drafts:",
error,
);
});
}, []);

const checkForDraft = useCallback(async () => {
const effectiveFlowId = flowID || getTempFlowId();

if (!effectiveFlowId) {
return;
}

try {
const draft = await draftService.loadDraft(effectiveFlowId);

if (!draft) {
return;
}

const currentNodes = useNodeStore.getState().nodes;
const currentEdges = useEdgeStore.getState().edges;

const isDifferent = draftService.isDraftDifferent(
draft,
currentNodes,
currentEdges,
);

if (isDifferent && (draft.nodes.length > 0 || draft.edges.length > 0)) {
const diff = calculateDraftDiff(
draft.nodes,
draft.edges,
currentNodes,
currentEdges,
);
setState({
isOpen: true,
draft,
diff,
});
} else {
await draftService.deleteDraft(effectiveFlowId);
}
} catch (error) {
console.error("[DraftRecovery] Failed to check for draft:", error);
}
}, [flowID]);

useEffect(() => {
if (isInitialLoadComplete && !hasCheckedForDraft.current) {
hasCheckedForDraft.current = true;
checkForDraft();
}
}, [isInitialLoadComplete, checkForDraft]);

useEffect(() => {
hasCheckedForDraft.current = false;
setState({
isOpen: false,
draft: null,
diff: null,
});
}, [flowID]);

const loadDraft = useCallback(async () => {
if (!state.draft) return;

const { draft } = state;

try {
useNodeStore.getState().setNodes(draft.nodes);
useEdgeStore.getState().setEdges(draft.edges);
draft.nodes.forEach((node) => {
useNodeStore.getState().syncHardcodedValuesWithHandleIds(node.id);
});

if (draft.nodeCounter !== undefined) {
useNodeStore.setState({ nodeCounter: draft.nodeCounter });
}

if (draft.graphSchemas) {
useGraphStore
.getState()
.setGraphSchemas(
draft.graphSchemas.input as Record<string, unknown> | null,
draft.graphSchemas.credentials as Record<string, unknown> | null,
draft.graphSchemas.output as Record<string, unknown> | null,
);
}

setTimeout(() => {
useHistoryStore.getState().initializeHistory();
}, 100);

await draftService.deleteDraft(draft.id);

setState({
isOpen: false,
draft: null,
diff: null,
});
} catch (error) {
console.error("[DraftRecovery] Failed to load draft:", error);
}
}, [state.draft]);

const discardDraft = useCallback(async () => {
if (!state.draft) {
setState({ isOpen: false, draft: null, diff: null });
return;
}

try {
await draftService.deleteDraft(state.draft.id);
} catch (error) {
console.error("[DraftRecovery] Failed to discard draft:", error);
}

setState({ isOpen: false, draft: null, diff: null });
}, [state.draft]);

return {
// Recovery popup props
isRecoveryOpen: state.isOpen,
savedAt: state.draft?.savedAt ?? 0,
nodeCount: state.draft?.nodes.length ?? 0,
edgeCount: state.draft?.edges.length ?? 0,
diff: state.diff,
loadDraft,
discardDraft,
};
}

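For orientation, a minimal sketch of how a recovery popup could consume the hook added above, using only the values the hook returns. The real DraftRecoveryPopup is not shown in this diff, so the component body, import path, and markup below are illustrative assumptions, not part of the change set:

// Illustrative sketch only - component markup and import path are assumed.
import { useDraftManager } from "./useDraftManager";

export function DraftRecoveryPopup({
  isInitialLoadComplete,
}: {
  isInitialLoadComplete: boolean;
}) {
  // The hook auto-saves in the background; the popup only reads the recovery state.
  const { isRecoveryOpen, nodeCount, edgeCount, loadDraft, discardDraft } =
    useDraftManager(isInitialLoadComplete);

  if (!isRecoveryOpen) return null;

  return (
    <div role="dialog" aria-label="Draft recovery">
      <p>
        Found an unsaved draft with {nodeCount} nodes and {edgeCount} edges.
      </p>
      <button onClick={loadDraft}>Restore draft</button>
      <button onClick={discardDraft}>Discard</button>
    </div>
  );
}
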
@@ -21,6 +21,7 @@ import { AgentExecutionStatus } from "@/app/api/__generated__/models/agentExecut
export const useFlow = () => {
const [isLocked, setIsLocked] = useState(false);
const [hasAutoFramed, setHasAutoFramed] = useState(false);
const [isInitialLoadComplete, setIsInitialLoadComplete] = useState(false);
const addNodes = useNodeStore(useShallow((state) => state.addNodes));
const addLinks = useEdgeStore(useShallow((state) => state.addLinks));
const updateNodeStatus = useNodeStore(
@@ -120,6 +121,14 @@ export const useFlow = () => {
if (customNodes.length > 0) {
useNodeStore.getState().setNodes([]);
addNodes(customNodes);

// Sync hardcoded values with handle IDs.
// If a key–value field has a key without a value, the backend omits it from hardcoded values.
// But if a handleId exists for that key, it causes inconsistency.
// This ensures hardcoded values stay in sync with handle IDs.
customNodes.forEach((node) => {
useNodeStore.getState().syncHardcodedValuesWithHandleIds(node.id);
});
}
}, [customNodes, addNodes]);

@@ -174,11 +183,23 @@ export const useFlow = () => {
if (customNodes.length > 0 && graph?.links) {
const timer = setTimeout(() => {
useHistoryStore.getState().initializeHistory();
// Mark initial load as complete after history is initialized
setIsInitialLoadComplete(true);
}, 100);
return () => clearTimeout(timer);
}
}, [customNodes, graph?.links]);

// Also mark as complete for new flows (no flowID) after a short delay
useEffect(() => {
if (!flowID && !isGraphLoading && !isBlocksLoading) {
const timer = setTimeout(() => {
setIsInitialLoadComplete(true);
}, 200);
return () => clearTimeout(timer);
}
}, [flowID, isGraphLoading, isBlocksLoading]);

useEffect(() => {
return () => {
useNodeStore.getState().setNodes([]);
@@ -217,6 +238,7 @@ export const useFlow = () => {

useEffect(() => {
setHasAutoFramed(false);
setIsInitialLoadComplete(false);
}, [flowID, flowVersion]);

// Drag and drop block from block menu
@@ -253,6 +275,7 @@ export const useFlow = () => {

return {
isFlowContentLoading: isGraphLoading || isBlocksLoading,
isInitialLoadComplete,
onDragOver,
onDrop,
isLocked,

@@ -1,12 +1,17 @@
import { Connection as RFConnection, EdgeChange } from "@xyflow/react";
import {
Connection as RFConnection,
EdgeChange,
applyEdgeChanges,
} from "@xyflow/react";
import { useEdgeStore } from "@/app/(platform)/build/stores/edgeStore";
import { useCallback } from "react";
import { useNodeStore } from "../../../stores/nodeStore";
import { CustomEdge } from "./CustomEdge";

export const useCustomEdge = () => {
const edges = useEdgeStore((s) => s.edges);
const addEdge = useEdgeStore((s) => s.addEdge);
const removeEdge = useEdgeStore((s) => s.removeEdge);
const setEdges = useEdgeStore((s) => s.setEdges);

const onConnect = useCallback(
(conn: RFConnection) => {
@@ -45,14 +50,10 @@ export const useCustomEdge = () => {
);

const onEdgesChange = useCallback(
(changes: EdgeChange[]) => {
changes.forEach((change) => {
if (change.type === "remove") {
removeEdge(change.id);
}
});
(changes: EdgeChange<CustomEdge>[]) => {
setEdges(applyEdgeChanges(changes, edges));
},
[removeEdge],
[edges, setEdges],
);

return { edges, onConnect, onEdgesChange };

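For context on the onEdgesChange change above: applyEdgeChanges from @xyflow/react applies a whole batch of edge changes (remove, select, replace) immutably to an edge array, which is why the manual "remove"-only branch could be dropped. A standalone sketch, not part of the diff:

import { applyEdgeChanges, type Edge, type EdgeChange } from "@xyflow/react";

// Given the current edges and a batch of changes reported by React Flow,
// compute the next edge array without mutating the original.
function nextEdges(edges: Edge[], changes: EdgeChange[]): Edge[] {
  return applyEdgeChanges(changes, edges);
}
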
@@ -1,26 +1,32 @@
import { CircleIcon } from "@phosphor-icons/react";
import { Handle, Position } from "@xyflow/react";
import { useEdgeStore } from "../../../stores/edgeStore";
import { cleanUpHandleId } from "@/components/renderers/InputRenderer/helpers";
import { cn } from "@/lib/utils";

const NodeHandle = ({
const InputNodeHandle = ({
handleId,
isConnected,
side,
nodeId,
}: {
handleId: string;
isConnected: boolean;
side: "left" | "right";
nodeId: string;
}) => {
const cleanedHandleId = cleanUpHandleId(handleId);
const isInputConnected = useEdgeStore((state) =>
state.isInputConnected(nodeId ?? "", cleanedHandleId),
);

return (
<Handle
type={side === "left" ? "target" : "source"}
position={side === "left" ? Position.Left : Position.Right}
id={handleId}
className={side === "left" ? "-ml-4 mr-2" : "-mr-2 ml-2"}
type={"target"}
position={Position.Left}
id={cleanedHandleId}
className={"-ml-6 mr-2"}
>
<div className="pointer-events-none">
<CircleIcon
size={16}
weight={isConnected ? "fill" : "duotone"}
weight={isInputConnected ? "fill" : "duotone"}
className={"text-gray-400 opacity-100"}
/>
</div>
@@ -28,4 +34,35 @@ const NodeHandle = ({
);
};

export default NodeHandle;
const OutputNodeHandle = ({
field_name,
nodeId,
hexColor,
}: {
field_name: string;
nodeId: string;
hexColor: string;
}) => {
const isOutputConnected = useEdgeStore((state) =>
state.isOutputConnected(nodeId, field_name),
);
return (
<Handle
type={"source"}
position={Position.Right}
id={field_name}
className={"-mr-2 ml-2"}
>
<div className="pointer-events-none">
<CircleIcon
size={16}
weight={"duotone"}
color={isOutputConnected ? hexColor : "gray"}
className={cn("text-gray-400 opacity-100")}
/>
</div>
</Handle>
);
};

export { InputNodeHandle, OutputNodeHandle };

@@ -1,31 +1,4 @@
/**
* Handle ID Types for different input structures
*
* Examples:
* SIMPLE: "message"
* NESTED: "config.api_key"
* ARRAY: "items_$_0", "items_$_1"
* KEY_VALUE: "headers_#_Authorization", "params_#_limit"
*
* Note: All handle IDs are sanitized to remove spaces and special characters.
* Spaces become underscores, and special characters are removed.
* Example: "user name" becomes "user_name", "email@domain.com" becomes "emaildomaincom"
*/
export enum HandleIdType {
SIMPLE = "SIMPLE",
NESTED = "NESTED",
ARRAY = "ARRAY",
KEY_VALUE = "KEY_VALUE",
}

const fromRjsfId = (id: string): string => {
if (!id) return "";
const parts = id.split("_");
const filtered = parts.filter(
(p) => p !== "root" && p !== "properties" && p.length > 0,
);
return filtered.join("_") || "";
};
// Here we are handling single level of nesting, if need more in future then i will update it

const sanitizeForHandleId = (str: string): string => {
if (!str) return "";
@@ -38,51 +11,53 @@ const sanitizeForHandleId = (str: string): string => {
.replace(/^_|_$/g, ""); // Remove leading/trailing underscores
};

export const generateHandleId = (
const cleanTitleId = (id: string): string => {
if (!id) return "";

if (id.endsWith("_title")) {
id = id.slice(0, -6);
}
const parts = id.split("_");
const filtered = parts.filter(
(p) => p !== "root" && p !== "properties" && p.length > 0,
);
const filtered_id = filtered.join("_") || "";
return filtered_id;
};

export const generateHandleIdFromTitleId = (
fieldKey: string,
nestedValues: string[] = [],
type: HandleIdType = HandleIdType.SIMPLE,
{
isObjectProperty,
isAdditionalProperty,
isArrayItem,
}: {
isArrayItem?: boolean;
isObjectProperty?: boolean;
isAdditionalProperty?: boolean;
} = {
isArrayItem: false,
isObjectProperty: false,
isAdditionalProperty: false,
},
): string => {
if (!fieldKey) return "";

fieldKey = fromRjsfId(fieldKey);
fieldKey = sanitizeForHandleId(fieldKey);
const filteredKey = cleanTitleId(fieldKey);
if (isAdditionalProperty || isArrayItem) {
return filteredKey;
}
const cleanedKey = sanitizeForHandleId(filteredKey);

if (type === HandleIdType.SIMPLE || nestedValues.length === 0) {
return fieldKey;
if (isObjectProperty) {
// "config_api_key" -> "config.api_key"
const parts = cleanedKey.split("_");
if (parts.length >= 2) {
const baseName = parts[0];
const propertyName = parts.slice(1).join("_");
return `${baseName}.${propertyName}`;
}
}

const sanitizedNestedValues = nestedValues.map((value) =>
sanitizeForHandleId(value),
);

switch (type) {
case HandleIdType.NESTED:
return [fieldKey, ...sanitizedNestedValues].join(".");

case HandleIdType.ARRAY:
return [fieldKey, ...sanitizedNestedValues].join("_$_");

case HandleIdType.KEY_VALUE:
return [fieldKey, ...sanitizedNestedValues].join("_#_");

default:
return fieldKey;
}
};

export const parseKeyValueHandleId = (
handleId: string,
type: HandleIdType,
): string => {
if (type === HandleIdType.KEY_VALUE) {
return handleId.split("_#_")[1];
} else if (type === HandleIdType.ARRAY) {
return handleId.split("_$_")[1];
} else if (type === HandleIdType.NESTED) {
return handleId.split(".")[1];
} else if (type === HandleIdType.SIMPLE) {
return handleId.split("_")[1];
}
return "";
return cleanedKey;
};

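To make the new title-ID helpers concrete, a small sketch of the values they should produce, derived by reading the changed code above. The import path is an assumption and, since the hunk interleaves removed and added lines, the exact merged signature may differ:

// Illustrative only - import path assumed.
import { generateHandleIdFromTitleId } from "./helpers";

// RJSF emits title ids such as "root_properties_message_title";
// cleanTitleId strips "root"/"properties" and the "_title" suffix.
generateHandleIdFromTitleId("root_properties_message_title");
// -> "message"

// With isObjectProperty, the first segment becomes a dotted prefix,
// matching the inline comment: "config_api_key" -> "config.api_key".
generateHandleIdFromTitleId("root_properties_config_api_key_title", {
  isObjectProperty: true,
});
// -> "config.api_key"
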
@@ -1,24 +1,25 @@
import React from "react";
import { Node as XYNode, NodeProps } from "@xyflow/react";
import { RJSFSchema } from "@rjsf/utils";
import { BlockUIType } from "../../../types";
import { StickyNoteBlock } from "./components/StickyNoteBlock";
import { BlockInfoCategoriesItem } from "@/app/api/__generated__/models/blockInfoCategoriesItem";
import { BlockCost } from "@/app/api/__generated__/models/blockCost";
import { AgentExecutionStatus } from "@/app/api/__generated__/models/agentExecutionStatus";
import { BlockCost } from "@/app/api/__generated__/models/blockCost";
import { BlockInfoCategoriesItem } from "@/app/api/__generated__/models/blockInfoCategoriesItem";
import { NodeExecutionResult } from "@/app/api/__generated__/models/nodeExecutionResult";
import { NodeContainer } from "./components/NodeContainer";
import { NodeHeader } from "./components/NodeHeader";
import { FormCreator } from "../FormCreator";
import { preprocessInputSchema } from "@/components/renderers/input-renderer/utils/input-schema-pre-processor";
import { OutputHandler } from "../OutputHandler";
import { NodeAdvancedToggle } from "./components/NodeAdvancedToggle";
import { NodeDataRenderer } from "./components/NodeOutput/NodeOutput";
import { NodeExecutionBadge } from "./components/NodeExecutionBadge";
import { cn } from "@/lib/utils";
import { WebhookDisclaimer } from "./components/WebhookDisclaimer";
import { AyrshareConnectButton } from "./components/AyrshareConnectButton";
import { NodeModelMetadata } from "@/app/api/__generated__/models/nodeModelMetadata";
import { preprocessInputSchema } from "@/components/renderers/InputRenderer/utils/input-schema-pre-processor";
import { cn } from "@/lib/utils";
import { RJSFSchema } from "@rjsf/utils";
import { NodeProps, Node as XYNode } from "@xyflow/react";
import React from "react";
import { BlockUIType } from "../../../types";
import { FormCreator } from "../FormCreator";
import { OutputHandler } from "../OutputHandler";
import { AyrshareConnectButton } from "./components/AyrshareConnectButton";
import { NodeAdvancedToggle } from "./components/NodeAdvancedToggle";
import { NodeContainer } from "./components/NodeContainer";
import { NodeExecutionBadge } from "./components/NodeExecutionBadge";
import { NodeHeader } from "./components/NodeHeader";
import { NodeDataRenderer } from "./components/NodeOutput/NodeOutput";
import { NodeRightClickMenu } from "./components/NodeRightClickMenu";
import { StickyNoteBlock } from "./components/StickyNoteBlock";
import { WebhookDisclaimer } from "./components/WebhookDisclaimer";

export type CustomNodeData = {
hardcodedValues: {
@@ -88,7 +89,7 @@ export const CustomNode: React.FC<NodeProps<CustomNode>> = React.memo(

// Currently all blockTypes design are similar - that's why i am using the same component for all of them
// If in future - if we need some drastic change in some blockTypes design - we can create separate components for them
return (
const node = (
<NodeContainer selected={selected} nodeId={nodeId} hasErrors={hasErrors}>
<div className="rounded-xlarge bg-white">
<NodeHeader data={data} nodeId={nodeId} />
@@ -99,7 +100,7 @@ export const CustomNode: React.FC<NodeProps<CustomNode>> = React.memo(
nodeId={nodeId}
uiType={data.uiType}
className={cn(
"bg-white pr-6",
"bg-white px-4",
isWebhook && "pointer-events-none opacity-50",
)}
showHandles={showHandles}
@@ -117,6 +118,15 @@ export const CustomNode: React.FC<NodeProps<CustomNode>> = React.memo(
<NodeExecutionBadge nodeId={nodeId} />
</NodeContainer>
);

return (
<NodeRightClickMenu
nodeId={nodeId}
subGraphID={data.hardcodedValues?.graph_id}
>
{node}
</NodeRightClickMenu>
);
},
);

@@ -8,7 +8,7 @@ export const NodeAdvancedToggle = ({ nodeId }: { nodeId: string }) => {
);
const setShowAdvanced = useNodeStore((state) => state.setShowAdvanced);
return (
<div className="flex items-center justify-between gap-2 rounded-b-xlarge border-t border-slate-200/50 bg-white px-5 py-3.5">
<div className="flex items-center justify-between gap-2 rounded-b-xlarge border-t border-zinc-200 bg-white px-5 py-3.5">
<Text variant="body" className="font-medium text-slate-700">
Advanced
</Text>

@@ -22,7 +22,7 @@ export const NodeContainer = ({
return (
<div
className={cn(
"z-12 max-w-[370px] rounded-xlarge ring-1 ring-slate-200/60",
"z-12 w-[350px] rounded-xlarge ring-1 ring-slate-200/60",
selected && "shadow-lg ring-2 ring-slate-200",
status && nodeStyleBasedOnStatus[status],
hasErrors ? nodeStyleBasedOnStatus[AgentExecutionStatus.FAILED] : "",

@@ -1,26 +1,31 @@
import { Separator } from "@/components/__legacy__/ui/separator";
import { useCopyPasteStore } from "@/app/(platform)/build/stores/copyPasteStore";
import { useNodeStore } from "@/app/(platform)/build/stores/nodeStore";
import {
DropdownMenu,
DropdownMenuContent,
DropdownMenuItem,
DropdownMenuTrigger,
} from "@/components/molecules/DropdownMenu/DropdownMenu";
import { DotsThreeOutlineVerticalIcon } from "@phosphor-icons/react";
import { Copy, Trash2, ExternalLink } from "lucide-react";
import { useNodeStore } from "@/app/(platform)/build/stores/nodeStore";
import { useCopyPasteStore } from "@/app/(platform)/build/stores/copyPasteStore";
import {
SecondaryDropdownMenuContent,
SecondaryDropdownMenuItem,
SecondaryDropdownMenuSeparator,
} from "@/components/molecules/SecondaryMenu/SecondaryMenu";
import {
ArrowSquareOutIcon,
CopyIcon,
DotsThreeOutlineVerticalIcon,
TrashIcon,
} from "@phosphor-icons/react";
import { useReactFlow } from "@xyflow/react";

export const NodeContextMenu = ({
nodeId,
subGraphID,
}: {
type Props = {
nodeId: string;
subGraphID?: string;
}) => {
};

export const NodeContextMenu = ({ nodeId, subGraphID }: Props) => {
const { deleteElements } = useReactFlow();

const handleCopy = () => {
function handleCopy() {
useNodeStore.setState((state) => ({
nodes: state.nodes.map((node) => ({
...node,
@@ -30,47 +35,47 @@ export const NodeContextMenu = ({

useCopyPasteStore.getState().copySelectedNodes();
useCopyPasteStore.getState().pasteNodes();
};
}

const handleDelete = () => {
function handleDelete() {
deleteElements({ nodes: [{ id: nodeId }] });
};
}

return (
<DropdownMenu>
<DropdownMenuTrigger className="py-2">
<DotsThreeOutlineVerticalIcon size={16} weight="fill" />
</DropdownMenuTrigger>
<DropdownMenuContent
side="right"
align="start"
className="rounded-xlarge"
>
<DropdownMenuItem onClick={handleCopy} className="hover:rounded-xlarge">
<Copy className="mr-2 h-4 w-4" />
Copy Node
</DropdownMenuItem>
<SecondaryDropdownMenuContent side="right" align="start">
<SecondaryDropdownMenuItem onClick={handleCopy}>
<CopyIcon size={20} className="mr-2 dark:text-gray-100" />
<span className="dark:text-gray-100">Copy</span>
</SecondaryDropdownMenuItem>
<SecondaryDropdownMenuSeparator />

{subGraphID && (
<DropdownMenuItem
onClick={() => window.open(`/build?flowID=${subGraphID}`)}
className="hover:rounded-xlarge"
>
<ExternalLink className="mr-2 h-4 w-4" />
Open Agent
</DropdownMenuItem>
<>
<SecondaryDropdownMenuItem
onClick={() => window.open(`/build?flowID=${subGraphID}`)}
>
<ArrowSquareOutIcon
size={20}
className="mr-2 dark:text-gray-100"
/>
<span className="dark:text-gray-100">Open agent</span>
</SecondaryDropdownMenuItem>
<SecondaryDropdownMenuSeparator />
</>
)}

<Separator className="my-2" />

<DropdownMenuItem
onClick={handleDelete}
className="text-red-600 hover:rounded-xlarge"
>
<Trash2 className="mr-2 h-4 w-4" />
Delete
</DropdownMenuItem>
</DropdownMenuContent>
<SecondaryDropdownMenuItem variant="destructive" onClick={handleDelete}>
<TrashIcon
size={20}
className="mr-2 text-red-500 dark:text-red-400"
/>
<span className="dark:text-red-400">Delete</span>
</SecondaryDropdownMenuItem>
</SecondaryDropdownMenuContent>
</DropdownMenu>
);
};

@@ -1,29 +1,30 @@
import { Text } from "@/components/atoms/Text/Text";
import { beautifyString, cn } from "@/lib/utils";
import { NodeCost } from "./NodeCost";
import { NodeBadges } from "./NodeBadges";
import { NodeContextMenu } from "./NodeContextMenu";
import { CustomNodeData } from "../CustomNode";
import { useNodeStore } from "@/app/(platform)/build/stores/nodeStore";
import { useState } from "react";
import { Text } from "@/components/atoms/Text/Text";
import {
Tooltip,
TooltipContent,
TooltipProvider,
TooltipTrigger,
} from "@/components/atoms/Tooltip/BaseTooltip";
import { beautifyString, cn } from "@/lib/utils";
import { useState } from "react";
import { CustomNodeData } from "../CustomNode";
import { NodeBadges } from "./NodeBadges";
import { NodeContextMenu } from "./NodeContextMenu";
import { NodeCost } from "./NodeCost";

export const NodeHeader = ({
data,
nodeId,
}: {
type Props = {
data: CustomNodeData;
nodeId: string;
}) => {
};

export const NodeHeader = ({ data, nodeId }: Props) => {
const updateNodeData = useNodeStore((state) => state.updateNodeData);
const title = (data.metadata?.customized_name as string) || data.title;
const [isEditingTitle, setIsEditingTitle] = useState(false);
const [editedTitle, setEditedTitle] = useState(title);
const [editedTitle, setEditedTitle] = useState(
beautifyString(title).replace("Block", "").trim(),
);

const handleTitleEdit = () => {
updateNodeData(nodeId, {
@@ -41,7 +42,7 @@ export const NodeHeader = ({
};

return (
<div className="flex h-auto flex-col gap-1 rounded-xlarge border-b border-slate-200/50 bg-gradient-to-r from-slate-50/80 to-white/90 px-4 py-4 pt-3">
<div className="flex h-auto flex-col gap-1 rounded-xlarge border-b border-zinc-200 bg-gradient-to-r from-slate-50/80 to-white/90 px-4 py-4 pt-3">
{/* Title row with context menu */}
<div className="flex items-start justify-between gap-2">
<div className="flex min-w-0 flex-1 items-center gap-2">
@@ -68,12 +69,12 @@ export const NodeHeader = ({
<TooltipTrigger asChild>
<div>
<Text variant="large-semibold" className="line-clamp-1">
{beautifyString(title)}
{beautifyString(title).replace("Block", "").trim()}
</Text>
</div>
</TooltipTrigger>
<TooltipContent>
<p>{beautifyString(title)}</p>
<p>{beautifyString(title).replace("Block", "").trim()}</p>
</TooltipContent>
</Tooltip>
</TooltipProvider>

@@ -23,7 +23,7 @@ export const NodeDataRenderer = ({ nodeId }: { nodeId: string }) => {
}

return (
<div className="flex flex-col gap-3 rounded-b-xl border-t border-slate-200/50 px-4 py-4">
<div className="flex flex-col gap-3 rounded-b-xl border-t border-zinc-200 px-4 py-4">
<div className="flex items-center justify-between">
<Text variant="body-medium" className="!font-semibold text-slate-700">
Node Output

@@ -0,0 +1,104 @@
import { useCopyPasteStore } from "@/app/(platform)/build/stores/copyPasteStore";
import { useNodeStore } from "@/app/(platform)/build/stores/nodeStore";
import {
SecondaryMenuContent,
SecondaryMenuItem,
SecondaryMenuSeparator,
} from "@/components/molecules/SecondaryMenu/SecondaryMenu";
import { ArrowSquareOutIcon, CopyIcon, TrashIcon } from "@phosphor-icons/react";
import * as ContextMenu from "@radix-ui/react-context-menu";
import { useReactFlow } from "@xyflow/react";
import { useEffect, useRef } from "react";
import { CustomNode } from "../CustomNode";

type Props = {
nodeId: string;
subGraphID?: string;
children: React.ReactNode;
};

const DOUBLE_CLICK_TIMEOUT = 300;

export function NodeRightClickMenu({ nodeId, subGraphID, children }: Props) {
const { deleteElements } = useReactFlow<CustomNode>();
const lastRightClickTime = useRef<number>(0);
const containerRef = useRef<HTMLDivElement>(null);

function copyNode() {
useNodeStore.setState((state) => ({
nodes: state.nodes.map((node) => ({
...node,
selected: node.id === nodeId,
})),
}));

useCopyPasteStore.getState().copySelectedNodes();
useCopyPasteStore.getState().pasteNodes();
}

function deleteNode() {
deleteElements({ nodes: [{ id: nodeId }] });
}

useEffect(() => {
const container = containerRef.current;
if (!container) return;

function handleContextMenu(e: MouseEvent) {
const now = Date.now();
const timeSinceLastClick = now - lastRightClickTime.current;

if (timeSinceLastClick < DOUBLE_CLICK_TIMEOUT) {
e.stopImmediatePropagation();
lastRightClickTime.current = 0;
return;
}

lastRightClickTime.current = now;
}

container.addEventListener("contextmenu", handleContextMenu, true);

return () => {
container.removeEventListener("contextmenu", handleContextMenu, true);
};
}, []);

return (
<ContextMenu.Root>
<ContextMenu.Trigger asChild>
<div ref={containerRef}>{children}</div>
</ContextMenu.Trigger>
<SecondaryMenuContent>
<SecondaryMenuItem onSelect={copyNode}>
<CopyIcon size={20} className="mr-2 dark:text-gray-100" />
<span className="dark:text-gray-100">Copy</span>
</SecondaryMenuItem>
<SecondaryMenuSeparator />

{subGraphID && (
<>
<SecondaryMenuItem
onClick={() => window.open(`/build?flowID=${subGraphID}`)}
>
<ArrowSquareOutIcon
size={20}
className="mr-2 dark:text-gray-100"
/>
<span className="dark:text-gray-100">Open agent</span>
</SecondaryMenuItem>
<SecondaryMenuSeparator />
</>
)}

<SecondaryMenuItem variant="destructive" onSelect={deleteNode}>
<TrashIcon
size={20}
className="mr-2 text-red-500 dark:text-red-400"
/>
<span className="dark:text-red-400">Delete</span>
</SecondaryMenuItem>
</SecondaryMenuContent>
</ContextMenu.Root>
);
}

@@ -1,6 +1,6 @@
import { useMemo } from "react";
import { FormCreator } from "../../FormCreator";
import { preprocessInputSchema } from "@/components/renderers/input-renderer/utils/input-schema-pre-processor";
import { preprocessInputSchema } from "@/components/renderers/InputRenderer/utils/input-schema-pre-processor";
import { CustomNodeData } from "../CustomNode";
import { Text } from "@/components/atoms/Text/Text";
import { cn } from "@/lib/utils";

@@ -3,7 +3,7 @@ import React from "react";
import { uiSchema } from "./uiSchema";
import { useNodeStore } from "../../../stores/nodeStore";
import { BlockUIType } from "../../types";
import { FormRenderer } from "@/components/renderers/input-renderer/FormRenderer";
import { FormRenderer } from "@/components/renderers/InputRenderer/FormRenderer";

export const FormCreator = React.memo(
({

@@ -4,7 +4,7 @@ import { CaretDownIcon, InfoIcon } from "@phosphor-icons/react";
import { RJSFSchema } from "@rjsf/utils";
import { useState } from "react";

import NodeHandle from "../handlers/NodeHandle";
import { OutputNodeHandle } from "../handlers/NodeHandle";
import {
Tooltip,
TooltipContent,
@@ -13,7 +13,6 @@ import {
} from "@/components/atoms/Tooltip/BaseTooltip";
import { useEdgeStore } from "@/app/(platform)/build/stores/edgeStore";
import { getTypeDisplayInfo } from "./helpers";
import { generateHandleId } from "../handlers/helpers";
import { BlockUIType } from "../../types";

export const OutputHandler = ({
@@ -29,8 +28,73 @@ export const OutputHandler = ({
const properties = outputSchema?.properties || {};
const [isOutputVisible, setIsOutputVisible] = useState(true);

const showHandles = uiType !== BlockUIType.OUTPUT;

const renderOutputHandles = (
schema: RJSFSchema,
keyPrefix: string = "",
titlePrefix: string = "",
): React.ReactNode[] => {
return Object.entries(schema).map(
([key, fieldSchema]: [string, RJSFSchema]) => {
const fullKey = keyPrefix ? `${keyPrefix}_#_${key}` : key;
const fieldTitle = titlePrefix + (fieldSchema?.title || key);

const isConnected = isOutputConnected(nodeId, fullKey);
const shouldShow = isConnected || isOutputVisible;
const { displayType, colorClass, hexColor } =
getTypeDisplayInfo(fieldSchema);

return shouldShow ? (
<div key={fullKey} className="flex flex-col items-end gap-2">
<div className="relative flex items-center gap-2">
{fieldSchema?.description && (
<TooltipProvider>
<Tooltip>
<TooltipTrigger asChild>
<span
style={{ marginLeft: 6, cursor: "pointer" }}
aria-label="info"
tabIndex={0}
>
<InfoIcon />
</span>
</TooltipTrigger>
<TooltipContent>{fieldSchema?.description}</TooltipContent>
</Tooltip>
</TooltipProvider>
)}
<Text variant="body" className="text-slate-700">
{fieldTitle}
</Text>
<Text variant="small" as="span" className={colorClass}>
({displayType})
</Text>

{showHandles && (
<OutputNodeHandle
field_name={fullKey}
nodeId={nodeId}
hexColor={hexColor}
/>
)}
</div>

{/* Recursively render nested properties */}
{fieldSchema?.properties &&
renderOutputHandles(
fieldSchema.properties,
fullKey,
`${fieldTitle}.`,
)}
</div>
) : null;
},
);
};

return (
<div className="flex flex-col items-end justify-between gap-2 rounded-b-xlarge border-t border-slate-200/50 bg-white py-3.5">
<div className="flex flex-col items-end justify-between gap-2 rounded-b-xlarge border-t border-zinc-200 bg-white py-3.5">
<Button
variant="ghost"
className="mr-4 h-fit min-w-0 p-0 hover:border-transparent hover:bg-transparent"
@@ -49,50 +113,9 @@ export const OutputHandler = ({
</Text>
</Button>

{
<div className="flex flex-col items-end gap-2">
{Object.entries(properties).map(([key, property]: [string, any]) => {
const isConnected = isOutputConnected(nodeId, key);
const shouldShow = isConnected || isOutputVisible;
const { displayType, colorClass } = getTypeDisplayInfo(property);

return shouldShow ? (
<div key={key} className="relative flex items-center gap-2">
{property?.description && (
<TooltipProvider>
<Tooltip>
<TooltipTrigger asChild>
<span
style={{ marginLeft: 6, cursor: "pointer" }}
aria-label="info"
tabIndex={0}
>
<InfoIcon />
</span>
</TooltipTrigger>
<TooltipContent>{property?.description}</TooltipContent>
</Tooltip>
</TooltipProvider>
)}
<Text variant="body" className="text-slate-700">
{property?.title || key}{" "}
</Text>
<Text variant="small" as="span" className={colorClass}>
({displayType})
</Text>

<NodeHandle
handleId={
uiType === BlockUIType.AGENT ? key : generateHandleId(key)
}
isConnected={isConnected}
side="right"
/>
</div>
) : null;
})}
</div>
}
<div className="flex flex-col items-end gap-2">
{renderOutputHandles(properties)}
</div>
</div>
);
};

@@ -92,14 +92,38 @@ export const getTypeDisplayInfo = (schema: any) => {
if (schema?.type === "string" && schema?.format) {
const formatMap: Record<
string,
{ displayType: string; colorClass: string }
{ displayType: string; colorClass: string; hexColor: string }
> = {
file: { displayType: "file", colorClass: "!text-green-500" },
date: { displayType: "date", colorClass: "!text-blue-500" },
time: { displayType: "time", colorClass: "!text-blue-500" },
"date-time": { displayType: "datetime", colorClass: "!text-blue-500" },
"long-text": { displayType: "text", colorClass: "!text-green-500" },
"short-text": { displayType: "text", colorClass: "!text-green-500" },
file: {
displayType: "file",
colorClass: "!text-green-500",
hexColor: "#22c55e",
},
date: {
displayType: "date",
colorClass: "!text-blue-500",
hexColor: "#3b82f6",
},
time: {
displayType: "time",
colorClass: "!text-blue-500",
hexColor: "#3b82f6",
},
"date-time": {
displayType: "datetime",
colorClass: "!text-blue-500",
hexColor: "#3b82f6",
},
"long-text": {
displayType: "text",
colorClass: "!text-green-500",
hexColor: "#22c55e",
},
"short-text": {
displayType: "text",
colorClass: "!text-green-500",
hexColor: "#22c55e",
},
};

const formatInfo = formatMap[schema.format];
@@ -131,10 +155,23 @@ export const getTypeDisplayInfo = (schema: any) => {
any: "!text-gray-500",
};

const hexColorMap: Record<string, string> = {
string: "#22c55e",
number: "#3b82f6",
integer: "#3b82f6",
boolean: "#eab308",
object: "#a855f7",
array: "#6366f1",
null: "#6b7280",
any: "#6b7280",
};

const colorClass = colorMap[schema?.type] || "!text-gray-500";
const hexColor = hexColorMap[schema?.type] || "#6b7280";

return {
displayType,
colorClass,
hexColor,
};
};

@@ -24,7 +24,7 @@ export const ControlPanelButton: React.FC<Props> = ({
role={as === "div" ? "button" : undefined}
disabled={as === "button" ? disabled : undefined}
className={cn(
"flex h-[4.25rem] w-[4.25rem] items-center justify-center whitespace-normal bg-white p-[1.38rem] text-zinc-800 shadow-none hover:cursor-pointer hover:bg-zinc-100 hover:text-zinc-950 focus:ring-0",
"flex w-auto items-center justify-center whitespace-normal bg-white px-4 py-4 text-zinc-800 shadow-none hover:cursor-pointer hover:bg-zinc-100 hover:text-zinc-950 focus:ring-0",
selected &&
"bg-violet-50 text-violet-700 hover:cursor-default hover:bg-violet-50 hover:text-violet-700 active:bg-violet-50 active:text-violet-700",
disabled && "cursor-not-allowed opacity-50 hover:cursor-not-allowed",

@@ -1,18 +1,17 @@
import React from "react";
import { useControlPanelStore } from "@/app/(platform)/build/stores/controlPanelStore";
import {
Popover,
PopoverContent,
PopoverTrigger,
} from "@/components/__legacy__/ui/popover";
import { BlockMenuContent } from "../BlockMenuContent/BlockMenuContent";
import { ControlPanelButton } from "../../ControlPanelButton";
import { LegoIcon } from "@phosphor-icons/react";
import { useControlPanelStore } from "@/app/(platform)/build/stores/controlPanelStore";
import {
Tooltip,
TooltipContent,
TooltipTrigger,
} from "@/components/atoms/Tooltip/BaseTooltip";
import { LegoIcon } from "@phosphor-icons/react";
import { ControlPanelButton } from "../../ControlPanelButton";
import { BlockMenuContent } from "../BlockMenuContent/BlockMenuContent";

export const BlockMenu = () => {
const { blockMenuOpen, setBlockMenuOpen } = useControlPanelStore();
@@ -28,7 +27,7 @@ export const BlockMenu = () => {
selected={blockMenuOpen}
className="rounded-none"
>
<LegoIcon className="h-6 w-6" />
<LegoIcon className="size-5" />
</ControlPanelButton>
</PopoverTrigger>
</TooltipTrigger>

@@ -0,0 +1,57 @@
import { useBlockMenuStore } from "@/app/(platform)/build/stores/blockMenuStore";
import { FilterChip } from "../FilterChip";
import { categories } from "./constants";
import { FilterSheet } from "../FilterSheet/FilterSheet";
import { GetV2BuilderSearchFilterAnyOfItem } from "@/app/api/__generated__/models/getV2BuilderSearchFilterAnyOfItem";

export const BlockMenuFilters = () => {
const {
filters,
addFilter,
removeFilter,
categoryCounts,
creators,
addCreator,
removeCreator,
} = useBlockMenuStore();

const handleFilterClick = (filter: GetV2BuilderSearchFilterAnyOfItem) => {
if (filters.includes(filter)) {
removeFilter(filter);
} else {
addFilter(filter);
}
};

const handleCreatorClick = (creator: string) => {
if (creators.includes(creator)) {
removeCreator(creator);
} else {
addCreator(creator);
}
};

return (
<div className="flex flex-wrap gap-2">
<FilterSheet categories={categories} />
{creators.length > 0 &&
creators.map((creator) => (
<FilterChip
key={creator}
name={"Created by " + creator.slice(0, 10) + "..."}
selected={creators.includes(creator)}
onClick={() => handleCreatorClick(creator)}
/>
))}
{categories.map((category) => (
<FilterChip
key={category.key}
name={category.name}
selected={filters.includes(category.key)}
onClick={() => handleFilterClick(category.key)}
number={categoryCounts[category.key] ?? 0}
/>
))}
</div>
);
};

@@ -0,0 +1,15 @@
import { GetV2BuilderSearchFilterAnyOfItem } from "@/app/api/__generated__/models/getV2BuilderSearchFilterAnyOfItem";
import { CategoryKey } from "./types";

export const categories: Array<{ key: CategoryKey; name: string }> = [
{ key: GetV2BuilderSearchFilterAnyOfItem.blocks, name: "Blocks" },
{
key: GetV2BuilderSearchFilterAnyOfItem.integrations,
name: "Integrations",
},
{
key: GetV2BuilderSearchFilterAnyOfItem.marketplace_agents,
name: "Marketplace agents",
},
{ key: GetV2BuilderSearchFilterAnyOfItem.my_agents, name: "My agents" },
];

@@ -0,0 +1,26 @@
import { GetV2BuilderSearchFilterAnyOfItem } from "@/app/api/__generated__/models/getV2BuilderSearchFilterAnyOfItem";

export type DefaultStateType =
| "suggestion"
| "all_blocks"
| "input_blocks"
| "action_blocks"
| "output_blocks"
| "integrations"
| "marketplace_agents"
| "my_agents";

export type CategoryKey = GetV2BuilderSearchFilterAnyOfItem;

export interface Filters {
categories: {
blocks: boolean;
integrations: boolean;
marketplace_agents: boolean;
my_agents: boolean;
providers: boolean;
};
createdBy: string[];
}

export type CategoryCounts = Record<CategoryKey, number>;

@@ -1,111 +1,14 @@
import { Text } from "@/components/atoms/Text/Text";
import { useBlockMenuSearch } from "./useBlockMenuSearch";
import { InfiniteScroll } from "@/components/contextual/InfiniteScroll/InfiniteScroll";
import { LoadingSpinner } from "@/components/__legacy__/ui/loading";
import { SearchResponseItemsItem } from "@/app/api/__generated__/models/searchResponseItemsItem";
import { MarketplaceAgentBlock } from "../MarketplaceAgentBlock";
import { Block } from "../Block";
import { UGCAgentBlock } from "../UGCAgentBlock";
import { getSearchItemType } from "./helper";
import { useBlockMenuStore } from "../../../../stores/blockMenuStore";
import { blockMenuContainerStyle } from "../style";
import { cn } from "@/lib/utils";
import { NoSearchResult } from "../NoSearchResult";
import { BlockMenuFilters } from "../BlockMenuFilters/BlockMenuFilters";
import { BlockMenuSearchContent } from "../BlockMenuSearchContent/BlockMenuSearchContent";

export const BlockMenuSearch = () => {
const {
searchResults,
isFetchingNextPage,
fetchNextPage,
hasNextPage,
searchLoading,
handleAddLibraryAgent,
handleAddMarketplaceAgent,
addingLibraryAgentId,
addingMarketplaceAgentSlug,
} = useBlockMenuSearch();
const { searchQuery } = useBlockMenuStore();

if (searchLoading) {
return (
<div
className={cn(
blockMenuContainerStyle,
"flex items-center justify-center",
)}
>
<LoadingSpinner className="size-13" />
</div>
);
}

if (searchResults.length === 0) {
return <NoSearchResult />;
}

return (
<div className={blockMenuContainerStyle}>
<BlockMenuFilters />
<Text variant="body-medium">Search results</Text>
<InfiniteScroll
isFetchingNextPage={isFetchingNextPage}
fetchNextPage={fetchNextPage}
hasNextPage={hasNextPage}
loader={<LoadingSpinner className="size-13" />}
className="space-y-2.5"
>
{searchResults.map((item: SearchResponseItemsItem, index: number) => {
const { type, data } = getSearchItemType(item);
// backend give support to these 3 types only [right now] - we need to give support to integration and ai agent types in follow up PRs
switch (type) {
case "store_agent":
return (
<MarketplaceAgentBlock
key={index}
slug={data.slug}
highlightedText={searchQuery}
title={data.agent_name}
image_url={data.agent_image}
creator_name={data.creator}
number_of_runs={data.runs}
loading={addingMarketplaceAgentSlug === data.slug}
onClick={() =>
handleAddMarketplaceAgent({
creator_name: data.creator,
slug: data.slug,
})
}
/>
);
case "block":
return (
<Block
key={index}
title={data.name}
highlightedText={searchQuery}
description={data.description}
blockData={data}
/>
);

case "library_agent":
return (
<UGCAgentBlock
key={index}
title={data.name}
highlightedText={searchQuery}
image_url={data.image_url}
version={data.graph_version}
edited_time={data.updated_at}
isLoading={addingLibraryAgentId === data.id}
onClick={() => handleAddLibraryAgent(data)}
/>
);

default:
return null;
}
})}
</InfiniteScroll>
<BlockMenuSearchContent />
</div>
);
};

@@ -0,0 +1,108 @@
import { SearchResponseItemsItem } from "@/app/api/__generated__/models/searchResponseItemsItem";
import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner";
import { InfiniteScroll } from "@/components/contextual/InfiniteScroll/InfiniteScroll";
import { getSearchItemType } from "./helper";
import { MarketplaceAgentBlock } from "../MarketplaceAgentBlock";
import { Block } from "../Block";
import { UGCAgentBlock } from "../UGCAgentBlock";
import { useBlockMenuSearchContent } from "./useBlockMenuSearchContent";
import { useBlockMenuStore } from "@/app/(platform)/build/stores/blockMenuStore";
import { cn } from "@/lib/utils";
import { blockMenuContainerStyle } from "../style";
import { NoSearchResult } from "../NoSearchResult";

export const BlockMenuSearchContent = () => {
const {
searchResults,
isFetchingNextPage,
fetchNextPage,
hasNextPage,
searchLoading,
handleAddLibraryAgent,
handleAddMarketplaceAgent,
addingLibraryAgentId,
addingMarketplaceAgentSlug,
} = useBlockMenuSearchContent();

const { searchQuery } = useBlockMenuStore();

if (searchLoading) {
return (
<div
className={cn(
blockMenuContainerStyle,
"flex items-center justify-center",
)}
>
<LoadingSpinner className="size-13" />
</div>
);
}

if (searchResults.length === 0) {
return <NoSearchResult />;
}

return (
<InfiniteScroll
isFetchingNextPage={isFetchingNextPage}
fetchNextPage={fetchNextPage}
hasNextPage={hasNextPage}
loader={<LoadingSpinner className="size-13" />}
className="space-y-2.5"
>
{searchResults.map((item: SearchResponseItemsItem, index: number) => {
const { type, data } = getSearchItemType(item);
// backend give support to these 3 types only [right now] - we need to give support to integration and ai agent types in follow up PRs
switch (type) {
case "store_agent":
return (
<MarketplaceAgentBlock
key={index}
slug={data.slug}
highlightedText={searchQuery}
title={data.agent_name}
image_url={data.agent_image}
creator_name={data.creator}
number_of_runs={data.runs}
loading={addingMarketplaceAgentSlug === data.slug}
onClick={() =>
handleAddMarketplaceAgent({
creator_name: data.creator,
slug: data.slug,
})
}
/>
);
case "block":
return (
<Block
key={index}
title={data.name}
highlightedText={searchQuery}
description={data.description}
blockData={data}
/>
);

case "library_agent":
return (
<UGCAgentBlock
key={index}
title={data.name}
highlightedText={searchQuery}
image_url={data.image_url}
version={data.graph_version}
edited_time={data.updated_at}
isLoading={addingLibraryAgentId === data.id}
onClick={() => handleAddLibraryAgent(data)}
/>
);

default:
return null;
}
})}
</InfiniteScroll>
);
};

@@ -23,9 +23,19 @@ import { LibraryAgent } from "@/app/api/__generated__/models/libraryAgent";
import { getQueryClient } from "@/lib/react-query/queryClient";
import { useToast } from "@/components/molecules/Toast/use-toast";
import * as Sentry from "@sentry/nextjs";
import { GetV2BuilderSearchFilterAnyOfItem } from "@/app/api/__generated__/models/getV2BuilderSearchFilterAnyOfItem";

export const useBlockMenuSearchContent = () => {
const {
searchQuery,
searchId,
setSearchId,
filters,
setCreatorsList,
creators,
setCategoryCounts,
} = useBlockMenuStore();

export const useBlockMenuSearch = () => {
const { searchQuery, searchId, setSearchId } = useBlockMenuStore();
const { toast } = useToast();
const { addAgentToBuilder, addLibraryAgentToBuilder } =
useAddAgentToBuilder();
@@ -57,6 +67,8 @@ export const useBlockMenuSearch = () => {
page_size: 8,
search_query: searchQuery,
search_id: searchId,
filter: filters.length > 0 ? filters : undefined,
by_creator: creators.length > 0 ? creators : undefined,
},
{
query: { getNextPageParam: getPaginationNextPageNumber },
@@ -98,6 +110,26 @@ export const useBlockMenuSearch = () => {
}
}, [searchQueryData, searchId, setSearchId]);

// from all the results, we need to get all the unique creators
useEffect(() => {
if (!searchQueryData?.pages?.length) {
return;
}
const latestData = okData(searchQueryData.pages.at(-1));
setCategoryCounts(
(latestData?.total_items as Record<
GetV2BuilderSearchFilterAnyOfItem,
number
>) || {
blocks: 0,
integrations: 0,
marketplace_agents: 0,
my_agents: 0,
},
);
setCreatorsList(latestData?.items || []);
}, [searchQueryData]);

useEffect(() => {
if (searchId && !searchQuery) {
resetSearchSession();

@@ -1,7 +1,9 @@
import { Button } from "@/components/__legacy__/ui/button";
import { cn } from "@/lib/utils";
import { X } from "lucide-react";
import React, { ButtonHTMLAttributes } from "react";
import { XIcon } from "@phosphor-icons/react";
import { AnimatePresence, motion } from "framer-motion";

import React, { ButtonHTMLAttributes, useState } from "react";

interface Props extends ButtonHTMLAttributes<HTMLButtonElement> {
selected?: boolean;
@@ -16,39 +18,51 @@ export const FilterChip: React.FC<Props> = ({
className,
...rest
}) => {
const [isHovered, setIsHovered] = useState(false);
return (
<Button
className={cn(
"group w-fit space-x-1 rounded-[1.5rem] border border-zinc-300 bg-transparent px-[0.625rem] py-[0.375rem] shadow-none transition-transform duration-300 ease-in-out",
"hover:border-violet-500 hover:bg-transparent focus:ring-0 disabled:cursor-not-allowed",
selected && "border-0 bg-violet-700 hover:border",
className,
)}
{...rest}
>
<span
<AnimatePresence mode="wait">
<Button
onMouseEnter={() => setIsHovered(true)}
onMouseLeave={() => setIsHovered(false)}
className={cn(
"font-sans text-sm font-medium leading-[1.375rem] text-zinc-600 group-hover:text-zinc-600 group-disabled:text-zinc-400",
selected && "text-zinc-50",
"group w-fit space-x-1 rounded-[1.5rem] border border-zinc-300 bg-transparent px-[0.625rem] py-[0.375rem] shadow-none",
"hover:border-violet-500 hover:bg-transparent focus:ring-0 disabled:cursor-not-allowed",
selected && "border-0 bg-violet-700 hover:border",
className,
)}
{...rest}
>
{name}
</span>
{selected && (
<>
<span className="flex h-4 w-4 items-center justify-center rounded-full bg-zinc-50 transition-all duration-300 ease-in-out group-hover:hidden">
<X
className="h-3 w-3 rounded-full text-violet-700"
strokeWidth={2}
/>
</span>
{number !== undefined && (
<span className="hidden h-[1.375rem] items-center rounded-[1.25rem] bg-violet-700 p-[0.375rem] text-zinc-50 transition-all duration-300 ease-in-out animate-in fade-in zoom-in group-hover:flex">
{number > 100 ? "100+" : number}
</span>
<span
className={cn(
"font-sans text-sm font-medium leading-[1.375rem] text-zinc-600 group-hover:text-zinc-600 group-disabled:text-zinc-400",
selected && "text-zinc-50",
)}
</>
)}
</Button>
>
{name}
</span>
{selected && !isHovered && (
<motion.span
initial={{ opacity: 0.5, scale: 0.5, filter: "blur(20px)" }}
animate={{ opacity: 1, scale: 1, filter: "blur(0px)" }}
exit={{ opacity: 0.5, scale: 0.5, filter: "blur(20px)" }}
transition={{ duration: 0.3, type: "spring", bounce: 0.2 }}
className="flex h-4 w-4 items-center justify-center rounded-full bg-zinc-50"
>
<XIcon size={12} weight="bold" className="text-violet-700" />
</motion.span>
)}
{number !== undefined && isHovered && (
<motion.span
initial={{ opacity: 0.5, scale: 0.5, filter: "blur(10px)" }}
animate={{ opacity: 1, scale: 1, filter: "blur(0px)" }}
exit={{ opacity: 0.5, scale: 0.5, filter: "blur(10px)" }}
transition={{ duration: 0.3, type: "spring", bounce: 0.2 }}
className="flex h-[1.375rem] items-center rounded-[1.25rem] bg-violet-700 p-[0.375rem] text-zinc-50"
>
{number > 100 ? "100+" : number}
</motion.span>
)}
</Button>
</AnimatePresence>
);
};
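Note: based only on the props visible in this hunk (name, selected, optional number, plus standard button attributes), a hypothetical usage of FilterChip might look like the sketch below; the import path and the toggle state are illustrative, not taken from the diff.

import { useState } from "react";
import { FilterChip } from "./FilterChip"; // path is illustrative

function ExampleChip() {
  const [selected, setSelected] = useState(false);
  return (
    <FilterChip
      name="Blocks"
      selected={selected}
      number={12} // count badge swapped in on hover via AnimatePresence
      onClick={() => setSelected((s) => !s)}
    />
  );
}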
@@ -0,0 +1,156 @@
import { FilterChip } from "../FilterChip";
import { cn } from "@/lib/utils";
import { CategoryKey } from "../BlockMenuFilters/types";
import { AnimatePresence, motion } from "framer-motion";
import { XIcon } from "@phosphor-icons/react";
import { Button } from "@/components/atoms/Button/Button";
import { Text } from "@/components/atoms/Text/Text";
import { Separator } from "@/components/__legacy__/ui/separator";
import { Checkbox } from "@/components/__legacy__/ui/checkbox";
import { useFilterSheet } from "./useFilterSheet";
import { INITIAL_CREATORS_TO_SHOW } from "./constant";

export function FilterSheet({
  categories,
}: {
  categories: Array<{ key: CategoryKey; name: string }>;
}) {
  const {
    isOpen,
    localCategories,
    localCreators,
    displayedCreatorsCount,
    handleLocalCategoryChange,
    handleToggleShowMoreCreators,
    handleLocalCreatorChange,
    handleClearFilters,
    handleCloseButton,
    handleApplyFilters,
    hasLocalActiveFilters,
    visibleCreators,
    creators,
    handleOpenFilters,
    hasActiveFilters,
  } = useFilterSheet();

  return (
    <div className="m-0 inline w-fit p-0">
      <FilterChip
        name={hasActiveFilters() ? "Edit filters" : "All filters"}
        onClick={handleOpenFilters}
      />

      <AnimatePresence>
        {isOpen && (
          <motion.div
            className={cn(
              "absolute bottom-2 left-2 top-2 z-20 w-3/4 max-w-[22.5rem] space-y-4 overflow-hidden rounded-[0.75rem] bg-white pb-4 shadow-[0_4px_12px_2px_rgba(0,0,0,0.1)]",
            )}
            initial={{ x: "-100%", filter: "blur(10px)" }}
            animate={{ x: 0, filter: "blur(0px)" }}
            exit={{ x: "-110%", filter: "blur(10px)" }}
            transition={{ duration: 0.4, type: "spring", bounce: 0.2 }}
          >
            {/* Top section */}
            <div className="flex items-center justify-between px-5 pt-4">
              <Text variant="body">Filters</Text>
              <Button
                className="p-0"
                variant="ghost"
                size="icon"
                onClick={handleCloseButton}
              >
                <XIcon size={20} />
              </Button>
            </div>

            <Separator className="h-[1px] w-full text-zinc-300" />

            {/* Category section */}
            <div className="space-y-4 px-5">
              <Text variant="large">Categories</Text>
              <div className="space-y-2">
                {categories.map((category) => (
                  <div
                    key={category.key}
                    className="flex items-center space-x-2"
                  >
                    <Checkbox
                      id={category.key}
                      checked={localCategories.includes(category.key)}
                      onCheckedChange={() =>
                        handleLocalCategoryChange(category.key)
                      }
                      className="border border-[#D4D4D4] shadow-none data-[state=checked]:border-none data-[state=checked]:bg-violet-700 data-[state=checked]:text-white"
                    />
                    <label
                      htmlFor={category.key}
                      className="font-sans text-sm leading-[1.375rem] text-zinc-600"
                    >
                      {category.name}
                    </label>
                  </div>
                ))}
              </div>
            </div>

            {/* Created by section */}
            <div className="space-y-4 px-5">
              <p className="font-sans text-base font-medium text-zinc-800">
                Created by
              </p>
              <div className="space-y-2">
                {visibleCreators.map((creator, i) => (
                  <div key={i} className="flex items-center space-x-2">
                    <Checkbox
                      id={`creator-${creator}`}
                      checked={localCreators.includes(creator)}
                      onCheckedChange={() => handleLocalCreatorChange(creator)}
                      className="border border-[#D4D4D4] shadow-none data-[state=checked]:border-none data-[state=checked]:bg-violet-700 data-[state=checked]:text-white"
                    />
                    <label
                      htmlFor={`creator-${creator}`}
                      className="font-sans text-sm leading-[1.375rem] text-zinc-600"
                    >
                      {creator}
                    </label>
                  </div>
                ))}
              </div>
              {creators.length > INITIAL_CREATORS_TO_SHOW && (
                <Button
                  variant={"link"}
                  className="m-0 p-0 font-sans text-sm font-medium leading-[1.375rem] text-zinc-800 underline hover:text-zinc-600"
                  onClick={handleToggleShowMoreCreators}
                >
                  {displayedCreatorsCount < creators.length ? "More" : "Less"}
                </Button>
              )}
            </div>

            {/* Footer section */}
            <div className="fixed bottom-0 flex w-full justify-between gap-3 border-t border-zinc-200 bg-white px-5 py-3">
              <Button
                size="small"
                variant={"outline"}
                onClick={handleClearFilters}
                className="rounded-[8px] px-2 py-1.5"
              >
                Clear
              </Button>

              <Button
                size="small"
                onClick={handleApplyFilters}
                disabled={!hasLocalActiveFilters()}
                className="rounded-[8px] px-2 py-1.5"
              >
                Apply filters
              </Button>
            </div>
          </motion.div>
        )}
      </AnimatePresence>
    </div>
  );
}
@@ -0,0 +1 @@
export const INITIAL_CREATORS_TO_SHOW = 5;
@@ -0,0 +1,100 @@
import { useBlockMenuStore } from "@/app/(platform)/build/stores/blockMenuStore";
import { useState } from "react";
import { INITIAL_CREATORS_TO_SHOW } from "./constant";
import { GetV2BuilderSearchFilterAnyOfItem } from "@/app/api/__generated__/models/getV2BuilderSearchFilterAnyOfItem";

export const useFilterSheet = () => {
  const { filters, creators_list, creators, setFilters, setCreators } =
    useBlockMenuStore();

  const [isOpen, setIsOpen] = useState(false);
  const [localCategories, setLocalCategories] =
    useState<GetV2BuilderSearchFilterAnyOfItem[]>(filters);
  const [localCreators, setLocalCreators] = useState<string[]>(creators);
  const [displayedCreatorsCount, setDisplayedCreatorsCount] = useState(
    INITIAL_CREATORS_TO_SHOW,
  );

  const handleLocalCategoryChange = (
    category: GetV2BuilderSearchFilterAnyOfItem,
  ) => {
    setLocalCategories((prev) => {
      if (prev.includes(category)) {
        return prev.filter((c) => c !== category);
      }
      return [...prev, category];
    });
  };

  const hasActiveFilters = () => {
    return filters.length > 0 || creators.length > 0;
  };

  const handleToggleShowMoreCreators = () => {
    if (displayedCreatorsCount < creators.length) {
      setDisplayedCreatorsCount(creators.length);
    } else {
      setDisplayedCreatorsCount(INITIAL_CREATORS_TO_SHOW);
    }
  };

  const handleLocalCreatorChange = (creator: string) => {
    setLocalCreators((prev) => {
      if (prev.includes(creator)) {
        return prev.filter((c) => c !== creator);
      }
      return [...prev, creator];
    });
  };

  const handleClearFilters = () => {
    setLocalCategories([]);
    setLocalCreators([]);
    setDisplayedCreatorsCount(INITIAL_CREATORS_TO_SHOW);
  };

  const handleCloseButton = () => {
    setIsOpen(false);
    setLocalCategories(filters);
    setLocalCreators(creators);
    setDisplayedCreatorsCount(INITIAL_CREATORS_TO_SHOW);
  };

  const handleApplyFilters = () => {
    setFilters(localCategories);
    setCreators(localCreators);
    setIsOpen(false);
  };

  const handleOpenFilters = () => {
    setIsOpen(true);
    setLocalCategories(filters);
    setLocalCreators(creators);
  };

  const hasLocalActiveFilters = () => {
    return localCategories.length > 0 || localCreators.length > 0;
  };

  const visibleCreators = creators_list.slice(0, displayedCreatorsCount);

  return {
    creators,
    isOpen,
    setIsOpen,
    localCategories,
    localCreators,
    displayedCreatorsCount,
    setDisplayedCreatorsCount,
    handleLocalCategoryChange,
    handleToggleShowMoreCreators,
    handleLocalCreatorChange,
    handleClearFilters,
    handleCloseButton,
    handleOpenFilters,
    handleApplyFilters,
    hasLocalActiveFilters,
    visibleCreators,
    hasActiveFilters,
  };
};
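Note: useFilterSheet keeps a local draft of categories and creators and only writes them back to useBlockMenuStore on Apply. FilterSheet above is the real consumer; the sketch below is a hypothetical minimal consumer that illustrates the draft-then-apply flow using only values the hook actually returns. The import path and the "blocks" filter value are assumptions (the category-count defaults earlier in this diff suggest "blocks" is one of the generated filter values).

import { useFilterSheet } from "./useFilterSheet"; // path is illustrative

function TinyFilterPanel() {
  const {
    localCategories,
    handleOpenFilters,
    handleLocalCategoryChange,
    handleApplyFilters,
    hasLocalActiveFilters,
  } = useFilterSheet();

  return (
    <div>
      {/* copy the applied filters from the store into the local draft */}
      <button onClick={handleOpenFilters}>Open</button>
      {/* toggling only touches the draft, not the store */}
      <button onClick={() => handleLocalCategoryChange("blocks")}>
        Toggle blocks ({localCategories.includes("blocks") ? "on" : "off"})
      </button>
      {/* Apply writes the draft back to useBlockMenuStore and closes the sheet */}
      <button onClick={handleApplyFilters} disabled={!hasLocalActiveFilters()}>
        Apply
      </button>
    </div>
  );
}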
@@ -7,10 +7,10 @@ import { useNewControlPanel } from "./useNewControlPanel";
import { GraphExecutionID } from "@/lib/autogpt-server-api";
// import { ControlPanelButton } from "../ControlPanelButton";
// import { GraphSearchMenu } from "../GraphMenu/GraphMenu";
import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag";
import { Separator } from "@/components/__legacy__/ui/separator";
import { NewSaveControl } from "./NewSaveControl/NewSaveControl";
import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag";
import { CustomNode } from "../FlowEditor/nodes/CustomNode/CustomNode";
import { NewSaveControl } from "./NewSaveControl/NewSaveControl";
import { UndoRedoButtons } from "./UndoRedoButtons";

export type Control = {
@@ -56,7 +56,7 @@ export const NewControlPanel = memo(
return (
<section
className={cn(
"absolute left-4 top-10 z-10 w-[4.25rem] overflow-hidden rounded-[1rem] border-none bg-white p-0 shadow-[0_1px_5px_0_rgba(0,0,0,0.1)]",
"absolute left-4 top-10 z-10 overflow-hidden rounded-[1rem] border-none bg-white p-0 shadow-[0_1px_5px_0_rgba(0,0,0,0.1)]",
)}
>
<div className="flex flex-col items-center justify-center rounded-[1rem] p-0">
@@ -1,22 +1,21 @@
import React from "react";
import { Card, CardContent, CardFooter } from "@/components/__legacy__/ui/card";
import { Form, FormField } from "@/components/__legacy__/ui/form";
import {
Popover,
PopoverContent,
PopoverTrigger,
} from "@/components/__legacy__/ui/popover";
import { Card, CardContent, CardFooter } from "@/components/__legacy__/ui/card";
import { Button } from "@/components/atoms/Button/Button";
import { Input } from "@/components/atoms/Input/Input";
import {
Tooltip,
TooltipContent,
TooltipTrigger,
} from "@/components/atoms/Tooltip/BaseTooltip";
import { useNewSaveControl } from "./useNewSaveControl";
import { Form, FormField } from "@/components/__legacy__/ui/form";
import { ControlPanelButton } from "../ControlPanelButton";
import { useControlPanelStore } from "../../../stores/controlPanelStore";
import { FloppyDiskIcon } from "@phosphor-icons/react";
import { Input } from "@/components/atoms/Input/Input";
import { Button } from "@/components/atoms/Button/Button";
import { useControlPanelStore } from "../../../stores/controlPanelStore";
import { ControlPanelButton } from "../ControlPanelButton";
import { useNewSaveControl } from "./useNewSaveControl";

export const NewSaveControl = () => {
const { form, isSaving, graphVersion, handleSave } = useNewSaveControl();
@@ -33,7 +32,7 @@ export const NewSaveControl = () => {
selected={saveControlOpen}
className="rounded-none"
>
<FloppyDiskIcon className="h-6 w-6" />
<FloppyDiskIcon className="size-5" />
</ControlPanelButton>
</PopoverTrigger>
</TooltipTrigger>
@@ -1,13 +1,13 @@
import React from "react";
import { CustomNode } from "@/app/(platform)/build/components/legacy-builder/CustomNode/CustomNode";
import {
Popover,
PopoverContent,
PopoverTrigger,
} from "@/components/__legacy__/ui/popover";
import { MagnifyingGlassIcon } from "@phosphor-icons/react";
import { GraphSearchContent } from "../GraphMenuContent/GraphContent";
import React from "react";
import { ControlPanelButton } from "../../ControlPanelButton";
import { CustomNode } from "@/app/(platform)/build/components/legacy-builder/CustomNode/CustomNode";
import { GraphSearchContent } from "../GraphMenuContent/GraphContent";
import { useGraphMenu } from "./useGraphMenu";

interface GraphSearchMenuProps {
@@ -50,7 +50,7 @@ export const GraphSearchMenu: React.FC<GraphSearchMenuProps> = ({
selected={blockMenuSelected === "search"}
className="rounded-none"
>
<MagnifyingGlassIcon className="h-5 w-6" strokeWidth={2} />
<MagnifyingGlassIcon className="size-5" strokeWidth={2} />
</ControlPanelButton>
</PopoverTrigger>
@@ -1,12 +1,12 @@
import { Separator } from "@/components/__legacy__/ui/separator";
import { ControlPanelButton } from "./ControlPanelButton";
import { ArrowUUpLeftIcon, ArrowUUpRightIcon } from "@phosphor-icons/react";
import {
Tooltip,
TooltipContent,
TooltipTrigger,
} from "@/components/atoms/Tooltip/BaseTooltip";
import { ArrowUUpLeftIcon, ArrowUUpRightIcon } from "@phosphor-icons/react";
import { useHistoryStore } from "../../stores/historyStore";
import { ControlPanelButton } from "./ControlPanelButton";

import { useEffect } from "react";

@@ -43,7 +43,7 @@ export const UndoRedoButtons = () => {
<Tooltip delayDuration={100}>
<TooltipTrigger asChild>
<ControlPanelButton as="button" disabled={!canUndo()} onClick={undo}>
<ArrowUUpLeftIcon className="h-6 w-6" />
<ArrowUUpLeftIcon className="size-5" />
</ControlPanelButton>
</TooltipTrigger>
<TooltipContent side="right">Undo</TooltipContent>
@@ -52,7 +52,7 @@ export const UndoRedoButtons = () => {
<Tooltip delayDuration={100}>
<TooltipTrigger asChild>
<ControlPanelButton as="button" disabled={!canRedo()} onClick={redo}>
<ArrowUUpRightIcon className="h-6 w-6" />
<ArrowUUpRightIcon className="size-5" />
</ControlPanelButton>
</TooltipTrigger>
<TooltipContent side="right">Redo</TooltipContent>
@@ -4,19 +4,12 @@ import {
OutputActions,
OutputItem,
} from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/selected-views/OutputRenderers";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { beautifyString } from "@/lib/utils";
import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag";
import { Clipboard, Maximize2 } from "lucide-react";
import React, { FC, useMemo, useState } from "react";
import { Button } from "../../../../../components/__legacy__/ui/button";
import {
Dialog,
DialogContent,
DialogDescription,
DialogFooter,
DialogHeader,
DialogTitle,
} from "../../../../../components/__legacy__/ui/dialog";
import { ContentRenderer } from "../../../../../components/__legacy__/ui/render";
import { ScrollArea } from "../../../../../components/__legacy__/ui/scroll-area";
import { Separator } from "../../../../../components/__legacy__/ui/separator";
@@ -120,138 +113,155 @@ const ExpandableOutputDialog: FC<ExpandableOutputDialogProps> = ({
};

return (
<Dialog open={isOpen} onOpenChange={onClose}>
<DialogContent className="flex h-[90vh] w-[90vw] max-w-4xl flex-col">
<DialogHeader>
<DialogTitle className="flex items-center justify-between pr-8">
<div className="flex items-center gap-2">
<Maximize2 size={20} />
Full Output Preview
</div>
{enableEnhancedOutputHandling && (
<div className="flex items-center gap-3">
<label
htmlFor="enhanced-rendering-toggle"
className="cursor-pointer select-none text-sm font-normal text-gray-600"
>
Enhanced Rendering
</label>
<Switch
id="enhanced-rendering-toggle"
checked={useEnhancedRenderer}
onCheckedChange={setUseEnhancedRenderer}
/>
</div>
)}
</DialogTitle>
<DialogDescription>
Execution ID: <span className="font-mono text-xs">{execId}</span>
<br />
Pin:{" "}
<span className="font-semibold">{beautifyString(pinName)}</span>
</DialogDescription>
</DialogHeader>

<div className="flex-1 overflow-hidden">
{useEnhancedRenderer && outputItems.length > 0 && (
<div className="border-b px-4 py-2">
<OutputActions
items={outputItems.map((item) => ({
value: item.value,
metadata: item.metadata,
renderer: item.renderer,
}))}
<Dialog
title={
<div className="flex items-center justify-between pr-8">
<div className="flex items-center gap-2">
<Maximize2 size={20} />
Full Output Preview
</div>
{enableEnhancedOutputHandling && (
<div className="flex items-center gap-3">
<label
htmlFor="enhanced-rendering-toggle"
className="cursor-pointer select-none text-sm font-normal text-gray-600"
>
Enhanced Rendering
</label>
<Switch
id="enhanced-rendering-toggle"
checked={useEnhancedRenderer}
onCheckedChange={setUseEnhancedRenderer}
/>
</div>
)}
<ScrollArea className="h-full">
<div className="p-4">
{data.length > 0 ? (
useEnhancedRenderer ? (
<div className="space-y-4">
{outputItems.map((item) => (
<OutputItem
key={item.key}
value={item.value}
metadata={item.metadata}
renderer={item.renderer}
label={item.label}
/>
))}
</div>
) : (
<div className="space-y-4">
{data.map((item, index) => (
<div
key={index}
className="rounded-lg border bg-gray-50 p-4"
>
<div className="mb-2 flex items-center justify-between">
<span className="text-sm font-medium text-gray-600">
Item {index + 1} of {data.length}
</span>
<Button
variant="outline"
size="sm"
onClick={() => {
const itemData =
typeof item === "object"
? JSON.stringify(item, null, 2)
: String(item);
navigator.clipboard
.writeText(itemData)
.then(() => {
toast({
title: `Item ${index + 1} copied to clipboard!`,
duration: 2000,
});
});
}}
className="flex items-center gap-1"
>
<Clipboard size={14} />
Copy Item
</Button>
</div>
<Separator className="mb-3" />
<div className="whitespace-pre-wrap break-words font-mono text-sm">
<ContentRenderer
value={item}
truncateLongData={false}
/>
</div>
</div>
))}
</div>
)
) : (
<div className="py-8 text-center text-gray-500">
No data available
</div>
)}
</div>
</ScrollArea>
</div>
}
controlled={{
isOpen,
set: (open) => {
if (!open) onClose();
},
}}
onClose={onClose}
styling={{
maxWidth: "56rem",
width: "90vw",
height: "90vh",
}}
>
<Dialog.Content>
<div className="flex h-full flex-col">
<div className="pb-4">
<p className="text-sm text-zinc-600">
Execution ID: <span className="font-mono text-xs">{execId}</span>
<br />
Pin:{" "}
<span className="font-semibold">{beautifyString(pinName)}</span>
</p>
</div>

<DialogFooter className="flex justify-between">
<div className="text-sm text-gray-600">
{data.length} item{data.length !== 1 ? "s" : ""} total
</div>
<div className="flex gap-2">
{!useEnhancedRenderer && (
<Button
variant="outline"
onClick={copyData}
className="flex items-center gap-1"
>
<Clipboard size={16} />
Copy All
</Button>
<div className="flex flex-1 flex-col overflow-hidden">
{useEnhancedRenderer && outputItems.length > 0 && (
<div className="border-b px-4 py-2">
<OutputActions
items={outputItems.map((item) => ({
value: item.value,
metadata: item.metadata,
renderer: item.renderer,
}))}
/>
</div>
)}
<Button onClick={onClose}>Close</Button>
<ScrollArea className="h-full">
<div className="p-4">
{data.length > 0 ? (
useEnhancedRenderer ? (
<div className="space-y-4">
{outputItems.map((item) => (
<OutputItem
key={item.key}
value={item.value}
metadata={item.metadata}
renderer={item.renderer}
label={item.label}
/>
))}
</div>
) : (
<div className="space-y-4">
{data.map((item, index) => (
<div
key={index}
className="rounded-lg border bg-gray-50 p-4"
>
<div className="mb-2 flex items-center justify-between">
<span className="text-sm font-medium text-gray-600">
Item {index + 1} of {data.length}
</span>
<Button
variant="outline"
size="sm"
onClick={() => {
const itemData =
typeof item === "object"
? JSON.stringify(item, null, 2)
: String(item);
navigator.clipboard
.writeText(itemData)
.then(() => {
toast({
title: `Item ${index + 1} copied to clipboard!`,
duration: 2000,
});
});
}}
className="flex items-center gap-1"
>
<Clipboard size={14} />
Copy Item
</Button>
</div>
<Separator className="mb-3" />
<div className="whitespace-pre-wrap break-words font-mono text-sm">
<ContentRenderer
value={item}
truncateLongData={false}
/>
</div>
</div>
))}
</div>
)
) : (
<div className="py-8 text-center text-gray-500">
No data available
</div>
)}
</div>
</ScrollArea>
</div>
</DialogFooter>
</DialogContent>

<Dialog.Footer className="flex justify-between">
<div className="text-sm text-gray-600">
{data.length} item{data.length !== 1 ? "s" : ""} total
</div>
<div className="flex gap-2">
{!useEnhancedRenderer && (
<Button
variant="outline"
onClick={copyData}
className="flex items-center gap-1"
>
<Clipboard size={16} />
Copy All
</Button>
)}
<Button onClick={onClose}>Close</Button>
</div>
</Dialog.Footer>
</div>
</Dialog.Content>
</Dialog>
);
};
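Note: this refactor replaces the legacy DialogContent/DialogHeader/DialogFooter primitives with the design-system Dialog from @/components/molecules/Dialog/Dialog. The sketch below is a hypothetical minimal usage of that controlled pattern, mirroring only the props and subcomponents that appear in this diff (title, controlled, onClose, styling, Dialog.Content, Dialog.Footer); no other props of the Dialog component are assumed.

import { Dialog } from "@/components/molecules/Dialog/Dialog";

function ExampleDialog({ isOpen, onClose }: { isOpen: boolean; onClose: () => void }) {
  return (
    <Dialog
      title="Example"
      // Parent owns the open state; Dialog reports close requests through `set`.
      controlled={{
        isOpen,
        set: (open) => {
          if (!open) onClose();
        },
      }}
      onClose={onClose}
      styling={{ maxWidth: "56rem", width: "90vw" }}
    >
      <Dialog.Content>
        <div className="flex flex-col">
          <p>Body content goes here.</p>
          {/* Footer nested inside Content, as in the refactored dialogs above. */}
          <Dialog.Footer>
            <button onClick={onClose}>Close</button>
          </Dialog.Footer>
        </div>
      </Dialog.Content>
    </Dialog>
  );
}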
@@ -2,6 +2,7 @@ import {
ConnectionData,
CustomNodeData,
} from "@/app/(platform)/build/components/legacy-builder/CustomNode/CustomNode";
import { NodeTableInput } from "@/app/(platform)/build/components/legacy-builder/NodeTableInput";
import { CredentialsInput } from "@/app/(platform)/library/agents/[id]/components/NewAgentLibraryView/components/modals/CredentialsInputs/CredentialsInputs";
import { Button } from "@/components/__legacy__/ui/button";
import { Calendar } from "@/components/__legacy__/ui/calendar";
@@ -28,7 +29,6 @@ import {
} from "@/components/__legacy__/ui/select";
import { Switch } from "@/components/atoms/Switch/Switch";
import { GoogleDrivePickerInput } from "@/components/contextual/GoogleDrivePicker/GoogleDrivePickerInput";
import { NodeTableInput } from "@/components/node-table-input";
import {
BlockIOArraySubSchema,
BlockIOBooleanSubSchema,
@@ -1,15 +1,15 @@
import React, { FC, useCallback, useEffect, useState } from "react";
import { FC, useCallback, useEffect, useState } from "react";

import { PlusIcon, XIcon } from "@phosphor-icons/react";
import { cn } from "@/lib/utils";
import NodeHandle from "@/app/(platform)/build/components/legacy-builder/NodeHandle";
import {
BlockIOTableSubSchema,
TableRow,
TableCellValue,
TableRow,
} from "@/lib/autogpt-server-api/types";
import { Input } from "./atoms/Input/Input";
import { Button } from "./atoms/Button/Button";
import { cn } from "@/lib/utils";
import { PlusIcon, XIcon } from "@phosphor-icons/react";
import { Button } from "../../../../../components/atoms/Button/Button";
import { Input } from "../../../../../components/atoms/Input/Input";

interface NodeTableInputProps {
/** Unique identifier for the node in the builder graph */
@@ -1,17 +1,11 @@
import React, { useCallback } from "react";
import { useCallback } from "react";

import { AgentRunDraftView } from "@/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-draft-view";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import type {
CredentialsMetaInput,
GraphMeta,
} from "@/lib/autogpt-server-api/types";
import {
Dialog,
DialogContent,
DialogHeader,
DialogTitle,
DialogDescription,
} from "@/components/__legacy__/ui/dialog";
import { AgentRunDraftView } from "@/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-draft-view";

interface RunInputDialogProps {
isOpen: boolean;
@@ -70,21 +64,33 @@ export function RunnerInputDialog({
);

return (
<Dialog open={isOpen} onOpenChange={doClose}>
<DialogContent className="flex w-[90vw] max-w-4xl flex-col p-10">
<DialogHeader>
<DialogTitle className="text-2xl">Run your agent</DialogTitle>
<DialogDescription className="mt-2">{graph.name}</DialogDescription>
</DialogHeader>
<AgentRunDraftView
className="p-0"
graph={graph}
doRun={doRun ? handleRun : undefined}
onRun={doRun ? undefined : doClose}
doCreateSchedule={doCreateSchedule ? handleSchedule : undefined}
onCreateSchedule={doCreateSchedule ? undefined : doClose}
/>
</DialogContent>
<Dialog
title="Run your agent"
controlled={{
isOpen,
set: (open) => {
if (!open) doClose();
},
}}
onClose={doClose}
styling={{
maxWidth: "56rem",
width: "90vw",
}}
>
<Dialog.Content>
<div className="flex flex-col p-10">
<p className="mt-2 text-sm text-zinc-600">{graph.name}</p>
<AgentRunDraftView
className="p-0"
graph={graph}
doRun={doRun ? handleRun : undefined}
onRun={doRun ? undefined : doClose}
doCreateSchedule={doCreateSchedule ? handleSchedule : undefined}
onCreateSchedule={doCreateSchedule ? undefined : doClose}
/>
</div>
</Dialog.Content>
</Dialog>
);
}
Some files were not shown because too many files have changed in this diff.