Compare commits


1 Commit

Author: dependabot[bot]
SHA1: 10c6c3eb46
Message: chore(deps): bump peter-evans/create-pull-request from 7 to 8
Bumps [peter-evans/create-pull-request](https://github.com/peter-evans/create-pull-request) from 7 to 8.
- [Release notes](https://github.com/peter-evans/create-pull-request/releases)
- [Commits](https://github.com/peter-evans/create-pull-request/compare/v7...v8)

---
updated-dependencies:
- dependency-name: peter-evans/create-pull-request
  dependency-version: '8'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
Date: 2026-01-19 07:26:14 +00:00
208 changed files with 2771 additions and 6730 deletions

View File

@@ -49,7 +49,7 @@ jobs:
- name: Create PR ${{ env.BUILD_BRANCH }} -> ${{ github.ref_name }}
if: github.event_name == 'push'
uses: peter-evans/create-pull-request@v7
uses: peter-evans/create-pull-request@v8
with:
add-paths: classic/frontend/build/web
base: ${{ github.ref_name }}

View File

@@ -16,32 +16,6 @@ See `docs/content/platform/getting-started.md` for setup instructions.
- Format Python code with `poetry run format`.
- Format frontend code using `pnpm format`.
## Frontend guidelines:
See `/frontend/CONTRIBUTING.md` for complete patterns. Quick reference:
1. **Pages**: Create in `src/app/(platform)/feature-name/page.tsx`
- Add `usePageName.ts` hook for logic
- Put sub-components in local `components/` folder
2. **Components**: Structure as `ComponentName/ComponentName.tsx` + `useComponentName.ts` + `helpers.ts`
- Use design system components from `src/components/` (atoms, molecules, organisms)
- Never use `src/components/__legacy__/*`
3. **Data fetching**: Use generated API hooks from `@/app/api/__generated__/endpoints/`
- Regenerate with `pnpm generate:api`
- Pattern: `use{Method}{Version}{OperationName}`
4. **Styling**: Tailwind CSS only, use design tokens, Phosphor Icons only
5. **Testing**: Add Storybook stories for new components, Playwright for E2E
6. **Code conventions**: Function declarations (not arrow functions) for components/handlers
- Component props should be `interface Props { ... }` (not exported) unless the interface needs to be used outside the component
- Separate render logic from business logic (component.tsx + useComponent.ts + helpers.ts)
- Colocate state when possible and avoid creating large components, use sub-components ( local `/components` folder next to the parent component ) when sensible
- Avoid large hooks, abstract logic into `helpers.ts` files when sensible
- Use function declarations for components, arrow functions only for callbacks
- No barrel files or `index.ts` re-exports
- Do not use `useCallback` or `useMemo` unless strictly needed
- Avoid comments at all times unless the code is very complex
## Testing
- Backend: `poetry run test` (runs pytest with a docker based postgres + prisma).
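
To make the frontend conventions above concrete, here is a minimal sketch of the `ComponentName/ComponentName.tsx` + `useComponentName.ts` + `helpers.ts` split they describe (hypothetical `FeatureCard` names and props, illustration only, not part of this change):

```ts
// components/FeatureCard/helpers.ts : pure helpers kept out of the hook
export function formatTitle(raw: string): string {
  return raw.trim();
}
```

```ts
// components/FeatureCard/useFeatureCard.ts : business logic lives in the hook
import { useState } from "react";
import { formatTitle } from "./helpers";

export function useFeatureCard(rawTitle: string) {
  const [expanded, setExpanded] = useState(false);

  function toggleExpanded() {
    setExpanded((prev) => !prev); // arrow functions are reserved for callbacks like this
  }

  return { title: formatTitle(rawTitle), expanded, toggleExpanded };
}
```

```tsx
// components/FeatureCard/FeatureCard.tsx : render logic only, function declaration, non-exported Props
import { useFeatureCard } from "./useFeatureCard";

interface Props {
  title: string;
}

export function FeatureCard({ title }: Props) {
  const { title: formatted, expanded, toggleExpanded } = useFeatureCard(title);

  return (
    <button className="rounded-md p-4 text-sm" onClick={toggleExpanded}>
      {formatted} {expanded ? "(expanded)" : "(collapsed)"}
    </button>
  );
}
```

No barrel `index.ts` is added; callers import `FeatureCard` from its folder directly.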

View File

@@ -201,7 +201,7 @@ If you get any pushback or hit complex block conditions check the new_blocks gui
3. Write tests alongside the route file
4. Run `poetry run test` to verify
### Frontend guidelines:
**Frontend feature development:**
See `/frontend/CONTRIBUTING.md` for complete patterns. Quick reference:
@@ -217,14 +217,6 @@ See `/frontend/CONTRIBUTING.md` for complete patterns. Quick reference:
4. **Styling**: Tailwind CSS only, use design tokens, Phosphor Icons only
5. **Testing**: Add Storybook stories for new components, Playwright for E2E
6. **Code conventions**: Function declarations (not arrow functions) for components/handlers
- Component props should be `interface Props { ... }` (not exported) unless the interface needs to be used outside the component
- Separate render logic from business logic (component.tsx + useComponent.ts + helpers.ts)
- Colocate state when possible and avoid creating large components, use sub-components ( local `/components` folder next to the parent component ) when sensible
- Avoid large hooks, abstract logic into `helpers.ts` files when sensible
- Use function declarations for components, arrow functions only for callbacks
- No barrel files or `index.ts` re-exports
- Do not use `useCallback` or `useMemo` unless strictly needed
- Avoid comments at all times unless the code is very complex
### Security Implementation

View File

@@ -290,11 +290,6 @@ async def _cache_session(session: ChatSession) -> None:
await async_redis.setex(redis_key, config.session_ttl, session.model_dump_json())
async def cache_chat_session(session: ChatSession) -> None:
"""Cache a chat session without persisting to the database."""
await _cache_session(session)
async def _get_session_from_db(session_id: str) -> ChatSession | None:
"""Get a chat session from the database."""
prisma_session = await chat_db.get_chat_session(session_id)

View File

@@ -172,12 +172,12 @@ async def get_session(
user_id: The optional authenticated user ID, or None for anonymous access.
Returns:
SessionDetailResponse: Details for the requested session, or None if not found.
SessionDetailResponse: Details for the requested session; raises NotFoundError if not found.
"""
session = await get_chat_session(session_id, user_id)
if not session:
raise NotFoundError(f"Session {session_id} not found.")
raise NotFoundError(f"Session {session_id} not found")
messages = [message.model_dump() for message in session.messages]
logger.info(
@@ -222,8 +222,6 @@ async def stream_chat_post(
session = await _validate_and_get_session(session_id, user_id)
async def event_generator() -> AsyncGenerator[str, None]:
chunk_count = 0
first_chunk_type: str | None = None
async for chunk in chat_service.stream_chat_completion(
session_id,
request.message,
@@ -232,26 +230,7 @@ async def stream_chat_post(
session=session, # Pass pre-fetched session to avoid double-fetch
context=request.context,
):
if chunk_count < 3:
logger.info(
"Chat stream chunk",
extra={
"session_id": session_id,
"chunk_type": str(chunk.type),
},
)
if not first_chunk_type:
first_chunk_type = str(chunk.type)
chunk_count += 1
yield chunk.to_sse()
logger.info(
"Chat stream completed",
extra={
"session_id": session_id,
"chunk_count": chunk_count,
"first_chunk_type": first_chunk_type,
},
)
# AI SDK protocol termination
yield "data: [DONE]\n\n"
@@ -296,8 +275,6 @@ async def stream_chat_get(
session = await _validate_and_get_session(session_id, user_id)
async def event_generator() -> AsyncGenerator[str, None]:
chunk_count = 0
first_chunk_type: str | None = None
async for chunk in chat_service.stream_chat_completion(
session_id,
message,
@@ -305,26 +282,7 @@ async def stream_chat_get(
user_id=user_id,
session=session, # Pass pre-fetched session to avoid double-fetch
):
if chunk_count < 3:
logger.info(
"Chat stream chunk",
extra={
"session_id": session_id,
"chunk_type": str(chunk.type),
},
)
if not first_chunk_type:
first_chunk_type = str(chunk.type)
chunk_count += 1
yield chunk.to_sse()
logger.info(
"Chat stream completed",
extra={
"session_id": session_id,
"chunk_count": chunk_count,
"first_chunk_type": first_chunk_type,
},
)
# AI SDK protocol termination
yield "data: [DONE]\n\n"

View File

@@ -1,18 +1,15 @@
import asyncio
import logging
import time
from asyncio import CancelledError
from collections.abc import AsyncGenerator
from typing import Any
import orjson
from langfuse import get_client, propagate_attributes
from langfuse.openai import openai # type: ignore
from langfuse import Langfuse
from openai import (
APIConnectionError,
APIError,
APIStatusError,
PermissionDeniedError,
AsyncOpenAI,
RateLimitError,
)
from openai.types.chat import ChatCompletionChunk, ChatCompletionToolParam
@@ -24,12 +21,12 @@ from backend.data.understanding import (
from backend.util.exceptions import NotFoundError
from backend.util.settings import Settings
from . import db as chat_db
from .config import ChatConfig
from .model import (
ChatMessage,
ChatSession,
Usage,
cache_chat_session,
get_chat_session,
update_session_title,
upsert_chat_session,
@@ -53,10 +50,10 @@ logger = logging.getLogger(__name__)
config = ChatConfig()
settings = Settings()
client = openai.AsyncOpenAI(api_key=config.api_key, base_url=config.base_url)
client = AsyncOpenAI(api_key=config.api_key, base_url=config.base_url)
langfuse = get_client()
# Langfuse client (lazy initialization)
_langfuse_client: Langfuse | None = None
class LangfuseNotConfiguredError(Exception):
@@ -72,6 +69,65 @@ def _is_langfuse_configured() -> bool:
)
def _get_langfuse_client() -> Langfuse:
"""Get or create the Langfuse client for prompt management and tracing."""
global _langfuse_client
if _langfuse_client is None:
if not _is_langfuse_configured():
raise LangfuseNotConfiguredError(
"Langfuse is not configured. The chat feature requires Langfuse for prompt management. "
"Please set the LANGFUSE_PUBLIC_KEY and LANGFUSE_SECRET_KEY environment variables."
)
_langfuse_client = Langfuse(
public_key=settings.secrets.langfuse_public_key,
secret_key=settings.secrets.langfuse_secret_key,
host=settings.secrets.langfuse_host or "https://cloud.langfuse.com",
)
return _langfuse_client
def _get_environment() -> str:
"""Get the current environment name for Langfuse tagging."""
return settings.config.app_env.value
def _get_langfuse_prompt() -> str:
"""Fetch the latest production prompt from Langfuse.
Returns:
The compiled prompt text from Langfuse.
Raises:
Exception: If Langfuse is unavailable or prompt fetch fails.
"""
try:
langfuse = _get_langfuse_client()
# cache_ttl_seconds=0 disables SDK caching to always get the latest prompt
prompt = langfuse.get_prompt(config.langfuse_prompt_name, cache_ttl_seconds=0)
compiled = prompt.compile()
logger.info(
f"Fetched prompt '{config.langfuse_prompt_name}' from Langfuse "
f"(version: {prompt.version})"
)
return compiled
except Exception as e:
logger.error(f"Failed to fetch prompt from Langfuse: {e}")
raise
async def _is_first_session(user_id: str) -> bool:
"""Check if this is the user's first chat session.
Returns True if the user has 1 or fewer sessions (meaning this is their first).
"""
try:
session_count = await chat_db.get_user_session_count(user_id)
return session_count <= 1
except Exception as e:
logger.warning(f"Failed to check session count for user {user_id}: {e}")
return False # Default to non-onboarding if we can't check
async def _build_system_prompt(user_id: str | None) -> tuple[str, Any]:
"""Build the full system prompt including business understanding if available.
@@ -83,6 +139,8 @@ async def _build_system_prompt(user_id: str | None) -> tuple[str, Any]:
Tuple of (compiled prompt string, Langfuse prompt object for tracing)
"""
langfuse = _get_langfuse_client()
# cache_ttl_seconds=0 disables SDK caching to always get the latest prompt
prompt = langfuse.get_prompt(config.langfuse_prompt_name, cache_ttl_seconds=0)
@@ -100,7 +158,7 @@ async def _build_system_prompt(user_id: str | None) -> tuple[str, Any]:
context = "This is the first time you are meeting the user. Greet them and introduce them to the platform"
compiled = prompt.compile(users_information=context)
return compiled, understanding
return compiled, prompt
async def _generate_session_title(message: str) -> str | None:
@@ -159,7 +217,6 @@ async def assign_user_to_session(
async def stream_chat_completion(
session_id: str,
message: str | None = None,
tool_call_response: str | None = None,
is_user_message: bool = True,
user_id: str | None = None,
retry_count: int = 0,
@@ -199,6 +256,11 @@ async def stream_chat_completion(
yield StreamFinish()
return
# Langfuse observations will be created after session is loaded (need messages for input)
# Initialize to None so finally block can safely check and end them
trace = None
generation = None
# Only fetch from Redis if session not provided (initial call)
if session is None:
session = await get_chat_session(session_id, user_id)
@@ -274,349 +336,297 @@ async def stream_chat_completion(
asyncio.create_task(_update_title())
# Build system prompt with business understanding
system_prompt, understanding = await _build_system_prompt(user_id)
system_prompt, langfuse_prompt = await _build_system_prompt(user_id)
# Build input messages including system prompt for complete Langfuse logging
trace_input_messages = [{"role": "system", "content": system_prompt}] + [
m.model_dump() for m in session.messages
]
# Create Langfuse trace for this LLM call (each call gets its own trace, grouped by session_id)
# Using v3 SDK: start_observation creates a root span, update_trace sets trace-level attributes
input = message
if not message and tool_call_response:
input = tool_call_response
langfuse = get_client()
with langfuse.start_as_current_observation(
as_type="span",
name="user-copilot-request",
input=input,
) as span:
with propagate_attributes(
try:
langfuse = _get_langfuse_client()
env = _get_environment()
trace = langfuse.start_observation(
name="chat_completion",
input={"messages": trace_input_messages},
metadata={
"environment": env,
"model": config.model,
"message_count": len(session.messages),
"prompt_name": langfuse_prompt.name if langfuse_prompt else None,
"prompt_version": langfuse_prompt.version if langfuse_prompt else None,
},
)
# Set trace-level attributes (session_id, user_id, tags)
trace.update_trace(
session_id=session_id,
user_id=user_id,
tags=["copilot"],
metadata={
"users_information": format_understanding_for_prompt(understanding)[
:200
] # langfuse only accepts up to 200 chars
},
):
tags=[env, "copilot"],
)
except Exception as e:
logger.warning(f"Failed to create Langfuse trace: {e}")
# Initialize variables that will be used in finally block (must be defined before try)
assistant_response = ChatMessage(
role="assistant",
content="",
# Initialize variables that will be used in finally block (must be defined before try)
assistant_response = ChatMessage(
role="assistant",
content="",
)
accumulated_tool_calls: list[dict[str, Any]] = []
# Wrap main logic in try/finally to ensure Langfuse observations are always ended
try:
has_yielded_end = False
has_yielded_error = False
has_done_tool_call = False
has_received_text = False
text_streaming_ended = False
tool_response_messages: list[ChatMessage] = []
should_retry = False
# Generate unique IDs for AI SDK protocol
import uuid as uuid_module
message_id = str(uuid_module.uuid4())
text_block_id = str(uuid_module.uuid4())
# Yield message start
yield StreamStart(messageId=message_id)
# Create Langfuse generation for each LLM call, linked to the prompt
# Using v3 SDK: start_observation with as_type="generation"
generation = (
trace.start_observation(
as_type="generation",
name="llm_call",
model=config.model,
input={"messages": trace_input_messages},
prompt=langfuse_prompt,
)
accumulated_tool_calls: list[dict[str, Any]] = []
has_saved_assistant_message = False
has_appended_streaming_message = False
last_cache_time = 0.0
last_cache_content_len = 0
if trace
else None
)
# Wrap main logic in try/finally to ensure Langfuse observations are always ended
has_yielded_end = False
has_yielded_error = False
has_done_tool_call = False
has_received_text = False
text_streaming_ended = False
tool_response_messages: list[ChatMessage] = []
should_retry = False
try:
async for chunk in _stream_chat_chunks(
session=session,
tools=tools,
system_prompt=system_prompt,
text_block_id=text_block_id,
):
# Generate unique IDs for AI SDK protocol
import uuid as uuid_module
message_id = str(uuid_module.uuid4())
text_block_id = str(uuid_module.uuid4())
# Yield message start
yield StreamStart(messageId=message_id)
try:
async for chunk in _stream_chat_chunks(
session=session,
tools=tools,
system_prompt=system_prompt,
text_block_id=text_block_id,
):
if isinstance(chunk, StreamTextStart):
# Emit text-start before first text delta
if not has_received_text:
yield chunk
elif isinstance(chunk, StreamTextDelta):
delta = chunk.delta or ""
assert assistant_response.content is not None
assistant_response.content += delta
has_received_text = True
if not has_appended_streaming_message:
session.messages.append(assistant_response)
has_appended_streaming_message = True
current_time = time.monotonic()
content_len = len(assistant_response.content)
if (
current_time - last_cache_time >= 1.0
and content_len > last_cache_content_len
):
try:
await cache_chat_session(session)
except Exception as e:
logger.warning(
f"Failed to cache partial session {session.session_id}: {e}"
)
last_cache_time = current_time
last_cache_content_len = content_len
if isinstance(chunk, StreamTextStart):
# Emit text-start before first text delta
if not has_received_text:
yield chunk
elif isinstance(chunk, StreamTextEnd):
# Emit text-end after text completes
if has_received_text and not text_streaming_ended:
text_streaming_ended = True
if assistant_response.content:
logger.warn(
f"StreamTextEnd: Attempting to set output {assistant_response.content}"
)
span.update_trace(output=assistant_response.content)
span.update(output=assistant_response.content)
yield chunk
elif isinstance(chunk, StreamToolInputStart):
# Emit text-end before first tool call, but only if we've received text
elif isinstance(chunk, StreamTextDelta):
delta = chunk.delta or ""
assert assistant_response.content is not None
assistant_response.content += delta
has_received_text = True
yield chunk
elif isinstance(chunk, StreamTextEnd):
# Emit text-end after text completes
if has_received_text and not text_streaming_ended:
text_streaming_ended = True
yield chunk
elif isinstance(chunk, StreamToolInputStart):
# Emit text-end before first tool call, but only if we've received text
if has_received_text and not text_streaming_ended:
yield StreamTextEnd(id=text_block_id)
text_streaming_ended = True
yield chunk
elif isinstance(chunk, StreamToolInputAvailable):
# Accumulate tool calls in OpenAI format
accumulated_tool_calls.append(
{
"id": chunk.toolCallId,
"type": "function",
"function": {
"name": chunk.toolName,
"arguments": orjson.dumps(chunk.input).decode("utf-8"),
},
}
)
elif isinstance(chunk, StreamToolOutputAvailable):
result_content = (
chunk.output
if isinstance(chunk.output, str)
else orjson.dumps(chunk.output).decode("utf-8")
)
tool_response_messages.append(
ChatMessage(
role="tool",
content=result_content,
tool_call_id=chunk.toolCallId,
)
)
has_done_tool_call = True
# Track if any tool execution failed
if not chunk.success:
logger.warning(
f"Tool {chunk.toolName} (ID: {chunk.toolCallId}) execution failed"
)
yield chunk
elif isinstance(chunk, StreamFinish):
if not has_done_tool_call:
# Emit text-end before finish if we received text but haven't closed it
if has_received_text and not text_streaming_ended:
yield StreamTextEnd(id=text_block_id)
text_streaming_ended = True
has_yielded_end = True
yield chunk
elif isinstance(chunk, StreamToolInputAvailable):
# Accumulate tool calls in OpenAI format
accumulated_tool_calls.append(
{
"id": chunk.toolCallId,
"type": "function",
"function": {
"name": chunk.toolName,
"arguments": orjson.dumps(chunk.input).decode(
"utf-8"
),
},
}
elif isinstance(chunk, StreamError):
has_yielded_error = True
elif isinstance(chunk, StreamUsage):
session.usage.append(
Usage(
prompt_tokens=chunk.promptTokens,
completion_tokens=chunk.completionTokens,
total_tokens=chunk.totalTokens,
)
elif isinstance(chunk, StreamToolOutputAvailable):
result_content = (
chunk.output
if isinstance(chunk.output, str)
else orjson.dumps(chunk.output).decode("utf-8")
)
tool_response_messages.append(
ChatMessage(
role="tool",
content=result_content,
tool_call_id=chunk.toolCallId,
)
)
has_done_tool_call = True
# Track if any tool execution failed
if not chunk.success:
logger.warning(
f"Tool {chunk.toolName} (ID: {chunk.toolCallId}) execution failed"
)
yield chunk
elif isinstance(chunk, StreamFinish):
if not has_done_tool_call:
# Emit text-end before finish if we received text but haven't closed it
if has_received_text and not text_streaming_ended:
yield StreamTextEnd(id=text_block_id)
text_streaming_ended = True
# Save assistant message before yielding finish to ensure it's persisted
# even if client disconnects immediately after receiving StreamFinish
if not has_saved_assistant_message:
messages_to_save_early: list[ChatMessage] = []
if accumulated_tool_calls:
assistant_response.tool_calls = (
accumulated_tool_calls
)
if not has_appended_streaming_message and (
assistant_response.content
or assistant_response.tool_calls
):
messages_to_save_early.append(assistant_response)
messages_to_save_early.extend(tool_response_messages)
if messages_to_save_early:
session.messages.extend(messages_to_save_early)
logger.info(
f"Saving assistant message before StreamFinish: "
f"content_len={len(assistant_response.content or '')}, "
f"tool_calls={len(assistant_response.tool_calls or [])}, "
f"tool_responses={len(tool_response_messages)}"
)
if (
messages_to_save_early
or has_appended_streaming_message
):
await upsert_chat_session(session)
has_saved_assistant_message = True
has_yielded_end = True
yield chunk
elif isinstance(chunk, StreamError):
has_yielded_error = True
yield chunk
elif isinstance(chunk, StreamUsage):
session.usage.append(
Usage(
prompt_tokens=chunk.promptTokens,
completion_tokens=chunk.completionTokens,
total_tokens=chunk.totalTokens,
)
)
else:
logger.error(
f"Unknown chunk type: {type(chunk)}", exc_info=True
)
if assistant_response.content:
langfuse.update_current_trace(output=assistant_response.content)
langfuse.update_current_span(output=assistant_response.content)
elif tool_response_messages:
langfuse.update_current_trace(output=str(tool_response_messages))
langfuse.update_current_span(output=str(tool_response_messages))
except CancelledError:
if not has_saved_assistant_message:
if accumulated_tool_calls:
assistant_response.tool_calls = accumulated_tool_calls
if assistant_response.content:
assistant_response.content = (
f"{assistant_response.content}\n\n[interrupted]"
)
else:
assistant_response.content = "[interrupted]"
if not has_appended_streaming_message:
session.messages.append(assistant_response)
if tool_response_messages:
session.messages.extend(tool_response_messages)
try:
await upsert_chat_session(session)
except Exception as e:
logger.warning(
f"Failed to save interrupted session {session.session_id}: {e}"
)
raise
except Exception as e:
logger.error(f"Error during stream: {e!s}", exc_info=True)
# Check if this is a retryable error (JSON parsing, incomplete tool calls, etc.)
is_retryable = isinstance(
e, (orjson.JSONDecodeError, KeyError, TypeError)
)
if is_retryable and retry_count < config.max_retries:
logger.info(
f"Retryable error encountered. Attempt {retry_count + 1}/{config.max_retries}"
)
should_retry = True
else:
# Non-retryable error or max retries exceeded
# Save any partial progress before reporting error
messages_to_save: list[ChatMessage] = []
logger.error(f"Unknown chunk type: {type(chunk)}", exc_info=True)
except Exception as e:
logger.error(f"Error during stream: {e!s}", exc_info=True)
# Add assistant message if it has content or tool calls
if accumulated_tool_calls:
assistant_response.tool_calls = accumulated_tool_calls
if not has_appended_streaming_message and (
assistant_response.content or assistant_response.tool_calls
):
messages_to_save.append(assistant_response)
# Check if this is a retryable error (JSON parsing, incomplete tool calls, etc.)
is_retryable = isinstance(e, (orjson.JSONDecodeError, KeyError, TypeError))
# Add tool response messages after assistant message
messages_to_save.extend(tool_response_messages)
if not has_saved_assistant_message:
if messages_to_save:
session.messages.extend(messages_to_save)
if messages_to_save or has_appended_streaming_message:
await upsert_chat_session(session)
if not has_yielded_error:
error_message = str(e)
if not is_retryable:
error_message = f"Non-retryable error: {error_message}"
elif retry_count >= config.max_retries:
error_message = f"Max retries ({config.max_retries}) exceeded: {error_message}"
error_response = StreamError(errorText=error_message)
yield error_response
if not has_yielded_end:
yield StreamFinish()
return
# Handle retry outside of exception handler to avoid nesting
if should_retry and retry_count < config.max_retries:
if is_retryable and retry_count < config.max_retries:
logger.info(
f"Retrying stream_chat_completion for session {session_id}, attempt {retry_count + 1}"
f"Retryable error encountered. Attempt {retry_count + 1}/{config.max_retries}"
)
async for chunk in stream_chat_completion(
session_id=session.session_id,
user_id=user_id,
retry_count=retry_count + 1,
session=session,
context=context,
):
yield chunk
return # Exit after retry to avoid double-saving in finally block
# Normal completion path - save session and handle tool call continuation
# Only save if we haven't already saved when StreamFinish was received
if not has_saved_assistant_message:
logger.info(
f"Normal completion path: session={session.session_id}, "
f"current message_count={len(session.messages)}"
)
# Build the messages list in the correct order
should_retry = True
else:
# Non-retryable error or max retries exceeded
# Save any partial progress before reporting error
messages_to_save: list[ChatMessage] = []
# Add assistant message with tool_calls if any
# Add assistant message if it has content or tool calls
if accumulated_tool_calls:
assistant_response.tool_calls = accumulated_tool_calls
logger.info(
f"Added {len(accumulated_tool_calls)} tool calls to assistant message"
)
if not has_appended_streaming_message and (
assistant_response.content or assistant_response.tool_calls
):
if assistant_response.content or assistant_response.tool_calls:
messages_to_save.append(assistant_response)
logger.info(
f"Saving assistant message with content_len={len(assistant_response.content or '')}, tool_calls={len(assistant_response.tool_calls or [])}"
)
# Add tool response messages after assistant message
messages_to_save.extend(tool_response_messages)
logger.info(
f"Saving {len(tool_response_messages)} tool response messages, "
f"total_to_save={len(messages_to_save)}"
)
if messages_to_save:
session.messages.extend(messages_to_save)
logger.info(
f"Extended session messages, new message_count={len(session.messages)}"
)
if messages_to_save or has_appended_streaming_message:
await upsert_chat_session(session)
else:
logger.info(
"Assistant message already saved when StreamFinish was received, "
"skipping duplicate save"
)
session.messages.extend(messages_to_save)
await upsert_chat_session(session)
# If we did a tool call, stream the chat completion again to get the next response
if has_done_tool_call:
logger.info(
"Tool call executed, streaming chat completion again to get assistant response"
if not has_yielded_error:
error_message = str(e)
if not is_retryable:
error_message = f"Non-retryable error: {error_message}"
elif retry_count >= config.max_retries:
error_message = f"Max retries ({config.max_retries}) exceeded: {error_message}"
error_response = StreamError(errorText=error_message)
yield error_response
if not has_yielded_end:
yield StreamFinish()
return
# Handle retry outside of exception handler to avoid nesting
if should_retry and retry_count < config.max_retries:
logger.info(
f"Retrying stream_chat_completion for session {session_id}, attempt {retry_count + 1}"
)
async for chunk in stream_chat_completion(
session_id=session.session_id,
user_id=user_id,
retry_count=retry_count + 1,
session=session,
context=context,
):
yield chunk
return # Exit after retry to avoid double-saving in finally block
# Normal completion path - save session and handle tool call continuation
logger.info(
f"Normal completion path: session={session.session_id}, "
f"current message_count={len(session.messages)}"
)
# Build the messages list in the correct order
messages_to_save: list[ChatMessage] = []
# Add assistant message with tool_calls if any
if accumulated_tool_calls:
assistant_response.tool_calls = accumulated_tool_calls
logger.info(
f"Added {len(accumulated_tool_calls)} tool calls to assistant message"
)
if assistant_response.content or assistant_response.tool_calls:
messages_to_save.append(assistant_response)
logger.info(
f"Saving assistant message with content_len={len(assistant_response.content or '')}, tool_calls={len(assistant_response.tool_calls or [])}"
)
# Add tool response messages after assistant message
messages_to_save.extend(tool_response_messages)
logger.info(
f"Saving {len(tool_response_messages)} tool response messages, "
f"total_to_save={len(messages_to_save)}"
)
session.messages.extend(messages_to_save)
logger.info(
f"Extended session messages, new message_count={len(session.messages)}"
)
await upsert_chat_session(session)
# If we did a tool call, stream the chat completion again to get the next response
if has_done_tool_call:
logger.info(
"Tool call executed, streaming chat completion again to get assistant response"
)
async for chunk in stream_chat_completion(
session_id=session.session_id,
user_id=user_id,
session=session, # Pass session object to avoid Redis refetch
context=context,
):
yield chunk
finally:
# Always end Langfuse observations to prevent resource leaks
# Guard against None and catch errors to avoid masking original exceptions
if generation is not None:
try:
latest_usage = session.usage[-1] if session.usage else None
generation.update(
model=config.model,
output={
"content": assistant_response.content,
"tool_calls": accumulated_tool_calls or None,
},
usage_details=(
{
"input": latest_usage.prompt_tokens,
"output": latest_usage.completion_tokens,
"total": latest_usage.total_tokens,
}
if latest_usage
else None
),
)
async for chunk in stream_chat_completion(
session_id=session.session_id,
user_id=user_id,
session=session, # Pass session object to avoid Redis refetch
context=context,
tool_call_response=str(tool_response_messages),
):
yield chunk
generation.end()
except Exception as e:
logger.warning(f"Failed to end Langfuse generation: {e}")
if trace is not None:
try:
if accumulated_tool_calls:
trace.update_trace(output={"tool_calls": accumulated_tool_calls})
else:
trace.update_trace(output={"response": assistant_response.content})
trace.end()
except Exception as e:
logger.warning(f"Failed to end Langfuse trace: {e}")
# Retry configuration for OpenAI API calls
@@ -644,12 +654,6 @@ def _is_retryable_error(error: Exception) -> bool:
return False
def _is_region_blocked_error(error: Exception) -> bool:
if isinstance(error, PermissionDeniedError):
return "not available in your region" in str(error).lower()
return "not available in your region" in str(error).lower()
async def _stream_chat_chunks(
session: ChatSession,
tools: list[ChatCompletionToolParam],
@@ -842,18 +846,7 @@ async def _stream_chat_chunks(
f"Error in stream (not retrying): {e!s}",
exc_info=True,
)
error_code = None
error_text = str(e)
if _is_region_blocked_error(e):
error_code = "MODEL_NOT_AVAILABLE_REGION"
error_text = (
"This model is not available in your region. "
"Please connect via VPN and try again."
)
error_response = StreamError(
errorText=error_text,
code=error_code,
)
error_response = StreamError(errorText=str(e))
yield error_response
yield StreamFinish()
return
@@ -907,4 +900,5 @@ async def _yield_tool_call(
session=session,
)
logger.info(f"Yielding Tool execution response: {tool_execution_response}")
yield tool_execution_response

View File

@@ -30,7 +30,7 @@ TOOL_REGISTRY: dict[str, BaseTool] = {
"find_library_agent": FindLibraryAgentTool(),
"run_agent": RunAgentTool(),
"run_block": RunBlockTool(),
"view_agent_output": AgentOutputTool(),
"agent_output": AgentOutputTool(),
"search_docs": SearchDocsTool(),
"get_doc_page": GetDocPageTool(),
}

View File

@@ -3,8 +3,6 @@
import logging
from typing import Any
from langfuse import observe
from backend.api.features.chat.model import ChatSession
from backend.data.understanding import (
BusinessUnderstandingInput,
@@ -61,7 +59,6 @@ and automations for the user's specific needs."""
"""Requires authentication to store user-specific data."""
return True
@observe(as_type="tool", name="add_understanding")
async def _execute(
self,
user_id: str | None,

View File

@@ -218,7 +218,6 @@ async def save_agent_to_library(
library_agents = await library_db.create_library_agent(
graph=created_graph,
user_id=user_id,
sensitive_action_safe_mode=True,
create_library_agents_for_sub_graphs=False,
)

View File

@@ -5,7 +5,6 @@ import re
from datetime import datetime, timedelta, timezone
from typing import Any
from langfuse import observe
from pydantic import BaseModel, field_validator
from backend.api.features.chat.model import ChatSession
@@ -104,7 +103,7 @@ class AgentOutputTool(BaseTool):
@property
def name(self) -> str:
return "view_agent_output"
return "agent_output"
@property
def description(self) -> str:
@@ -329,7 +328,6 @@ class AgentOutputTool(BaseTool):
total_executions=len(available_executions) if available_executions else 1,
)
@observe(as_type="tool", name="view_agent_output")
async def _execute(
self,
user_id: str | None,

View File

@@ -3,8 +3,6 @@
import logging
from typing import Any
from langfuse import observe
from backend.api.features.chat.model import ChatSession
from .agent_generator import (
@@ -80,7 +78,6 @@ class CreateAgentTool(BaseTool):
"required": ["description"],
}
@observe(as_type="tool", name="create_agent")
async def _execute(
self,
user_id: str | None,

View File

@@ -3,8 +3,6 @@
import logging
from typing import Any
from langfuse import observe
from backend.api.features.chat.model import ChatSession
from .agent_generator import (
@@ -87,7 +85,6 @@ class EditAgentTool(BaseTool):
"required": ["agent_id", "changes"],
}
@observe(as_type="tool", name="edit_agent")
async def _execute(
self,
user_id: str | None,

View File

@@ -2,8 +2,6 @@
from typing import Any
from langfuse import observe
from backend.api.features.chat.model import ChatSession
from .agent_search import search_agents
@@ -37,7 +35,6 @@ class FindAgentTool(BaseTool):
"required": ["query"],
}
@observe(as_type="tool", name="find_agent")
async def _execute(
self, user_id: str | None, session: ChatSession, **kwargs
) -> ToolResponseBase:

View File

@@ -1,7 +1,6 @@
import logging
from typing import Any
from langfuse import observe
from prisma.enums import ContentType
from backend.api.features.chat.model import ChatSession
@@ -56,7 +55,6 @@ class FindBlockTool(BaseTool):
def requires_auth(self) -> bool:
return True
@observe(as_type="tool", name="find_block")
async def _execute(
self,
user_id: str | None,

View File

@@ -2,8 +2,6 @@
from typing import Any
from langfuse import observe
from backend.api.features.chat.model import ChatSession
from .agent_search import search_agents
@@ -43,7 +41,6 @@ class FindLibraryAgentTool(BaseTool):
def requires_auth(self) -> bool:
return True
@observe(as_type="tool", name="find_library_agent")
async def _execute(
self, user_id: str | None, session: ChatSession, **kwargs
) -> ToolResponseBase:

View File

@@ -4,8 +4,6 @@ import logging
from pathlib import Path
from typing import Any
from langfuse import observe
from backend.api.features.chat.model import ChatSession
from backend.api.features.chat.tools.base import BaseTool
from backend.api.features.chat.tools.models import (
@@ -73,7 +71,6 @@ class GetDocPageTool(BaseTool):
url_path = path.rsplit(".", 1)[0] if "." in path else path
return f"{DOCS_BASE_URL}/{url_path}"
@observe(as_type="tool", name="get_doc_page")
async def _execute(
self,
user_id: str | None,

View File

@@ -3,7 +3,6 @@
import logging
from typing import Any
from langfuse import observe
from pydantic import BaseModel, Field, field_validator
from backend.api.features.chat.config import ChatConfig
@@ -33,7 +32,7 @@ from .models import (
UserReadiness,
)
from .utils import (
build_missing_credentials_from_graph,
check_user_has_required_credentials,
extract_credentials_from_schema,
fetch_graph_from_store_slug,
get_or_create_library_agent,
@@ -155,7 +154,6 @@ class RunAgentTool(BaseTool):
"""All operations require authentication."""
return True
@observe(as_type="tool", name="run_agent")
async def _execute(
self,
user_id: str | None,
@@ -237,13 +235,15 @@ class RunAgentTool(BaseTool):
# Return credentials needed response with input data info
# The UI handles credential setup automatically, so the message
# focuses on asking about input data
requirements_creds_dict = build_missing_credentials_from_graph(
graph, None
credentials = extract_credentials_from_schema(
graph.credentials_input_schema
)
missing_credentials_dict = build_missing_credentials_from_graph(
graph, graph_credentials
missing_creds_check = await check_user_has_required_credentials(
user_id, credentials
)
requirements_creds_list = list(requirements_creds_dict.values())
missing_credentials_dict = {
c.id: c.model_dump() for c in missing_creds_check
}
return SetupRequirementsResponse(
message=self._build_inputs_message(graph, MSG_WHAT_VALUES_TO_USE),
@@ -257,7 +257,7 @@ class RunAgentTool(BaseTool):
ready_to_run=False,
),
requirements={
"credentials": requirements_creds_list,
"credentials": [c.model_dump() for c in credentials],
"inputs": self._get_inputs_list(graph.input_schema),
"execution_modes": self._get_execution_modes(graph),
},

View File

@@ -4,8 +4,6 @@ import logging
from collections import defaultdict
from typing import Any
from langfuse import observe
from backend.api.features.chat.model import ChatSession
from backend.data.block import get_block
from backend.data.execution import ExecutionContext
@@ -22,7 +20,6 @@ from .models import (
ToolResponseBase,
UserReadiness,
)
from .utils import build_missing_credentials_from_field_info
logger = logging.getLogger(__name__)
@@ -130,7 +127,6 @@ class RunBlockTool(BaseTool):
return matched_credentials, missing_credentials
@observe(as_type="tool", name="run_block")
async def _execute(
self,
user_id: str | None,
@@ -190,11 +186,7 @@ class RunBlockTool(BaseTool):
if missing_credentials:
# Return setup requirements response with missing credentials
credentials_fields_info = block.input_schema.get_credentials_fields_info()
missing_creds_dict = build_missing_credentials_from_field_info(
credentials_fields_info, set(matched_credentials.keys())
)
missing_creds_list = list(missing_creds_dict.values())
missing_creds_dict = {c.id: c.model_dump() for c in missing_credentials}
return SetupRequirementsResponse(
message=(
@@ -211,7 +203,7 @@ class RunBlockTool(BaseTool):
ready_to_run=False,
),
requirements={
"credentials": missing_creds_list,
"credentials": [c.model_dump() for c in missing_credentials],
"inputs": self._get_inputs_list(block),
"execution_modes": ["immediate"],
},

View File

@@ -3,7 +3,6 @@
import logging
from typing import Any
from langfuse import observe
from prisma.enums import ContentType
from backend.api.features.chat.model import ChatSession
@@ -88,7 +87,6 @@ class SearchDocsTool(BaseTool):
url_path = path.rsplit(".", 1)[0] if "." in path else path
return f"{DOCS_BASE_URL}/{url_path}"
@observe(as_type="tool", name="search_docs")
async def _execute(
self,
user_id: str | None,

View File

@@ -8,7 +8,7 @@ from backend.api.features.library import model as library_model
from backend.api.features.store import db as store_db
from backend.data import graph as graph_db
from backend.data.graph import GraphModel
from backend.data.model import CredentialsFieldInfo, CredentialsMetaInput
from backend.data.model import CredentialsMetaInput
from backend.integrations.creds_manager import IntegrationCredentialsManager
from backend.util.exceptions import NotFoundError
@@ -89,59 +89,6 @@ def extract_credentials_from_schema(
return credentials
def _serialize_missing_credential(
field_key: str, field_info: CredentialsFieldInfo
) -> dict[str, Any]:
"""
Convert credential field info into a serializable dict that preserves all supported
credential types (e.g., api_key + oauth2) so the UI can offer multiple options.
"""
supported_types = sorted(field_info.supported_types)
provider = next(iter(field_info.provider), "unknown")
scopes = sorted(field_info.required_scopes or [])
return {
"id": field_key,
"title": field_key.replace("_", " ").title(),
"provider": provider,
"provider_name": provider.replace("_", " ").title(),
"type": supported_types[0] if supported_types else "api_key",
"types": supported_types,
"scopes": scopes,
}
def build_missing_credentials_from_graph(
graph: GraphModel, matched_credentials: dict[str, CredentialsMetaInput] | None
) -> dict[str, Any]:
"""
Build a missing_credentials mapping from a graph's aggregated credentials inputs,
preserving all supported credential types for each field.
"""
matched_keys = set(matched_credentials.keys()) if matched_credentials else set()
aggregated_fields = graph.aggregate_credentials_inputs()
return {
field_key: _serialize_missing_credential(field_key, field_info)
for field_key, (field_info, _node_fields) in aggregated_fields.items()
if field_key not in matched_keys
}
def build_missing_credentials_from_field_info(
credential_fields: dict[str, CredentialsFieldInfo],
matched_keys: set[str],
) -> dict[str, Any]:
"""
Build missing_credentials mapping from a simple credentials field info dictionary.
"""
return {
field_key: _serialize_missing_credential(field_key, field_info)
for field_key, field_info in credential_fields.items()
if field_key not in matched_keys
}
def extract_credentials_as_dict(
credentials_input_schema: dict[str, Any] | None,
) -> dict[str, CredentialsMetaInput]:

View File

@@ -401,11 +401,27 @@ async def add_generated_agent_image(
)
def _initialize_graph_settings(graph: graph_db.GraphModel) -> GraphSettings:
"""
Initialize GraphSettings based on graph content.
Args:
graph: The graph to analyze
Returns:
GraphSettings with appropriate human_in_the_loop_safe_mode value
"""
if graph.has_human_in_the_loop:
# Graph has HITL blocks - set safe mode to True by default
return GraphSettings(human_in_the_loop_safe_mode=True)
else:
# Graph has no HITL blocks - keep None
return GraphSettings(human_in_the_loop_safe_mode=None)
async def create_library_agent(
graph: graph_db.GraphModel,
user_id: str,
hitl_safe_mode: bool = True,
sensitive_action_safe_mode: bool = False,
create_library_agents_for_sub_graphs: bool = True,
) -> list[library_model.LibraryAgent]:
"""
@@ -414,8 +430,6 @@ async def create_library_agent(
Args:
agent: The agent/Graph to add to the library.
user_id: The user to whom the agent will be added.
hitl_safe_mode: Whether HITL blocks require manual review (default True).
sensitive_action_safe_mode: Whether sensitive action blocks require review.
create_library_agents_for_sub_graphs: If True, creates LibraryAgent records for sub-graphs as well.
Returns:
@@ -451,11 +465,7 @@ async def create_library_agent(
}
},
settings=SafeJson(
GraphSettings.from_graph(
graph_entry,
hitl_safe_mode=hitl_safe_mode,
sensitive_action_safe_mode=sensitive_action_safe_mode,
).model_dump()
_initialize_graph_settings(graph_entry).model_dump()
),
),
include=library_agent_include(
@@ -617,6 +627,33 @@ async def update_library_agent(
raise DatabaseError("Failed to update library agent") from e
async def update_library_agent_settings(
user_id: str,
agent_id: str,
settings: GraphSettings,
) -> library_model.LibraryAgent:
"""
Updates the settings for a specific LibraryAgent.
Args:
user_id: The owner of the LibraryAgent.
agent_id: The ID of the LibraryAgent to update.
settings: New GraphSettings to apply.
Returns:
The updated LibraryAgent.
Raises:
NotFoundError: If the specified LibraryAgent does not exist.
DatabaseError: If there's an error in the update operation.
"""
return await update_library_agent(
library_agent_id=agent_id,
user_id=user_id,
settings=settings,
)
async def delete_library_agent(
library_agent_id: str, user_id: str, soft_delete: bool = True
) -> None:
@@ -801,7 +838,7 @@ async def add_store_agent_to_library(
"isCreatedByUser": False,
"useGraphIsActiveVersion": False,
"settings": SafeJson(
GraphSettings.from_graph(graph_model).model_dump()
_initialize_graph_settings(graph_model).model_dump()
),
},
include=library_agent_include(
@@ -1191,15 +1228,8 @@ async def fork_library_agent(
)
new_graph = await on_graph_activate(new_graph, user_id=user_id)
# Create a library agent for the new graph, preserving safe mode settings
return (
await create_library_agent(
new_graph,
user_id,
hitl_safe_mode=original_agent.settings.human_in_the_loop_safe_mode,
sensitive_action_safe_mode=original_agent.settings.sensitive_action_safe_mode,
)
)[0]
# Create a library agent for the new graph
return (await create_library_agent(new_graph, user_id))[0]
except prisma.errors.PrismaError as e:
logger.error(f"Database error cloning library agent: {e}")
raise DatabaseError("Failed to fork library agent") from e

View File

@@ -73,12 +73,6 @@ class LibraryAgent(pydantic.BaseModel):
has_external_trigger: bool = pydantic.Field(
description="Whether the agent has an external trigger (e.g. webhook) node"
)
has_human_in_the_loop: bool = pydantic.Field(
description="Whether the agent has human-in-the-loop blocks"
)
has_sensitive_action: bool = pydantic.Field(
description="Whether the agent has sensitive action blocks"
)
trigger_setup_info: Optional[GraphTriggerInfo] = None
# Indicates whether there's a new output (based on recent runs)
@@ -186,8 +180,6 @@ class LibraryAgent(pydantic.BaseModel):
graph.credentials_input_schema if sub_graphs is not None else None
),
has_external_trigger=graph.has_external_trigger,
has_human_in_the_loop=graph.has_human_in_the_loop,
has_sensitive_action=graph.has_sensitive_action,
trigger_setup_info=graph.trigger_setup_info,
new_output=new_output,
can_access_graph=can_access_graph,

View File

@@ -52,8 +52,6 @@ async def test_get_library_agents_success(
output_schema={"type": "object", "properties": {}},
credentials_input_schema={"type": "object", "properties": {}},
has_external_trigger=False,
has_human_in_the_loop=False,
has_sensitive_action=False,
status=library_model.LibraryAgentStatus.COMPLETED,
recommended_schedule_cron=None,
new_output=False,
@@ -77,8 +75,6 @@ async def test_get_library_agents_success(
output_schema={"type": "object", "properties": {}},
credentials_input_schema={"type": "object", "properties": {}},
has_external_trigger=False,
has_human_in_the_loop=False,
has_sensitive_action=False,
status=library_model.LibraryAgentStatus.COMPLETED,
recommended_schedule_cron=None,
new_output=False,
@@ -154,8 +150,6 @@ async def test_get_favorite_library_agents_success(
output_schema={"type": "object", "properties": {}},
credentials_input_schema={"type": "object", "properties": {}},
has_external_trigger=False,
has_human_in_the_loop=False,
has_sensitive_action=False,
status=library_model.LibraryAgentStatus.COMPLETED,
recommended_schedule_cron=None,
new_output=False,
@@ -224,8 +218,6 @@ def test_add_agent_to_library_success(
output_schema={"type": "object", "properties": {}},
credentials_input_schema={"type": "object", "properties": {}},
has_external_trigger=False,
has_human_in_the_loop=False,
has_sensitive_action=False,
status=library_model.LibraryAgentStatus.COMPLETED,
new_output=False,
can_access_graph=True,

View File

@@ -154,7 +154,6 @@ async def store_content_embedding(
# Upsert the embedding
# WHERE clause in DO UPDATE prevents PostgreSQL 15 bug with NULLS NOT DISTINCT
# Use unqualified ::vector - pgvector is in search_path on all environments
await execute_raw_with_schema(
"""
INSERT INTO {schema_prefix}"UnifiedContentEmbedding" (
@@ -178,6 +177,7 @@ async def store_content_embedding(
searchable_text,
metadata_json,
client=client,
set_public_search_path=True,
)
logger.info(f"Stored embedding for {content_type}:{content_id}")
@@ -236,6 +236,7 @@ async def get_content_embedding(
content_type,
content_id,
user_id,
set_public_search_path=True,
)
if result and len(result) > 0:
@@ -870,45 +871,31 @@ async def semantic_search(
# Add content type parameters and build placeholders dynamically
content_type_start_idx = len(params) + 1
content_type_placeholders = ", ".join(
"$" + str(content_type_start_idx + i) + '::{schema_prefix}"ContentType"'
f'${content_type_start_idx + i}::{{{{schema_prefix}}}}"ContentType"'
for i in range(len(content_types))
)
params.extend([ct.value for ct in content_types])
# Build min_similarity param index before appending
min_similarity_idx = len(params) + 1
params.append(min_similarity)
# Use unqualified ::vector and <=> operator - pgvector is in search_path on all environments
sql = (
"""
sql = f"""
SELECT
"contentId" as content_id,
"contentType" as content_type,
"searchableText" as searchable_text,
metadata,
1 - (embedding <=> '"""
+ embedding_str
+ """'::vector) as similarity
FROM {schema_prefix}"UnifiedContentEmbedding"
WHERE "contentType" IN ("""
+ content_type_placeholders
+ """)
"""
+ user_filter
+ """
AND 1 - (embedding <=> '"""
+ embedding_str
+ """'::vector) >= $"""
+ str(min_similarity_idx)
+ """
1 - (embedding <=> '{embedding_str}'::vector) as similarity
FROM {{{{schema_prefix}}}}"UnifiedContentEmbedding"
WHERE "contentType" IN ({content_type_placeholders})
{user_filter}
AND 1 - (embedding <=> '{embedding_str}'::vector) >= ${len(params) + 1}
ORDER BY similarity DESC
LIMIT $1
"""
)
params.append(min_similarity)
try:
results = await query_raw_with_schema(sql, *params)
results = await query_raw_with_schema(
sql, *params, set_public_search_path=True
)
return [
{
"content_id": row["content_id"],
@@ -935,41 +922,31 @@ async def semantic_search(
# Add content type parameters and build placeholders dynamically
content_type_start_idx = len(params_lexical) + 1
content_type_placeholders_lexical = ", ".join(
"$" + str(content_type_start_idx + i) + '::{schema_prefix}"ContentType"'
f'${content_type_start_idx + i}::{{{{schema_prefix}}}}"ContentType"'
for i in range(len(content_types))
)
params_lexical.extend([ct.value for ct in content_types])
# Build query param index before appending
query_param_idx = len(params_lexical) + 1
params_lexical.append(f"%{query}%")
# Use regular string (not f-string) for template to preserve {schema_prefix} placeholders
sql_lexical = (
"""
sql_lexical = f"""
SELECT
"contentId" as content_id,
"contentType" as content_type,
"searchableText" as searchable_text,
metadata,
0.0 as similarity
FROM {schema_prefix}"UnifiedContentEmbedding"
WHERE "contentType" IN ("""
+ content_type_placeholders_lexical
+ """)
"""
+ user_filter
+ """
AND "searchableText" ILIKE $"""
+ str(query_param_idx)
+ """
FROM {{{{schema_prefix}}}}"UnifiedContentEmbedding"
WHERE "contentType" IN ({content_type_placeholders_lexical})
{user_filter}
AND "searchableText" ILIKE ${len(params_lexical) + 1}
ORDER BY "updatedAt" DESC
LIMIT $1
"""
)
params_lexical.append(f"%{query}%")
try:
results = await query_raw_with_schema(sql_lexical, *params_lexical)
results = await query_raw_with_schema(
sql_lexical, *params_lexical, set_public_search_path=True
)
return [
{
"content_id": row["content_id"],

View File

@@ -155,14 +155,18 @@ async def test_store_embedding_success(mocker):
)
assert result is True
# execute_raw is called once for INSERT (no separate SET search_path needed)
assert mock_client.execute_raw.call_count == 1
# execute_raw is called twice: once for SET search_path, once for INSERT
assert mock_client.execute_raw.call_count == 2
# Verify the INSERT query with the actual data
call_args = mock_client.execute_raw.call_args_list[0][0]
assert "test-version-id" in call_args
assert "[0.1,0.2,0.3]" in call_args
assert None in call_args # userId should be None for store agents
# First call: SET search_path
first_call_args = mock_client.execute_raw.call_args_list[0][0]
assert "SET search_path" in first_call_args[0]
# Second call: INSERT query with the actual data
second_call_args = mock_client.execute_raw.call_args_list[1][0]
assert "test-version-id" in second_call_args
assert "[0.1,0.2,0.3]" in second_call_args
assert None in second_call_args # userId should be None for store agents
@pytest.mark.asyncio(loop_scope="session")

View File

@@ -12,7 +12,7 @@ from dataclasses import dataclass
from typing import Any, Literal
from prisma.enums import ContentType
from rank_bm25 import BM25Okapi # type: ignore[import-untyped]
from rank_bm25 import BM25Okapi
from backend.api.features.store.embeddings import (
EMBEDDING_DIM,
@@ -363,7 +363,9 @@ async def unified_hybrid_search(
LIMIT {limit_param} OFFSET {offset_param}
"""
results = await query_raw_with_schema(sql_query, *params)
results = await query_raw_with_schema(
sql_query, *params, set_public_search_path=True
)
total = results[0]["total_count"] if results else 0
# Apply BM25 reranking
@@ -686,7 +688,9 @@ async def hybrid_search(
LIMIT {limit_param} OFFSET {offset_param}
"""
results = await query_raw_with_schema(sql_query, *params)
results = await query_raw_with_schema(
sql_query, *params, set_public_search_path=True
)
total = results[0]["total_count"] if results else 0

View File

@@ -761,8 +761,10 @@ async def create_new_graph(
graph.reassign_ids(user_id=user_id, reassign_graph_id=True)
graph.validate_graph(for_run=False)
# The return value of the create graph & library function is intentionally not used here,
# as the graph is already valid and no sub-graphs are returned.
await graph_db.create_graph(graph, user_id=user_id)
await library_db.create_library_agent(graph, user_id)
await library_db.create_library_agent(graph, user_id=user_id)
activated_graph = await on_graph_activate(graph, user_id=user_id)
if create_graph.source == "builder":
@@ -886,19 +888,21 @@ async def set_graph_active_version(
async def _update_library_agent_version_and_settings(
user_id: str, agent_graph: graph_db.GraphModel
) -> library_model.LibraryAgent:
# Keep the library agent up to date with the new active version
library = await library_db.update_agent_version_in_library(
user_id, agent_graph.id, agent_graph.version
)
updated_settings = GraphSettings.from_graph(
graph=agent_graph,
hitl_safe_mode=library.settings.human_in_the_loop_safe_mode,
sensitive_action_safe_mode=library.settings.sensitive_action_safe_mode,
)
if updated_settings != library.settings:
library = await library_db.update_library_agent(
library_agent_id=library.id,
# If the graph has an HITL node, initialize the setting if it's not already set.
if (
agent_graph.has_human_in_the_loop
and library.settings.human_in_the_loop_safe_mode is None
):
await library_db.update_library_agent_settings(
user_id=user_id,
settings=updated_settings,
agent_id=library.id,
settings=library.settings.model_copy(
update={"human_in_the_loop_safe_mode": True}
),
)
return library
@@ -915,18 +919,21 @@ async def update_graph_settings(
user_id: Annotated[str, Security(get_user_id)],
) -> GraphSettings:
"""Update graph settings for the user's library agent."""
# Get the library agent for this graph
library_agent = await library_db.get_library_agent_by_graph_id(
graph_id=graph_id, user_id=user_id
)
if not library_agent:
raise HTTPException(404, f"Graph #{graph_id} not found in user's library")
updated_agent = await library_db.update_library_agent(
library_agent_id=library_agent.id,
# Update the library agent settings
updated_agent = await library_db.update_library_agent_settings(
user_id=user_id,
agent_id=library_agent.id,
settings=settings,
)
# Return the updated settings
return GraphSettings.model_validate(updated_agent.settings)

View File

@@ -680,58 +680,3 @@ class ListIsEmptyBlock(Block):
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
yield "is_empty", len(input_data.list) == 0
class ConcatenateListsBlock(Block):
class Input(BlockSchemaInput):
lists: List[List[Any]] = SchemaField(
description="A list of lists to concatenate together. All lists will be combined in order into a single list.",
placeholder="e.g., [[1, 2], [3, 4], [5, 6]]",
)
class Output(BlockSchemaOutput):
concatenated_list: List[Any] = SchemaField(
description="The concatenated list containing all elements from all input lists in order."
)
error: str = SchemaField(
description="Error message if concatenation failed due to invalid input types."
)
def __init__(self):
super().__init__(
id="3cf9298b-5817-4141-9d80-7c2cc5199c8e",
description="Concatenates multiple lists into a single list. All elements from all input lists are combined in order.",
categories={BlockCategory.BASIC},
input_schema=ConcatenateListsBlock.Input,
output_schema=ConcatenateListsBlock.Output,
test_input=[
{"lists": [[1, 2, 3], [4, 5, 6]]},
{"lists": [["a", "b"], ["c"], ["d", "e", "f"]]},
{"lists": [[1, 2], []]},
{"lists": []},
],
test_output=[
("concatenated_list", [1, 2, 3, 4, 5, 6]),
("concatenated_list", ["a", "b", "c", "d", "e", "f"]),
("concatenated_list", [1, 2]),
("concatenated_list", []),
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
concatenated = []
for idx, lst in enumerate(input_data.lists):
if lst is None:
# Skip None values to avoid errors
continue
if not isinstance(lst, list):
# Type validation: each item must be a list
# Strings are iterable and would cause extend() to iterate character-by-character
# Non-iterable types would raise TypeError
yield "error", (
f"Invalid input at index {idx}: expected a list, got {type(lst).__name__}. "
f"All items in 'lists' must be lists (e.g., [[1, 2], [3, 4]])."
)
return
concatenated.extend(lst)
yield "concatenated_list", concatenated

View File

@@ -84,7 +84,7 @@ class HITLReviewHelper:
Exception: If review creation or status update fails
"""
# Skip review if safe mode is disabled - return auto-approved result
if not execution_context.human_in_the_loop_safe_mode:
if not execution_context.safe_mode:
logger.info(
f"Block {block_name} skipping review for node {node_exec_id} - safe mode disabled"
)

View File

@@ -104,7 +104,7 @@ class HumanInTheLoopBlock(Block):
execution_context: ExecutionContext,
**_kwargs,
) -> BlockOutput:
if not execution_context.human_in_the_loop_safe_mode:
if not execution_context.safe_mode:
logger.info(
f"HITL block skipping review for node {node_exec_id} - safe mode disabled"
)

View File

@@ -79,10 +79,6 @@ class ModelMetadata(NamedTuple):
provider: str
context_window: int
max_output_tokens: int | None
display_name: str
provider_name: str
creator_name: str
price_tier: Literal[1, 2, 3]
class LlmModelMeta(EnumMeta):
@@ -175,26 +171,6 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta):
V0_1_5_LG = "v0-1.5-lg"
V0_1_0_MD = "v0-1.0-md"
@classmethod
def __get_pydantic_json_schema__(cls, schema, handler):
json_schema = handler(schema)
llm_model_metadata = {}
for model in cls:
model_name = model.value
metadata = model.metadata
llm_model_metadata[model_name] = {
"creator": metadata.creator_name,
"creator_name": metadata.creator_name,
"title": metadata.display_name,
"provider": metadata.provider,
"provider_name": metadata.provider_name,
"name": model_name,
"price_tier": metadata.price_tier,
}
json_schema["llm_model"] = True
json_schema["llm_model_metadata"] = llm_model_metadata
return json_schema
@property
def metadata(self) -> ModelMetadata:
return MODEL_METADATA[self]
@@ -214,291 +190,119 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta):
MODEL_METADATA = {
# https://platform.openai.com/docs/models
LlmModel.O3: ModelMetadata("openai", 200000, 100000, "O3", "OpenAI", "OpenAI", 2),
LlmModel.O3_MINI: ModelMetadata(
"openai", 200000, 100000, "O3 Mini", "OpenAI", "OpenAI", 1
), # o3-mini-2025-01-31
LlmModel.O1: ModelMetadata(
"openai", 200000, 100000, "O1", "OpenAI", "OpenAI", 3
), # o1-2024-12-17
LlmModel.O1_MINI: ModelMetadata(
"openai", 128000, 65536, "O1 Mini", "OpenAI", "OpenAI", 2
), # o1-mini-2024-09-12
LlmModel.O3: ModelMetadata("openai", 200000, 100000),
LlmModel.O3_MINI: ModelMetadata("openai", 200000, 100000), # o3-mini-2025-01-31
LlmModel.O1: ModelMetadata("openai", 200000, 100000), # o1-2024-12-17
LlmModel.O1_MINI: ModelMetadata("openai", 128000, 65536), # o1-mini-2024-09-12
# GPT-5 models
LlmModel.GPT5_2: ModelMetadata(
"openai", 400000, 128000, "GPT-5.2", "OpenAI", "OpenAI", 3
),
LlmModel.GPT5_1: ModelMetadata(
"openai", 400000, 128000, "GPT-5.1", "OpenAI", "OpenAI", 2
),
LlmModel.GPT5: ModelMetadata(
"openai", 400000, 128000, "GPT-5", "OpenAI", "OpenAI", 1
),
LlmModel.GPT5_MINI: ModelMetadata(
"openai", 400000, 128000, "GPT-5 Mini", "OpenAI", "OpenAI", 1
),
LlmModel.GPT5_NANO: ModelMetadata(
"openai", 400000, 128000, "GPT-5 Nano", "OpenAI", "OpenAI", 1
),
LlmModel.GPT5_CHAT: ModelMetadata(
"openai", 400000, 16384, "GPT-5 Chat Latest", "OpenAI", "OpenAI", 2
),
LlmModel.GPT41: ModelMetadata(
"openai", 1047576, 32768, "GPT-4.1", "OpenAI", "OpenAI", 1
),
LlmModel.GPT41_MINI: ModelMetadata(
"openai", 1047576, 32768, "GPT-4.1 Mini", "OpenAI", "OpenAI", 1
),
LlmModel.GPT5_2: ModelMetadata("openai", 400000, 128000),
LlmModel.GPT5_1: ModelMetadata("openai", 400000, 128000),
LlmModel.GPT5: ModelMetadata("openai", 400000, 128000),
LlmModel.GPT5_MINI: ModelMetadata("openai", 400000, 128000),
LlmModel.GPT5_NANO: ModelMetadata("openai", 400000, 128000),
LlmModel.GPT5_CHAT: ModelMetadata("openai", 400000, 16384),
LlmModel.GPT41: ModelMetadata("openai", 1047576, 32768),
LlmModel.GPT41_MINI: ModelMetadata("openai", 1047576, 32768),
LlmModel.GPT4O_MINI: ModelMetadata(
"openai", 128000, 16384, "GPT-4o Mini", "OpenAI", "OpenAI", 1
"openai", 128000, 16384
), # gpt-4o-mini-2024-07-18
LlmModel.GPT4O: ModelMetadata(
"openai", 128000, 16384, "GPT-4o", "OpenAI", "OpenAI", 2
), # gpt-4o-2024-08-06
LlmModel.GPT4O: ModelMetadata("openai", 128000, 16384), # gpt-4o-2024-08-06
LlmModel.GPT4_TURBO: ModelMetadata(
"openai", 128000, 4096, "GPT-4 Turbo", "OpenAI", "OpenAI", 3
"openai", 128000, 4096
), # gpt-4-turbo-2024-04-09
LlmModel.GPT3_5_TURBO: ModelMetadata(
"openai", 16385, 4096, "GPT-3.5 Turbo", "OpenAI", "OpenAI", 1
), # gpt-3.5-turbo-0125
LlmModel.GPT3_5_TURBO: ModelMetadata("openai", 16385, 4096), # gpt-3.5-turbo-0125
# https://docs.anthropic.com/en/docs/about-claude/models
LlmModel.CLAUDE_4_1_OPUS: ModelMetadata(
"anthropic", 200000, 32000, "Claude Opus 4.1", "Anthropic", "Anthropic", 3
"anthropic", 200000, 32000
), # claude-opus-4-1-20250805
LlmModel.CLAUDE_4_OPUS: ModelMetadata(
"anthropic", 200000, 32000, "Claude Opus 4", "Anthropic", "Anthropic", 3
"anthropic", 200000, 32000
), # claude-4-opus-20250514
LlmModel.CLAUDE_4_SONNET: ModelMetadata(
"anthropic", 200000, 64000, "Claude Sonnet 4", "Anthropic", "Anthropic", 2
"anthropic", 200000, 64000
), # claude-4-sonnet-20250514
LlmModel.CLAUDE_4_5_OPUS: ModelMetadata(
"anthropic", 200000, 64000, "Claude Opus 4.5", "Anthropic", "Anthropic", 3
"anthropic", 200000, 64000
), # claude-opus-4-5-20251101
LlmModel.CLAUDE_4_5_SONNET: ModelMetadata(
"anthropic", 200000, 64000, "Claude Sonnet 4.5", "Anthropic", "Anthropic", 3
"anthropic", 200000, 64000
), # claude-sonnet-4-5-20250929
LlmModel.CLAUDE_4_5_HAIKU: ModelMetadata(
"anthropic", 200000, 64000, "Claude Haiku 4.5", "Anthropic", "Anthropic", 2
"anthropic", 200000, 64000
), # claude-haiku-4-5-20251001
LlmModel.CLAUDE_3_7_SONNET: ModelMetadata(
"anthropic", 200000, 64000, "Claude 3.7 Sonnet", "Anthropic", "Anthropic", 2
"anthropic", 200000, 64000
), # claude-3-7-sonnet-20250219
LlmModel.CLAUDE_3_HAIKU: ModelMetadata(
"anthropic", 200000, 4096, "Claude 3 Haiku", "Anthropic", "Anthropic", 1
"anthropic", 200000, 4096
), # claude-3-haiku-20240307
# https://docs.aimlapi.com/api-overview/model-database/text-models
LlmModel.AIML_API_QWEN2_5_72B: ModelMetadata(
"aiml_api", 32000, 8000, "Qwen 2.5 72B Instruct Turbo", "AI/ML", "Qwen", 1
),
LlmModel.AIML_API_LLAMA3_1_70B: ModelMetadata(
"aiml_api",
128000,
40000,
"Llama 3.1 Nemotron 70B Instruct",
"AI/ML",
"Nvidia",
1,
),
LlmModel.AIML_API_LLAMA3_3_70B: ModelMetadata(
"aiml_api", 128000, None, "Llama 3.3 70B Instruct Turbo", "AI/ML", "Meta", 1
),
LlmModel.AIML_API_META_LLAMA_3_1_70B: ModelMetadata(
"aiml_api", 131000, 2000, "Llama 3.1 70B Instruct Turbo", "AI/ML", "Meta", 1
),
LlmModel.AIML_API_LLAMA_3_2_3B: ModelMetadata(
"aiml_api", 128000, None, "Llama 3.2 3B Instruct Turbo", "AI/ML", "Meta", 1
),
LlmModel.AIML_API_QWEN2_5_72B: ModelMetadata("aiml_api", 32000, 8000),
LlmModel.AIML_API_LLAMA3_1_70B: ModelMetadata("aiml_api", 128000, 40000),
LlmModel.AIML_API_LLAMA3_3_70B: ModelMetadata("aiml_api", 128000, None),
LlmModel.AIML_API_META_LLAMA_3_1_70B: ModelMetadata("aiml_api", 131000, 2000),
LlmModel.AIML_API_LLAMA_3_2_3B: ModelMetadata("aiml_api", 128000, None),
# https://console.groq.com/docs/models
LlmModel.LLAMA3_3_70B: ModelMetadata(
"groq", 128000, 32768, "Llama 3.3 70B Versatile", "Groq", "Meta", 1
),
LlmModel.LLAMA3_1_8B: ModelMetadata(
"groq", 128000, 8192, "Llama 3.1 8B Instant", "Groq", "Meta", 1
),
LlmModel.LLAMA3_3_70B: ModelMetadata("groq", 128000, 32768),
LlmModel.LLAMA3_1_8B: ModelMetadata("groq", 128000, 8192),
# https://ollama.com/library
LlmModel.OLLAMA_LLAMA3_3: ModelMetadata(
"ollama", 8192, None, "Llama 3.3", "Ollama", "Meta", 1
),
LlmModel.OLLAMA_LLAMA3_2: ModelMetadata(
"ollama", 8192, None, "Llama 3.2", "Ollama", "Meta", 1
),
LlmModel.OLLAMA_LLAMA3_8B: ModelMetadata(
"ollama", 8192, None, "Llama 3", "Ollama", "Meta", 1
),
LlmModel.OLLAMA_LLAMA3_405B: ModelMetadata(
"ollama", 8192, None, "Llama 3.1 405B", "Ollama", "Meta", 1
),
LlmModel.OLLAMA_DOLPHIN: ModelMetadata(
"ollama", 32768, None, "Dolphin Mistral Latest", "Ollama", "Mistral AI", 1
),
LlmModel.OLLAMA_LLAMA3_3: ModelMetadata("ollama", 8192, None),
LlmModel.OLLAMA_LLAMA3_2: ModelMetadata("ollama", 8192, None),
LlmModel.OLLAMA_LLAMA3_8B: ModelMetadata("ollama", 8192, None),
LlmModel.OLLAMA_LLAMA3_405B: ModelMetadata("ollama", 8192, None),
LlmModel.OLLAMA_DOLPHIN: ModelMetadata("ollama", 32768, None),
# https://openrouter.ai/models
LlmModel.GEMINI_2_5_PRO: ModelMetadata(
"open_router",
1050000,
8192,
"Gemini 2.5 Pro Preview 03.25",
"OpenRouter",
"Google",
2,
),
LlmModel.GEMINI_3_PRO_PREVIEW: ModelMetadata(
"open_router", 1048576, 65535, "Gemini 3 Pro Preview", "OpenRouter", "Google", 2
),
LlmModel.GEMINI_2_5_FLASH: ModelMetadata(
"open_router", 1048576, 65535, "Gemini 2.5 Flash", "OpenRouter", "Google", 1
),
LlmModel.GEMINI_2_0_FLASH: ModelMetadata(
"open_router", 1048576, 8192, "Gemini 2.0 Flash 001", "OpenRouter", "Google", 1
),
LlmModel.GEMINI_2_5_PRO: ModelMetadata("open_router", 1050000, 8192),
LlmModel.GEMINI_3_PRO_PREVIEW: ModelMetadata("open_router", 1048576, 65535),
LlmModel.GEMINI_2_5_FLASH: ModelMetadata("open_router", 1048576, 65535),
LlmModel.GEMINI_2_0_FLASH: ModelMetadata("open_router", 1048576, 8192),
LlmModel.GEMINI_2_5_FLASH_LITE_PREVIEW: ModelMetadata(
"open_router",
1048576,
65535,
"Gemini 2.5 Flash Lite Preview 06.17",
"OpenRouter",
"Google",
1,
),
LlmModel.GEMINI_2_0_FLASH_LITE: ModelMetadata(
"open_router",
1048576,
8192,
"Gemini 2.0 Flash Lite 001",
"OpenRouter",
"Google",
1,
),
LlmModel.MISTRAL_NEMO: ModelMetadata(
"open_router", 128000, 4096, "Mistral Nemo", "OpenRouter", "Mistral AI", 1
),
LlmModel.COHERE_COMMAND_R_08_2024: ModelMetadata(
"open_router", 128000, 4096, "Command R 08.2024", "OpenRouter", "Cohere", 1
),
LlmModel.COHERE_COMMAND_R_PLUS_08_2024: ModelMetadata(
"open_router", 128000, 4096, "Command R Plus 08.2024", "OpenRouter", "Cohere", 2
),
LlmModel.DEEPSEEK_CHAT: ModelMetadata(
"open_router", 64000, 2048, "DeepSeek Chat", "OpenRouter", "DeepSeek", 1
),
LlmModel.DEEPSEEK_R1_0528: ModelMetadata(
"open_router", 163840, 163840, "DeepSeek R1 0528", "OpenRouter", "DeepSeek", 1
),
LlmModel.PERPLEXITY_SONAR: ModelMetadata(
"open_router", 127000, 8000, "Sonar", "OpenRouter", "Perplexity", 1
),
LlmModel.PERPLEXITY_SONAR_PRO: ModelMetadata(
"open_router", 200000, 8000, "Sonar Pro", "OpenRouter", "Perplexity", 2
"open_router", 1048576, 65535
),
LlmModel.GEMINI_2_0_FLASH_LITE: ModelMetadata("open_router", 1048576, 8192),
LlmModel.MISTRAL_NEMO: ModelMetadata("open_router", 128000, 4096),
LlmModel.COHERE_COMMAND_R_08_2024: ModelMetadata("open_router", 128000, 4096),
LlmModel.COHERE_COMMAND_R_PLUS_08_2024: ModelMetadata("open_router", 128000, 4096),
LlmModel.DEEPSEEK_CHAT: ModelMetadata("open_router", 64000, 2048),
LlmModel.DEEPSEEK_R1_0528: ModelMetadata("open_router", 163840, 163840),
LlmModel.PERPLEXITY_SONAR: ModelMetadata("open_router", 127000, 8000),
LlmModel.PERPLEXITY_SONAR_PRO: ModelMetadata("open_router", 200000, 8000),
LlmModel.PERPLEXITY_SONAR_DEEP_RESEARCH: ModelMetadata(
"open_router",
128000,
16000,
"Sonar Deep Research",
"OpenRouter",
"Perplexity",
3,
),
LlmModel.NOUSRESEARCH_HERMES_3_LLAMA_3_1_405B: ModelMetadata(
"open_router",
131000,
4096,
"Hermes 3 Llama 3.1 405B",
"OpenRouter",
"Nous Research",
1,
"open_router", 131000, 4096
),
LlmModel.NOUSRESEARCH_HERMES_3_LLAMA_3_1_70B: ModelMetadata(
"open_router",
12288,
12288,
"Hermes 3 Llama 3.1 70B",
"OpenRouter",
"Nous Research",
1,
),
LlmModel.OPENAI_GPT_OSS_120B: ModelMetadata(
"open_router", 131072, 131072, "GPT-OSS 120B", "OpenRouter", "OpenAI", 1
),
LlmModel.OPENAI_GPT_OSS_20B: ModelMetadata(
"open_router", 131072, 32768, "GPT-OSS 20B", "OpenRouter", "OpenAI", 1
),
LlmModel.AMAZON_NOVA_LITE_V1: ModelMetadata(
"open_router", 300000, 5120, "Nova Lite V1", "OpenRouter", "Amazon", 1
),
LlmModel.AMAZON_NOVA_MICRO_V1: ModelMetadata(
"open_router", 128000, 5120, "Nova Micro V1", "OpenRouter", "Amazon", 1
),
LlmModel.AMAZON_NOVA_PRO_V1: ModelMetadata(
"open_router", 300000, 5120, "Nova Pro V1", "OpenRouter", "Amazon", 1
),
LlmModel.MICROSOFT_WIZARDLM_2_8X22B: ModelMetadata(
"open_router", 65536, 4096, "WizardLM 2 8x22B", "OpenRouter", "Microsoft", 1
),
LlmModel.GRYPHE_MYTHOMAX_L2_13B: ModelMetadata(
"open_router", 4096, 4096, "MythoMax L2 13B", "OpenRouter", "Gryphe", 1
),
LlmModel.META_LLAMA_4_SCOUT: ModelMetadata(
"open_router", 131072, 131072, "Llama 4 Scout", "OpenRouter", "Meta", 1
),
LlmModel.META_LLAMA_4_MAVERICK: ModelMetadata(
"open_router", 1048576, 1000000, "Llama 4 Maverick", "OpenRouter", "Meta", 1
),
LlmModel.GROK_4: ModelMetadata(
"open_router", 256000, 256000, "Grok 4", "OpenRouter", "xAI", 3
),
LlmModel.GROK_4_FAST: ModelMetadata(
"open_router", 2000000, 30000, "Grok 4 Fast", "OpenRouter", "xAI", 1
),
LlmModel.GROK_4_1_FAST: ModelMetadata(
"open_router", 2000000, 30000, "Grok 4.1 Fast", "OpenRouter", "xAI", 1
),
LlmModel.GROK_CODE_FAST_1: ModelMetadata(
"open_router", 256000, 10000, "Grok Code Fast 1", "OpenRouter", "xAI", 1
),
LlmModel.KIMI_K2: ModelMetadata(
"open_router", 131000, 131000, "Kimi K2", "OpenRouter", "Moonshot AI", 1
),
LlmModel.QWEN3_235B_A22B_THINKING: ModelMetadata(
"open_router",
262144,
262144,
"Qwen 3 235B A22B Thinking 2507",
"OpenRouter",
"Qwen",
1,
),
LlmModel.QWEN3_CODER: ModelMetadata(
"open_router", 262144, 262144, "Qwen 3 Coder", "OpenRouter", "Qwen", 3
"open_router", 12288, 12288
),
LlmModel.OPENAI_GPT_OSS_120B: ModelMetadata("open_router", 131072, 131072),
LlmModel.OPENAI_GPT_OSS_20B: ModelMetadata("open_router", 131072, 32768),
LlmModel.AMAZON_NOVA_LITE_V1: ModelMetadata("open_router", 300000, 5120),
LlmModel.AMAZON_NOVA_MICRO_V1: ModelMetadata("open_router", 128000, 5120),
LlmModel.AMAZON_NOVA_PRO_V1: ModelMetadata("open_router", 300000, 5120),
LlmModel.MICROSOFT_WIZARDLM_2_8X22B: ModelMetadata("open_router", 65536, 4096),
LlmModel.GRYPHE_MYTHOMAX_L2_13B: ModelMetadata("open_router", 4096, 4096),
LlmModel.META_LLAMA_4_SCOUT: ModelMetadata("open_router", 131072, 131072),
LlmModel.META_LLAMA_4_MAVERICK: ModelMetadata("open_router", 1048576, 1000000),
LlmModel.GROK_4: ModelMetadata("open_router", 256000, 256000),
LlmModel.GROK_4_FAST: ModelMetadata("open_router", 2000000, 30000),
LlmModel.GROK_4_1_FAST: ModelMetadata("open_router", 2000000, 30000),
LlmModel.GROK_CODE_FAST_1: ModelMetadata("open_router", 256000, 10000),
LlmModel.KIMI_K2: ModelMetadata("open_router", 131000, 131000),
LlmModel.QWEN3_235B_A22B_THINKING: ModelMetadata("open_router", 262144, 262144),
LlmModel.QWEN3_CODER: ModelMetadata("open_router", 262144, 262144),
# Llama API models
LlmModel.LLAMA_API_LLAMA_4_SCOUT: ModelMetadata(
"llama_api",
128000,
4028,
"Llama 4 Scout 17B 16E Instruct FP8",
"Llama API",
"Meta",
1,
),
LlmModel.LLAMA_API_LLAMA4_MAVERICK: ModelMetadata(
"llama_api",
128000,
4028,
"Llama 4 Maverick 17B 128E Instruct FP8",
"Llama API",
"Meta",
1,
),
LlmModel.LLAMA_API_LLAMA3_3_8B: ModelMetadata(
"llama_api", 128000, 4028, "Llama 3.3 8B Instruct", "Llama API", "Meta", 1
),
LlmModel.LLAMA_API_LLAMA3_3_70B: ModelMetadata(
"llama_api", 128000, 4028, "Llama 3.3 70B Instruct", "Llama API", "Meta", 1
),
LlmModel.LLAMA_API_LLAMA_4_SCOUT: ModelMetadata("llama_api", 128000, 4028),
LlmModel.LLAMA_API_LLAMA4_MAVERICK: ModelMetadata("llama_api", 128000, 4028),
LlmModel.LLAMA_API_LLAMA3_3_8B: ModelMetadata("llama_api", 128000, 4028),
LlmModel.LLAMA_API_LLAMA3_3_70B: ModelMetadata("llama_api", 128000, 4028),
# v0 by Vercel models
LlmModel.V0_1_5_MD: ModelMetadata("v0", 128000, 64000, "v0 1.5 MD", "V0", "V0", 1),
LlmModel.V0_1_5_LG: ModelMetadata("v0", 512000, 64000, "v0 1.5 LG", "V0", "V0", 1),
LlmModel.V0_1_0_MD: ModelMetadata("v0", 128000, 64000, "v0 1.0 MD", "V0", "V0", 1),
LlmModel.V0_1_5_MD: ModelMetadata("v0", 128000, 64000),
LlmModel.V0_1_5_LG: ModelMetadata("v0", 512000, 64000),
LlmModel.V0_1_0_MD: ModelMetadata("v0", 128000, 64000),
}
DEFAULT_LLM_MODEL = LlmModel.GPT5_2

View File

@@ -242,7 +242,7 @@ async def test_smart_decision_maker_tracks_llm_stats():
outputs = {}
# Create execution context
mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False)
mock_execution_context = ExecutionContext(safe_mode=False)
# Create a mock execution processor for tests
@@ -343,7 +343,7 @@ async def test_smart_decision_maker_parameter_validation():
# Create execution context
mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False)
mock_execution_context = ExecutionContext(safe_mode=False)
# Create a mock execution processor for tests
@@ -409,7 +409,7 @@ async def test_smart_decision_maker_parameter_validation():
# Create execution context
mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False)
mock_execution_context = ExecutionContext(safe_mode=False)
# Create a mock execution processor for tests
@@ -471,7 +471,7 @@ async def test_smart_decision_maker_parameter_validation():
outputs = {}
# Create execution context
mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False)
mock_execution_context = ExecutionContext(safe_mode=False)
# Create a mock execution processor for tests
@@ -535,7 +535,7 @@ async def test_smart_decision_maker_parameter_validation():
outputs = {}
# Create execution context
mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False)
mock_execution_context = ExecutionContext(safe_mode=False)
# Create a mock execution processor for tests
@@ -658,7 +658,7 @@ async def test_smart_decision_maker_raw_response_conversion():
outputs = {}
# Create execution context
mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False)
mock_execution_context = ExecutionContext(safe_mode=False)
# Create a mock execution processor for tests
@@ -730,7 +730,7 @@ async def test_smart_decision_maker_raw_response_conversion():
outputs = {}
# Create execution context
mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False)
mock_execution_context = ExecutionContext(safe_mode=False)
# Create a mock execution processor for tests
@@ -786,7 +786,7 @@ async def test_smart_decision_maker_raw_response_conversion():
outputs = {}
# Create execution context
mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False)
mock_execution_context = ExecutionContext(safe_mode=False)
# Create a mock execution processor for tests
@@ -905,7 +905,7 @@ async def test_smart_decision_maker_agent_mode():
# Create a mock execution context
mock_execution_context = ExecutionContext(
human_in_the_loop_safe_mode=False,
safe_mode=False,
)
# Create a mock execution processor for agent mode tests
@@ -1027,7 +1027,7 @@ async def test_smart_decision_maker_traditional_mode_default():
# Create execution context
mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False)
mock_execution_context = ExecutionContext(safe_mode=False)
# Create a mock execution processor for tests

View File

@@ -386,7 +386,7 @@ async def test_output_yielding_with_dynamic_fields():
outputs = {}
from backend.data.execution import ExecutionContext
mock_execution_context = ExecutionContext(human_in_the_loop_safe_mode=False)
mock_execution_context = ExecutionContext(safe_mode=False)
mock_execution_processor = MagicMock()
async for output_name, output_value in block.run(
@@ -609,9 +609,7 @@ async def test_validation_errors_dont_pollute_conversation():
outputs = {}
from backend.data.execution import ExecutionContext
mock_execution_context = ExecutionContext(
human_in_the_loop_safe_mode=False
)
mock_execution_context = ExecutionContext(safe_mode=False)
# Create a proper mock execution processor for agent mode
from collections import defaultdict

View File

@@ -474,7 +474,7 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):
self.block_type = block_type
self.webhook_config = webhook_config
self.execution_stats: NodeExecutionStats = NodeExecutionStats()
self.is_sensitive_action: bool = False
self.requires_human_review: bool = False
if self.webhook_config:
if isinstance(self.webhook_config, BlockWebhookConfig):
@@ -637,9 +637,8 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):
- should_pause: True if execution should be paused for review
- input_data_to_use: The input data to use (may be modified by reviewer)
"""
if not (
self.is_sensitive_action and execution_context.sensitive_action_safe_mode
):
# Skip review if not required or safe mode is disabled
if not self.requires_human_review or not execution_context.safe_mode:
return False, input_data
from backend.blocks.helpers.review import HITLReviewHelper
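
The hunk above renames `is_sensitive_action` to `requires_human_review` and gates the pause on a single `safe_mode` flag. A rough sketch of a block opting into review could look as follows; the block name, UUID, schemas, and import paths are invented for illustration, while the flag, the base class, and the schema helpers follow the patterns visible elsewhere in this diff:

```python
# Sketch only: illustrative block, not part of this changeset.
# Import paths are assumed, not taken from this diff.
from backend.data.block import (
    Block,
    BlockCategory,
    BlockOutput,
    BlockSchemaInput,
    BlockSchemaOutput,
)
from backend.data.model import SchemaField


class PurgeRecordsBlock(Block):
    class Input(BlockSchemaInput):
        table: str = SchemaField(description="Table whose records will be purged.")

    class Output(BlockSchemaOutput):
        purged: bool = SchemaField(description="True once the purge has run.")

    def __init__(self):
        super().__init__(
            id="00000000-0000-0000-0000-000000000000",  # placeholder UUID
            description="Destructive operation, so ask a human before running.",
            categories={BlockCategory.BASIC},
            input_schema=PurgeRecordsBlock.Input,
            output_schema=PurgeRecordsBlock.Output,
        )
        # With ExecutionContext.safe_mode enabled, _should_pause defers to
        # HITLReviewHelper and holds execution until a reviewer approves.
        self.requires_human_review = True

    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
        yield "purged", True
```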

View File

@@ -99,15 +99,10 @@ MODEL_COST: dict[LlmModel, int] = {
LlmModel.OPENAI_GPT_OSS_20B: 1,
LlmModel.GEMINI_2_5_PRO: 4,
LlmModel.GEMINI_3_PRO_PREVIEW: 5,
LlmModel.GEMINI_2_5_FLASH: 1,
LlmModel.GEMINI_2_0_FLASH: 1,
LlmModel.GEMINI_2_5_FLASH_LITE_PREVIEW: 1,
LlmModel.GEMINI_2_0_FLASH_LITE: 1,
LlmModel.MISTRAL_NEMO: 1,
LlmModel.COHERE_COMMAND_R_08_2024: 1,
LlmModel.COHERE_COMMAND_R_PLUS_08_2024: 3,
LlmModel.DEEPSEEK_CHAT: 2,
LlmModel.DEEPSEEK_R1_0528: 1,
LlmModel.PERPLEXITY_SONAR: 1,
LlmModel.PERPLEXITY_SONAR_PRO: 5,
LlmModel.PERPLEXITY_SONAR_DEEP_RESEARCH: 10,
@@ -131,6 +126,11 @@ MODEL_COST: dict[LlmModel, int] = {
LlmModel.KIMI_K2: 1,
LlmModel.QWEN3_235B_A22B_THINKING: 1,
LlmModel.QWEN3_CODER: 9,
LlmModel.GEMINI_2_5_FLASH: 1,
LlmModel.GEMINI_2_0_FLASH: 1,
LlmModel.GEMINI_2_5_FLASH_LITE_PREVIEW: 1,
LlmModel.GEMINI_2_0_FLASH_LITE: 1,
LlmModel.DEEPSEEK_R1_0528: 1,
# v0 by Vercel models
LlmModel.V0_1_5_MD: 1,
LlmModel.V0_1_5_LG: 2,

View File

@@ -38,6 +38,20 @@ POOL_TIMEOUT = os.getenv("DB_POOL_TIMEOUT")
if POOL_TIMEOUT:
DATABASE_URL = add_param(DATABASE_URL, "pool_timeout", POOL_TIMEOUT)
# Add public schema to search_path for pgvector type access
# The vector extension is in public schema, but search_path is determined by schema parameter
# Extract the schema from DATABASE_URL or default to 'public' (matching get_database_schema())
parsed_url = urlparse(DATABASE_URL)
url_params = dict(parse_qsl(parsed_url.query))
db_schema = url_params.get("schema", "public")
# Build search_path, avoiding duplicates if db_schema is already 'public'
search_path_schemas = list(
dict.fromkeys([db_schema, "public"])
) # Preserves order, removes duplicates
search_path = ",".join(search_path_schemas)
# This allows using ::vector without schema qualification
DATABASE_URL = add_param(DATABASE_URL, "options", f"-c search_path={search_path}")
HTTP_TIMEOUT = int(POOL_TIMEOUT) if POOL_TIMEOUT else None
prisma = Prisma(
@@ -113,48 +127,38 @@ async def _raw_with_schema(
*args,
execute: bool = False,
client: Prisma | None = None,
set_public_search_path: bool = False,
) -> list[dict] | int:
"""Internal: Execute raw SQL with proper schema handling.
Use query_raw_with_schema() or execute_raw_with_schema() instead.
Supports placeholders:
- {schema_prefix}: Table/type prefix (e.g., "platform".)
- {schema}: Raw schema name for application tables (e.g., platform)
Note on pgvector types:
Use unqualified ::vector and <=> operator in queries. PostgreSQL resolves
these via search_path, which includes the schema where pgvector is installed
on all environments (local, CI, dev).
Args:
query_template: SQL query with {schema_prefix} and/or {schema} placeholders
query_template: SQL query with {schema_prefix} placeholder
*args: Query parameters
execute: If False, executes SELECT query. If True, executes INSERT/UPDATE/DELETE.
client: Optional Prisma client for transactions (only used when execute=True).
set_public_search_path: If True, sets search_path to include public schema.
Needed for pgvector types and other public schema objects.
Returns:
- list[dict] if execute=False (query results)
- int if execute=True (number of affected rows)
Example with vector type:
await execute_raw_with_schema(
'INSERT INTO {schema_prefix}"Embedding" (vec) VALUES ($1::vector)',
embedding_data
)
"""
schema = get_database_schema()
schema_prefix = f'"{schema}".' if schema != "public" else ""
formatted_query = query_template.format(
schema_prefix=schema_prefix,
schema=schema,
)
formatted_query = query_template.format(schema_prefix=schema_prefix)
import prisma as prisma_module
db_client = client if client else prisma_module.get_client()
# Set search_path to include public schema if requested
# Prisma doesn't support the 'options' connection parameter, so we set it per-session
# This is idempotent and safe to call multiple times
if set_public_search_path:
await db_client.execute_raw(f"SET search_path = {schema}, public") # type: ignore
if execute:
result = await db_client.execute_raw(formatted_query, *args) # type: ignore
else:
@@ -163,12 +167,16 @@ async def _raw_with_schema(
return result
async def query_raw_with_schema(query_template: str, *args) -> list[dict]:
async def query_raw_with_schema(
query_template: str, *args, set_public_search_path: bool = False
) -> list[dict]:
"""Execute raw SQL SELECT query with proper schema handling.
Args:
query_template: SQL query with {schema_prefix} and/or {schema} placeholders
query_template: SQL query with {schema_prefix} placeholder
*args: Query parameters
set_public_search_path: If True, sets search_path to include public schema.
Needed for pgvector types and other public schema objects.
Returns:
List of result rows as dictionaries
@@ -179,20 +187,23 @@ async def query_raw_with_schema(query_template: str, *args) -> list[dict]:
user_id
)
"""
return await _raw_with_schema(query_template, *args, execute=False) # type: ignore
return await _raw_with_schema(query_template, *args, execute=False, set_public_search_path=set_public_search_path) # type: ignore
async def execute_raw_with_schema(
query_template: str,
*args,
client: Prisma | None = None,
set_public_search_path: bool = False,
) -> int:
"""Execute raw SQL command (INSERT/UPDATE/DELETE) with proper schema handling.
Args:
query_template: SQL query with {schema_prefix} and/or {schema} placeholders
query_template: SQL query with {schema_prefix} placeholder
*args: Query parameters
client: Optional Prisma client for transactions
set_public_search_path: If True, sets search_path to include public schema.
Needed for pgvector types and other public schema objects.
Returns:
Number of affected rows
@@ -204,7 +215,7 @@ async def execute_raw_with_schema(
client=tx # Optional transaction client
)
"""
return await _raw_with_schema(query_template, *args, execute=True, client=client) # type: ignore
return await _raw_with_schema(query_template, *args, execute=True, client=client, set_public_search_path=set_public_search_path) # type: ignore
class BaseDbModel(BaseModel):
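
For context on how the `{schema_prefix}` placeholder and the new `set_public_search_path` flag are meant to be used together, here is a minimal usage sketch. The import path and the `"AgentGraph"` table name are assumptions for illustration; only `query_raw_with_schema`, `execute_raw_with_schema`, and their parameters come from the hunk above.

```python
# Sketch only: import path and table name are assumed, not taken from this diff.
from backend.data.db import execute_raw_with_schema, query_raw_with_schema


async def rename_graph(graph_id: str, user_id: str, new_name: str) -> int:
    # {schema_prefix} expands to '"platform".' (or whatever schema is configured)
    # and to an empty string when the schema is "public".
    rows = await query_raw_with_schema(
        'SELECT "id" FROM {schema_prefix}"AgentGraph" '
        'WHERE "id" = $1 AND "userId" = $2',
        graph_id,
        user_id,
    )
    if not rows:
        return 0
    # Same placeholder handling for writes; pass client=tx to run inside a transaction.
    return await execute_raw_with_schema(
        'UPDATE {schema_prefix}"AgentGraph" SET "name" = $1 WHERE "id" = $2',
        new_name,
        graph_id,
    )
```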

View File

@@ -103,18 +103,8 @@ class RedisEventBus(BaseRedisEventBus[M], ABC):
return redis.get_redis()
def publish_event(self, event: M, channel_key: str):
"""
Publish an event to Redis. Gracefully handles connection failures
by logging the error instead of raising exceptions.
"""
try:
message, full_channel_name = self._serialize_message(event, channel_key)
self.connection.publish(full_channel_name, message)
except Exception:
logger.exception(
f"Failed to publish event to Redis channel {channel_key}. "
"Event bus operation will continue without Redis connectivity."
)
message, full_channel_name = self._serialize_message(event, channel_key)
self.connection.publish(full_channel_name, message)
def listen_events(self, channel_key: str) -> Generator[M, None, None]:
pubsub, full_channel_name = self._get_pubsub_channel(
@@ -138,19 +128,9 @@ class AsyncRedisEventBus(BaseRedisEventBus[M], ABC):
return await redis.get_redis_async()
async def publish_event(self, event: M, channel_key: str):
"""
Publish an event to Redis. Gracefully handles connection failures
by logging the error instead of raising exceptions.
"""
try:
message, full_channel_name = self._serialize_message(event, channel_key)
connection = await self.connection
await connection.publish(full_channel_name, message)
except Exception:
logger.exception(
f"Failed to publish event to Redis channel {channel_key}. "
"Event bus operation will continue without Redis connectivity."
)
message, full_channel_name = self._serialize_message(event, channel_key)
connection = await self.connection
await connection.publish(full_channel_name, message)
async def listen_events(self, channel_key: str) -> AsyncGenerator[M, None]:
pubsub, full_channel_name = self._get_pubsub_channel(

View File

@@ -1,56 +0,0 @@
"""
Tests for event_bus graceful degradation when Redis is unavailable.
"""
from unittest.mock import AsyncMock, patch
import pytest
from pydantic import BaseModel
from backend.data.event_bus import AsyncRedisEventBus
class TestEvent(BaseModel):
"""Test event model."""
message: str
class TestNotificationBus(AsyncRedisEventBus[TestEvent]):
"""Test implementation of AsyncRedisEventBus."""
Model = TestEvent
@property
def event_bus_name(self) -> str:
return "test_event_bus"
@pytest.mark.asyncio
async def test_publish_event_handles_connection_failure_gracefully():
"""Test that publish_event logs exception instead of raising when Redis is unavailable."""
bus = TestNotificationBus()
event = TestEvent(message="test message")
# Mock get_redis_async to raise connection error
with patch(
"backend.data.event_bus.redis.get_redis_async",
side_effect=ConnectionError("Authentication required."),
):
# Should not raise exception
await bus.publish_event(event, "test_channel")
@pytest.mark.asyncio
async def test_publish_event_works_with_redis_available():
"""Test that publish_event works normally when Redis is available."""
bus = TestNotificationBus()
event = TestEvent(message="test message")
# Mock successful Redis connection
mock_redis = AsyncMock()
mock_redis.publish = AsyncMock()
with patch("backend.data.event_bus.redis.get_redis_async", return_value=mock_redis):
await bus.publish_event(event, "test_channel")
mock_redis.publish.assert_called_once()

View File

@@ -81,10 +81,7 @@ class ExecutionContext(BaseModel):
This includes information needed by blocks, sub-graphs, and execution management.
"""
model_config = {"extra": "ignore"}
human_in_the_loop_safe_mode: bool = True
sensitive_action_safe_mode: bool = False
safe_mode: bool = True
user_timezone: str = "UTC"
root_execution_id: Optional[str] = None
parent_execution_id: Optional[str] = None

View File

@@ -3,7 +3,7 @@ import logging
import uuid
from collections import defaultdict
from datetime import datetime, timezone
from typing import TYPE_CHECKING, Annotated, Any, Literal, Optional, cast
from typing import TYPE_CHECKING, Any, Literal, Optional, cast
from prisma.enums import SubmissionStatus
from prisma.models import (
@@ -20,7 +20,7 @@ from prisma.types import (
AgentNodeLinkCreateInput,
StoreListingVersionWhereInput,
)
from pydantic import BaseModel, BeforeValidator, Field, create_model
from pydantic import BaseModel, Field, create_model
from pydantic.fields import computed_field
from backend.blocks.agent import AgentExecutorBlock
@@ -62,31 +62,7 @@ logger = logging.getLogger(__name__)
class GraphSettings(BaseModel):
# Use Annotated with BeforeValidator to coerce None to default values.
# This handles cases where the database has null values for these fields.
model_config = {"extra": "ignore"}
human_in_the_loop_safe_mode: Annotated[
bool, BeforeValidator(lambda v: v if v is not None else True)
] = True
sensitive_action_safe_mode: Annotated[
bool, BeforeValidator(lambda v: v if v is not None else False)
] = False
@classmethod
def from_graph(
cls,
graph: "GraphModel",
hitl_safe_mode: bool | None = None,
sensitive_action_safe_mode: bool = False,
) -> "GraphSettings":
# Default to True if not explicitly set
if hitl_safe_mode is None:
hitl_safe_mode = True
return cls(
human_in_the_loop_safe_mode=hitl_safe_mode,
sensitive_action_safe_mode=sensitive_action_safe_mode,
)
human_in_the_loop_safe_mode: bool | None = None
class Link(BaseDbModel):
@@ -268,14 +244,10 @@ class BaseGraph(BaseDbModel):
return any(
node.block_id
for node in self.nodes
if node.block.block_type == BlockType.HUMAN_IN_THE_LOOP
)
@computed_field
@property
def has_sensitive_action(self) -> bool:
return any(
node.block_id for node in self.nodes if node.block.is_sensitive_action
if (
node.block.block_type == BlockType.HUMAN_IN_THE_LOOP
or node.block.requires_human_review
)
)
@property

View File

@@ -328,8 +328,6 @@ async def clear_business_understanding(user_id: str) -> bool:
def format_understanding_for_prompt(understanding: BusinessUnderstanding) -> str:
"""Format business understanding as text for system prompt injection."""
if not understanding:
return ""
sections = []
# User info section

View File

@@ -309,7 +309,7 @@ def ensure_embeddings_coverage():
# Process in batches until no more missing embeddings
while True:
result = db_client.backfill_missing_embeddings(batch_size=100)
result = db_client.backfill_missing_embeddings(batch_size=10)
total_processed += result["processed"]
total_success += result["success"]

View File

@@ -873,8 +873,11 @@ async def add_graph_execution(
settings = await gdb.get_graph_settings(user_id=user_id, graph_id=graph_id)
execution_context = ExecutionContext(
human_in_the_loop_safe_mode=settings.human_in_the_loop_safe_mode,
sensitive_action_safe_mode=settings.sensitive_action_safe_mode,
safe_mode=(
settings.human_in_the_loop_safe_mode
if settings.human_in_the_loop_safe_mode is not None
else True
),
user_timezone=(
user.timezone if user.timezone != USER_TIMEZONE_NOT_SET else "UTC"
),

View File

@@ -386,7 +386,6 @@ async def test_add_graph_execution_is_repeatable(mocker: MockerFixture):
mock_user.timezone = "UTC"
mock_settings = mocker.MagicMock()
mock_settings.human_in_the_loop_safe_mode = True
mock_settings.sensitive_action_safe_mode = False
mock_udb.get_user_by_id = mocker.AsyncMock(return_value=mock_user)
mock_gdb.get_graph_settings = mocker.AsyncMock(return_value=mock_settings)
@@ -652,7 +651,6 @@ async def test_add_graph_execution_with_nodes_to_skip(mocker: MockerFixture):
mock_user.timezone = "UTC"
mock_settings = mocker.MagicMock()
mock_settings.human_in_the_loop_safe_mode = True
mock_settings.sensitive_action_safe_mode = False
mock_udb.get_user_by_id = mocker.AsyncMock(return_value=mock_user)
mock_gdb.get_graph_settings = mocker.AsyncMock(return_value=mock_settings)

View File

@@ -1,37 +1,11 @@
-- CreateExtension
-- Supabase: pgvector must be enabled via Dashboard → Database → Extensions first
-- Ensures vector extension is in the current schema (from DATABASE_URL ?schema= param)
-- If it exists in a different schema (e.g., public), we drop and recreate it in the current schema
-- This ensures vector type is in the same schema as tables, making ::vector work without explicit qualification
-- Create in public schema so vector type is available across all schemas
DO $$
DECLARE
current_schema_name text;
vector_schema text;
BEGIN
-- Get the current schema from search_path
SELECT current_schema() INTO current_schema_name;
-- Check if vector extension exists and which schema it's in
SELECT n.nspname INTO vector_schema
FROM pg_extension e
JOIN pg_namespace n ON e.extnamespace = n.oid
WHERE e.extname = 'vector';
-- Handle removal if in wrong schema
IF vector_schema IS NOT NULL AND vector_schema != current_schema_name THEN
BEGIN
-- Vector exists in a different schema, drop it first
RAISE WARNING 'pgvector found in schema "%" but need it in "%". Dropping and reinstalling...',
vector_schema, current_schema_name;
EXECUTE 'DROP EXTENSION IF EXISTS vector CASCADE';
EXCEPTION WHEN OTHERS THEN
RAISE EXCEPTION 'Failed to drop pgvector from schema "%": %. You may need to drop it manually.',
vector_schema, SQLERRM;
END;
END IF;
-- Create extension in current schema (let it fail naturally if not available)
EXECUTE format('CREATE EXTENSION IF NOT EXISTS vector SCHEMA %I', current_schema_name);
CREATE EXTENSION IF NOT EXISTS "vector" WITH SCHEMA "public";
EXCEPTION WHEN OTHERS THEN
RAISE NOTICE 'vector extension not available or already exists, skipping';
END $$;
-- CreateEnum
@@ -45,7 +19,7 @@ CREATE TABLE "UnifiedContentEmbedding" (
"contentType" "ContentType" NOT NULL,
"contentId" TEXT NOT NULL,
"userId" TEXT,
"embedding" vector(1536) NOT NULL,
"embedding" public.vector(1536) NOT NULL,
"searchableText" TEXT NOT NULL,
"metadata" JSONB NOT NULL DEFAULT '{}',
@@ -71,4 +45,4 @@ CREATE UNIQUE INDEX "UnifiedContentEmbedding_contentType_contentId_userId_key" O
-- Uses cosine distance operator (<=>), which matches the query in hybrid_search.py
-- Note: Drop first in case Prisma created a btree index (Prisma doesn't support HNSW)
DROP INDEX IF EXISTS "UnifiedContentEmbedding_embedding_idx";
CREATE INDEX "UnifiedContentEmbedding_embedding_idx" ON "UnifiedContentEmbedding" USING hnsw ("embedding" vector_cosine_ops);
CREATE INDEX "UnifiedContentEmbedding_embedding_idx" ON "UnifiedContentEmbedding" USING hnsw ("embedding" public.vector_cosine_ops);

View File

@@ -0,0 +1,71 @@
-- Acknowledge Supabase-managed extensions to prevent drift warnings
-- These extensions are pre-installed by Supabase in specific schemas
-- This migration ensures they exist where available (Supabase) or skips gracefully (CI)
-- Create schemas (safe in both CI and Supabase)
CREATE SCHEMA IF NOT EXISTS "extensions";
-- Extensions that exist in both CI and Supabase
DO $$
BEGIN
CREATE EXTENSION IF NOT EXISTS "pgcrypto" WITH SCHEMA "extensions";
EXCEPTION WHEN OTHERS THEN
RAISE NOTICE 'pgcrypto extension not available, skipping';
END $$;
DO $$
BEGIN
CREATE EXTENSION IF NOT EXISTS "uuid-ossp" WITH SCHEMA "extensions";
EXCEPTION WHEN OTHERS THEN
RAISE NOTICE 'uuid-ossp extension not available, skipping';
END $$;
-- Supabase-specific extensions (skip gracefully in CI)
DO $$
BEGIN
CREATE EXTENSION IF NOT EXISTS "pg_stat_statements" WITH SCHEMA "extensions";
EXCEPTION WHEN OTHERS THEN
RAISE NOTICE 'pg_stat_statements extension not available, skipping';
END $$;
DO $$
BEGIN
CREATE EXTENSION IF NOT EXISTS "pg_net" WITH SCHEMA "extensions";
EXCEPTION WHEN OTHERS THEN
RAISE NOTICE 'pg_net extension not available, skipping';
END $$;
DO $$
BEGIN
CREATE EXTENSION IF NOT EXISTS "pgjwt" WITH SCHEMA "extensions";
EXCEPTION WHEN OTHERS THEN
RAISE NOTICE 'pgjwt extension not available, skipping';
END $$;
DO $$
BEGIN
CREATE SCHEMA IF NOT EXISTS "graphql";
CREATE EXTENSION IF NOT EXISTS "pg_graphql" WITH SCHEMA "graphql";
EXCEPTION WHEN OTHERS THEN
RAISE NOTICE 'pg_graphql extension not available, skipping';
END $$;
DO $$
BEGIN
CREATE SCHEMA IF NOT EXISTS "pgsodium";
CREATE EXTENSION IF NOT EXISTS "pgsodium" WITH SCHEMA "pgsodium";
EXCEPTION WHEN OTHERS THEN
RAISE NOTICE 'pgsodium extension not available, skipping';
END $$;
DO $$
BEGIN
CREATE SCHEMA IF NOT EXISTS "vault";
CREATE EXTENSION IF NOT EXISTS "supabase_vault" WITH SCHEMA "vault";
EXCEPTION WHEN OTHERS THEN
RAISE NOTICE 'supabase_vault extension not available, skipping';
END $$;
-- Return to platform
CREATE SCHEMA IF NOT EXISTS "platform";

View File

@@ -366,12 +366,12 @@ def generate_block_markdown(
lines.append("")
# What it is (full description)
lines.append("### What it is")
lines.append(f"### What it is")
lines.append(block.description or "No description available.")
lines.append("")
# How it works (manual section)
lines.append("### How it works")
lines.append(f"### How it works")
how_it_works = manual_content.get(
"how_it_works", "_Add technical explanation here._"
)
@@ -383,7 +383,7 @@ def generate_block_markdown(
# Inputs table (auto-generated)
visible_inputs = [f for f in block.inputs if not f.hidden]
if visible_inputs:
lines.append("### Inputs")
lines.append(f"### Inputs")
lines.append("")
lines.append("| Input | Description | Type | Required |")
lines.append("|-------|-------------|------|----------|")
@@ -400,7 +400,7 @@ def generate_block_markdown(
# Outputs table (auto-generated)
visible_outputs = [f for f in block.outputs if not f.hidden]
if visible_outputs:
lines.append("### Outputs")
lines.append(f"### Outputs")
lines.append("")
lines.append("| Output | Description | Type |")
lines.append("|--------|-------------|------|")
@@ -414,7 +414,7 @@ def generate_block_markdown(
lines.append("")
# Possible use case (manual section)
lines.append("### Possible use case")
lines.append(f"### Possible use case")
use_case = manual_content.get("use_case", "_Add practical use case examples here._")
lines.append("<!-- MANUAL: use_case -->")
lines.append(use_case)

View File

@@ -11,7 +11,6 @@
"forked_from_version": null,
"has_external_trigger": false,
"has_human_in_the_loop": false,
"has_sensitive_action": false,
"id": "graph-123",
"input_schema": {
"properties": {},

View File

@@ -11,7 +11,6 @@
"forked_from_version": null,
"has_external_trigger": false,
"has_human_in_the_loop": false,
"has_sensitive_action": false,
"id": "graph-123",
"input_schema": {
"properties": {},

View File

@@ -27,8 +27,6 @@
"properties": {}
},
"has_external_trigger": false,
"has_human_in_the_loop": false,
"has_sensitive_action": false,
"trigger_setup_info": null,
"new_output": false,
"can_access_graph": true,
@@ -36,8 +34,7 @@
"is_favorite": false,
"recommended_schedule_cron": null,
"settings": {
"human_in_the_loop_safe_mode": true,
"sensitive_action_safe_mode": false
"human_in_the_loop_safe_mode": null
},
"marketplace_listing": null
},
@@ -68,8 +65,6 @@
"properties": {}
},
"has_external_trigger": false,
"has_human_in_the_loop": false,
"has_sensitive_action": false,
"trigger_setup_info": null,
"new_output": false,
"can_access_graph": false,
@@ -77,8 +72,7 @@
"is_favorite": false,
"recommended_schedule_cron": null,
"settings": {
"human_in_the_loop_safe_mode": true,
"sensitive_action_safe_mode": false
"human_in_the_loop_safe_mode": null
},
"marketplace_listing": null
}

View File

@@ -175,8 +175,6 @@ While server components and actions are cool and cutting-edge, they introduce a
- Prefer [React Query](https://tanstack.com/query/latest/docs/framework/react/overview) for server state, colocated near consumers (see [state colocation](https://kentcdodds.com/blog/state-colocation-will-make-your-react-app-faster))
- Co-locate UI state inside components/hooks; keep global state minimal
- Avoid `useMemo` and `useCallback` unless you have a measured performance issue
- Do not abuse `useEffect`; prefer state colocation and derive values directly when possible
### Styling and components
@@ -551,48 +549,9 @@ Files:
Types:
- Prefer `interface` for object shapes
- Component props should be `interface Props { ... }` (not exported)
- Only use specific exported names (e.g., `export interface MyComponentProps`) when the interface needs to be used outside the component
- Keep type definitions inline with the component - do not create separate `types.ts` files unless types are shared across multiple files
- Component props should be `interface Props { ... }`
- Use precise types; avoid `any` and unsafe casts
**Props naming examples:**
```tsx
// ✅ Good - internal props, not exported
interface Props {
title: string;
onClose: () => void;
}
export function Modal({ title, onClose }: Props) {
// ...
}
// ✅ Good - exported when needed externally
export interface ModalProps {
title: string;
onClose: () => void;
}
export function Modal({ title, onClose }: ModalProps) {
// ...
}
// ❌ Bad - unnecessarily specific name for internal use
interface ModalComponentProps {
title: string;
onClose: () => void;
}
// ❌ Bad - separate types.ts file for single component
// types.ts
export interface ModalProps { ... }
// Modal.tsx
import type { ModalProps } from './types';
```
Parameters:
- If more than one parameter is needed, pass a single `Args` object for clarity

13 binary image files deleted (sizes 374 B – 72 KiB); previews not shown.
View File

@@ -1,58 +0,0 @@
"use client";
import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner";
import { Text } from "@/components/atoms/Text/Text";
import { useToast } from "@/components/molecules/Toast/use-toast";
import { useSupabase } from "@/lib/supabase/hooks/useSupabase";
import { useRouter } from "next/navigation";
import { useEffect, useRef } from "react";
const LOGOUT_REDIRECT_DELAY_MS = 400;
function wait(ms: number): Promise<void> {
return new Promise(function resolveAfterDelay(resolve) {
setTimeout(resolve, ms);
});
}
export default function LogoutPage() {
const { logOut } = useSupabase();
const { toast } = useToast();
const router = useRouter();
const hasStartedRef = useRef(false);
useEffect(
function handleLogoutEffect() {
if (hasStartedRef.current) return;
hasStartedRef.current = true;
async function runLogout() {
try {
await logOut();
} catch {
toast({
title: "Failed to log out. Redirecting to login.",
variant: "destructive",
});
} finally {
await wait(LOGOUT_REDIRECT_DELAY_MS);
router.replace("/login");
}
}
void runLogout();
},
[logOut, router, toast],
);
return (
<div className="flex min-h-screen items-center justify-center px-4">
<div className="flex flex-col items-center justify-center gap-4 py-8">
<LoadingSpinner size="large" />
<Text variant="body" className="text-center">
Logging you out...
</Text>
</div>
</div>
);
}

View File

@@ -9,7 +9,7 @@ export async function GET(request: Request) {
const { searchParams, origin } = new URL(request.url);
const code = searchParams.get("code");
let next = "/";
let next = "/marketplace";
if (code) {
const supabase = await getServerSupabase();

View File

@@ -5,11 +5,10 @@ import {
TooltipContent,
TooltipTrigger,
} from "@/components/atoms/Tooltip/BaseTooltip";
import { CircleNotchIcon, PlayIcon, StopIcon } from "@phosphor-icons/react";
import { PlayIcon, StopIcon } from "@phosphor-icons/react";
import { useShallow } from "zustand/react/shallow";
import { RunInputDialog } from "../RunInputDialog/RunInputDialog";
import { useRunGraph } from "./useRunGraph";
import { cn } from "@/lib/utils";
export const RunGraph = ({ flowID }: { flowID: string | null }) => {
const {
@@ -25,31 +24,6 @@ export const RunGraph = ({ flowID }: { flowID: string | null }) => {
useShallow((state) => state.isGraphRunning),
);
const isLoading = isExecutingGraph || isTerminatingGraph || isSaving;
// Determine which icon to show with proper animation
const renderIcon = () => {
const iconClass = cn(
"size-4 transition-transform duration-200 ease-out",
!isLoading && "group-hover:scale-110",
);
if (isLoading) {
return (
<CircleNotchIcon
className={cn(iconClass, "animate-spin")}
weight="bold"
/>
);
}
if (isGraphRunning) {
return <StopIcon className={iconClass} weight="fill" />;
}
return <PlayIcon className={iconClass} weight="fill" />;
};
return (
<>
<Tooltip>
@@ -59,18 +33,18 @@ export const RunGraph = ({ flowID }: { flowID: string | null }) => {
variant={isGraphRunning ? "destructive" : "primary"}
data-id={isGraphRunning ? "stop-graph-button" : "run-graph-button"}
onClick={isGraphRunning ? handleStopGraph : handleRunGraph}
disabled={!flowID || isLoading}
className="group"
disabled={!flowID || isExecutingGraph || isTerminatingGraph}
loading={isExecutingGraph || isTerminatingGraph || isSaving}
>
{renderIcon()}
{!isGraphRunning ? (
<PlayIcon className="size-4" />
) : (
<StopIcon className="size-4" />
)}
</Button>
</TooltipTrigger>
<TooltipContent>
{isLoading
? "Processing..."
: isGraphRunning
? "Stop agent"
: "Run agent"}
{isGraphRunning ? "Stop agent" : "Run agent"}
</TooltipContent>
</Tooltip>
<RunInputDialog

View File

@@ -10,7 +10,6 @@ import { useRunInputDialog } from "./useRunInputDialog";
import { CronSchedulerDialog } from "../CronSchedulerDialog/CronSchedulerDialog";
import { useTutorialStore } from "@/app/(platform)/build/stores/tutorialStore";
import { useEffect } from "react";
import { CredentialsGroupedView } from "@/components/contextual/CredentialsInput/components/CredentialsGroupedView/CredentialsGroupedView";
export const RunInputDialog = ({
isOpen,
@@ -24,17 +23,19 @@ export const RunInputDialog = ({
const hasInputs = useGraphStore((state) => state.hasInputs);
const hasCredentials = useGraphStore((state) => state.hasCredentials);
const inputSchema = useGraphStore((state) => state.inputSchema);
const credentialsSchema = useGraphStore(
(state) => state.credentialsInputSchema,
);
const {
credentialFields,
requiredCredentials,
credentialsUiSchema,
handleManualRun,
handleInputChange,
openCronSchedulerDialog,
setOpenCronSchedulerDialog,
inputValues,
credentialValues,
handleCredentialFieldChange,
handleCredentialChange,
isExecutingGraph,
} = useRunInputDialog({ setIsOpen });
@@ -61,67 +62,67 @@ export const RunInputDialog = ({
isOpen,
set: setIsOpen,
}}
styling={{ maxWidth: "700px", minWidth: "700px" }}
styling={{ maxWidth: "600px", minWidth: "600px" }}
>
<Dialog.Content>
<div
className="grid grid-cols-[1fr_auto] gap-10 p-1"
data-id="run-input-dialog-content"
>
<div className="space-y-6">
{/* Credentials Section */}
{hasCredentials() && credentialFields.length > 0 && (
<div data-id="run-input-credentials-section">
<div className="mb-4">
<Text variant="h4" className="text-gray-900">
Credentials
</Text>
</div>
<div className="px-2" data-id="run-input-credentials-form">
<CredentialsGroupedView
credentialFields={credentialFields}
requiredCredentials={requiredCredentials}
inputCredentials={credentialValues}
inputValues={inputValues}
onCredentialChange={handleCredentialFieldChange}
/>
</div>
<div className="space-y-6 p-1" data-id="run-input-dialog-content">
{/* Credentials Section */}
{hasCredentials() && (
<div data-id="run-input-credentials-section">
<div className="mb-4">
<Text variant="h4" className="text-gray-900">
Credentials
</Text>
</div>
)}
{/* Inputs Section */}
{hasInputs() && (
<div data-id="run-input-inputs-section">
<div className="mb-4">
<Text variant="h4" className="text-gray-900">
Inputs
</Text>
</div>
<div data-id="run-input-inputs-form">
<FormRenderer
jsonSchema={inputSchema as RJSFSchema}
handleChange={(v) => handleInputChange(v.formData)}
uiSchema={uiSchema}
initialValues={{}}
formContext={{
showHandles: false,
size: "large",
}}
/>
</div>
<div className="px-2" data-id="run-input-credentials-form">
<FormRenderer
jsonSchema={credentialsSchema as RJSFSchema}
handleChange={(v) => handleCredentialChange(v.formData)}
uiSchema={credentialsUiSchema}
initialValues={{}}
formContext={{
showHandles: false,
size: "large",
showOptionalToggle: false,
}}
/>
</div>
)}
</div>
</div>
)}
{/* Inputs Section */}
{hasInputs() && (
<div data-id="run-input-inputs-section">
<div className="mb-4">
<Text variant="h4" className="text-gray-900">
Inputs
</Text>
</div>
<div data-id="run-input-inputs-form">
<FormRenderer
jsonSchema={inputSchema as RJSFSchema}
handleChange={(v) => handleInputChange(v.formData)}
uiSchema={uiSchema}
initialValues={{}}
formContext={{
showHandles: false,
size: "large",
}}
/>
</div>
</div>
)}
{/* Action Button */}
<div
className="flex flex-col items-end justify-start"
className="flex justify-end pt-2"
data-id="run-input-actions-section"
>
{purpose === "run" && (
<Button
variant="primary"
size="large"
className="group h-fit min-w-0 gap-2 px-10"
className="group h-fit min-w-0 gap-2"
onClick={handleManualRun}
loading={isExecutingGraph}
data-id="run-input-manual-run-button"
@@ -136,7 +137,7 @@ export const RunInputDialog = ({
<Button
variant="primary"
size="large"
className="group h-fit min-w-0 gap-2 px-10"
className="group h-fit min-w-0 gap-2"
onClick={() => setOpenCronSchedulerDialog(true)}
data-id="run-input-schedule-button"
>

View File

@@ -7,11 +7,12 @@ import {
GraphExecutionMeta,
} from "@/lib/autogpt-server-api";
import { parseAsInteger, parseAsString, useQueryStates } from "nuqs";
import { useCallback, useMemo, useState } from "react";
import { useMemo, useState } from "react";
import { uiSchema } from "../../../FlowEditor/nodes/uiSchema";
import { isCredentialFieldSchema } from "@/components/renderers/InputRenderer/custom/CredentialField/helpers";
import { useNodeStore } from "@/app/(platform)/build/stores/nodeStore";
import { useToast } from "@/components/molecules/Toast/use-toast";
import { useReactFlow } from "@xyflow/react";
import type { CredentialField } from "@/components/contextual/CredentialsInput/components/CredentialsGroupedView/helpers";
export const useRunInputDialog = ({
setIsOpen,
@@ -119,32 +120,27 @@ export const useRunInputDialog = ({
},
});
// Convert credentials schema to credential fields array for CredentialsGroupedView
const credentialFields: CredentialField[] = useMemo(() => {
if (!credentialsSchema?.properties) return [];
return Object.entries(credentialsSchema.properties);
}, [credentialsSchema]);
// We are rendering the credentials field differently compared to other fields.
// In the node, we have the field name as "credential" - so our library catches it and renders it differently.
// But here we have a different name, something like `Firecrawl credentials`, so here we are telling the library that this field is a credential field type.
// Get required credentials as a Set
const requiredCredentials = useMemo(() => {
return new Set<string>(credentialsSchema?.required || []);
}, [credentialsSchema]);
const credentialsUiSchema = useMemo(() => {
const dynamicUiSchema: any = { ...uiSchema };
// Handler for individual credential changes
const handleCredentialFieldChange = useCallback(
(key: string, value?: CredentialsMetaInput) => {
setCredentialValues((prev) => {
if (value) {
return { ...prev, [key]: value };
} else {
const next = { ...prev };
delete next[key];
return next;
if (credentialsSchema?.properties) {
Object.keys(credentialsSchema.properties).forEach((fieldName) => {
const fieldSchema = credentialsSchema.properties[fieldName];
if (isCredentialFieldSchema(fieldSchema)) {
dynamicUiSchema[fieldName] = {
...dynamicUiSchema[fieldName],
"ui:field": "custom/credential_field",
};
}
});
},
[],
);
}
return dynamicUiSchema;
}, [credentialsSchema]);
const handleManualRun = async () => {
// Filter out incomplete credentials (those without a valid id)
@@ -177,14 +173,12 @@ export const useRunInputDialog = ({
};
return {
credentialFields,
requiredCredentials,
credentialsUiSchema,
inputValues,
credentialValues,
isExecutingGraph,
handleInputChange,
handleCredentialChange,
handleCredentialFieldChange,
handleManualRun,
openCronSchedulerDialog,
setOpenCronSchedulerDialog,

View File

@@ -18,118 +18,69 @@ interface Props {
fullWidth?: boolean;
}
interface SafeModeButtonProps {
isEnabled: boolean;
label: string;
tooltipEnabled: string;
tooltipDisabled: string;
onToggle: () => void;
isPending: boolean;
fullWidth?: boolean;
}
function SafeModeButton({
isEnabled,
label,
tooltipEnabled,
tooltipDisabled,
onToggle,
isPending,
fullWidth = false,
}: SafeModeButtonProps) {
return (
<Tooltip delayDuration={100}>
<TooltipTrigger asChild>
<Button
variant={isEnabled ? "primary" : "outline"}
size="small"
onClick={onToggle}
disabled={isPending}
className={cn("justify-start", fullWidth ? "w-full" : "")}
>
{isEnabled ? (
<>
<ShieldCheckIcon weight="bold" size={16} />
<Text variant="body" className="text-zinc-200">
{label}: ON
</Text>
</>
) : (
<>
<ShieldIcon weight="bold" size={16} />
<Text variant="body" className="text-zinc-600">
{label}: OFF
</Text>
</>
)}
</Button>
</TooltipTrigger>
<TooltipContent>
<div className="text-center">
<div className="font-medium">
{label}: {isEnabled ? "ON" : "OFF"}
</div>
<div className="mt-1 text-xs text-muted-foreground">
{isEnabled ? tooltipEnabled : tooltipDisabled}
</div>
</div>
</TooltipContent>
</Tooltip>
);
}
export function FloatingSafeModeToggle({
graph,
className,
fullWidth = false,
}: Props) {
const {
currentHITLSafeMode,
showHITLToggle,
isHITLStateUndetermined,
handleHITLToggle,
currentSensitiveActionSafeMode,
showSensitiveActionToggle,
handleSensitiveActionToggle,
currentSafeMode,
isPending,
shouldShowToggle,
isStateUndetermined,
handleToggle,
} = useAgentSafeMode(graph);
if (!shouldShowToggle || isPending) {
return null;
}
const showHITL = showHITLToggle && !isHITLStateUndetermined;
const showSensitive = showSensitiveActionToggle;
if (!showHITL && !showSensitive) {
if (!shouldShowToggle || isStateUndetermined || isPending) {
return null;
}
return (
<div className={cn("fixed z-50 flex flex-col gap-2", className)}>
{showHITL && (
<SafeModeButton
isEnabled={currentHITLSafeMode}
label="Human in the loop block approval"
tooltipEnabled="The agent will pause at human-in-the-loop blocks and wait for your approval"
tooltipDisabled="Human in the loop blocks will proceed automatically"
onToggle={handleHITLToggle}
isPending={isPending}
fullWidth={fullWidth}
/>
)}
{showSensitive && (
<SafeModeButton
isEnabled={currentSensitiveActionSafeMode}
label="Sensitive actions blocks approval"
tooltipEnabled="The agent will pause at sensitive action blocks and wait for your approval"
tooltipDisabled="Sensitive action blocks will proceed automatically"
onToggle={handleSensitiveActionToggle}
isPending={isPending}
fullWidth={fullWidth}
/>
)}
<div className={cn("fixed z-50", className)}>
<Tooltip delayDuration={100}>
<TooltipTrigger asChild>
<Button
variant={currentSafeMode! ? "primary" : "outline"}
key={graph.id}
size="small"
title={
currentSafeMode!
? "Safe Mode: ON. Human in the loop blocks require manual review"
: "Safe Mode: OFF. Human in the loop blocks proceed automatically"
}
onClick={handleToggle}
className={cn(fullWidth ? "w-full" : "")}
>
{currentSafeMode! ? (
<>
<ShieldCheckIcon weight="bold" size={16} />
<Text variant="body" className="text-zinc-200">
Safe Mode: ON
</Text>
</>
) : (
<>
<ShieldIcon weight="bold" size={16} />
<Text variant="body" className="text-zinc-600">
Safe Mode: OFF
</Text>
</>
)}
</Button>
</TooltipTrigger>
<TooltipContent>
<div className="text-center">
<div className="font-medium">
Safe Mode: {currentSafeMode! ? "ON" : "OFF"}
</div>
<div className="mt-1 text-xs text-muted-foreground">
{currentSafeMode!
? "Human in the loop blocks require manual review"
: "Human in the loop blocks proceed automatically"}
</div>
</div>
</TooltipContent>
</Tooltip>
</div>
);
}

View File

@@ -53,14 +53,14 @@ export const CustomControls = memo(
const controls = [
{
id: "zoom-in-button",
icon: <PlusIcon className="size-3.5 text-zinc-600" />,
icon: <PlusIcon className="size-4" />,
label: "Zoom In",
onClick: () => zoomIn(),
className: "h-10 w-10 border-none",
},
{
id: "zoom-out-button",
icon: <MinusIcon className="size-3.5 text-zinc-600" />,
icon: <MinusIcon className="size-4" />,
label: "Zoom Out",
onClick: () => zoomOut(),
className: "h-10 w-10 border-none",
@@ -68,9 +68,9 @@ export const CustomControls = memo(
{
id: "tutorial-button",
icon: isTutorialLoading ? (
<CircleNotchIcon className="size-3.5 animate-spin text-zinc-600" />
<CircleNotchIcon className="size-4 animate-spin" />
) : (
<ChalkboardIcon className="size-3.5 text-zinc-600" />
<ChalkboardIcon className="size-4" />
),
label: isTutorialLoading ? "Loading Tutorial..." : "Start Tutorial",
onClick: handleTutorialClick,
@@ -79,7 +79,7 @@ export const CustomControls = memo(
},
{
id: "fit-view-button",
icon: <FrameCornersIcon className="size-3.5 text-zinc-600" />,
icon: <FrameCornersIcon className="size-4" />,
label: "Fit View",
onClick: () => fitView({ padding: 0.2, duration: 800, maxZoom: 1 }),
className: "h-10 w-10 border-none",
@@ -87,9 +87,9 @@ export const CustomControls = memo(
{
id: "lock-button",
icon: !isLocked ? (
<LockOpenIcon className="size-3.5 text-zinc-600" />
<LockOpenIcon className="size-4" />
) : (
<LockIcon className="size-3.5 text-zinc-600" />
<LockIcon className="size-4" />
),
label: "Toggle Lock",
onClick: () => setIsLocked(!isLocked),

View File

@@ -139,6 +139,14 @@ export const useFlow = () => {
useNodeStore.getState().setNodes([]);
useNodeStore.getState().clearResolutionState();
addNodes(customNodes);
// Sync hardcoded values with handle IDs.
// If a key-value field has a key without a value, the backend omits it from hardcoded values.
// But if a handleId exists for that key, it causes inconsistency.
// This ensures hardcoded values stay in sync with handle IDs.
customNodes.forEach((node) => {
useNodeStore.getState().syncHardcodedValuesWithHandleIds(node.id);
});
}
}, [customNodes, addNodes]);
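The comment in the hunk above describes keeping a node's hardcoded values aligned with its handle IDs so a key that only exists as a handle never disappears from the stored values. A minimal sketch of that idea follows; the node shape (`hardcodedValues`, `handleIds`) is assumed for illustration and is not the real store type.

type NodeLike = {
  id: string;
  data: {
    hardcodedValues: Record<string, unknown>;
    handleIds: string[];
  };
};

// Ensure every handle-backed key has an entry in hardcodedValues, so the
// backend omitting an empty value cannot leave the two out of sync.
function syncHardcodedValuesWithHandleIds(node: NodeLike): NodeLike {
  const next = { ...node.data.hardcodedValues };
  for (const handleId of node.data.handleIds) {
    if (!(handleId in next)) {
      next[handleId] = undefined;
    }
  }
  return { ...node, data: { ...node.data, hardcodedValues: next } };
}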
@@ -150,14 +158,6 @@ export const useFlow = () => {
}
}, [graph?.links, addLinks]);
useEffect(() => {
if (customNodes.length > 0 && graph?.links) {
customNodes.forEach((node) => {
useNodeStore.getState().syncHardcodedValuesWithHandleIds(node.id);
});
}
}, [customNodes, graph?.links]);
// update node execution status in nodes
useEffect(() => {
if (

View File

@@ -19,8 +19,6 @@ export type CustomEdgeData = {
beadUp?: number;
beadDown?: number;
beadData?: Map<string, NodeExecutionResult["status"]>;
edgeColorClass?: string;
edgeHexColor?: string;
};
export type CustomEdge = XYEdge<CustomEdgeData, "custom">;
@@ -38,6 +36,7 @@ const CustomEdge = ({
selected,
}: EdgeProps<CustomEdge>) => {
const removeConnection = useEdgeStore((state) => state.removeEdge);
// Subscribe to the brokenEdgeIDs map and check if this edge is broken across any node
const isBroken = useNodeStore((state) => state.isEdgeBroken(id));
const [isHovered, setIsHovered] = useState(false);
@@ -53,7 +52,6 @@ const CustomEdge = ({
const isStatic = data?.isStatic ?? false;
const beadUp = data?.beadUp ?? 0;
const beadDown = data?.beadDown ?? 0;
const edgeColorClass = data?.edgeColorClass;
const handleRemoveEdge = () => {
removeConnection(id);
@@ -72,9 +70,7 @@ const CustomEdge = ({
? "!stroke-red-500 !stroke-[2px] [stroke-dasharray:4]"
: selected
? "stroke-zinc-800"
: edgeColorClass
? cn(edgeColorClass, "opacity-70 hover:opacity-100")
: "stroke-zinc-500/50 hover:stroke-zinc-500",
: "stroke-zinc-500/50 hover:stroke-zinc-500",
)}
/>
<JSBeads

View File

@@ -8,7 +8,6 @@ import { useCallback } from "react";
import { useNodeStore } from "../../../stores/nodeStore";
import { useHistoryStore } from "../../../stores/historyStore";
import { CustomEdge } from "./CustomEdge";
import { getEdgeColorFromOutputType } from "../nodes/helpers";
export const useCustomEdge = () => {
const edges = useEdgeStore((s) => s.edges);
@@ -35,13 +34,8 @@ export const useCustomEdge = () => {
if (exists) return;
const nodes = useNodeStore.getState().nodes;
const sourceNode = nodes.find((n) => n.id === conn.source);
const isStatic = sourceNode?.data?.staticOutput;
const { colorClass, hexColor } = getEdgeColorFromOutputType(
sourceNode?.data?.outputSchema,
conn.sourceHandle,
);
const isStatic = nodes.find((n) => n.id === conn.source)?.data
?.staticOutput;
addEdge({
source: conn.source,
@@ -50,8 +44,6 @@ export const useCustomEdge = () => {
targetHandle: conn.targetHandle,
data: {
isStatic,
edgeColorClass: colorClass,
edgeHexColor: hexColor,
},
});
},

View File

@@ -1,21 +1,22 @@
import { Button } from "@/components/atoms/Button/Button";
import { Text } from "@/components/atoms/Text/Text";
import {
Accordion,
AccordionContent,
AccordionItem,
AccordionTrigger,
} from "@/components/molecules/Accordion/Accordion";
import { beautifyString, cn } from "@/lib/utils";
import { CopyIcon, CheckIcon } from "@phosphor-icons/react";
import { CaretDownIcon, CopyIcon, CheckIcon } from "@phosphor-icons/react";
import { NodeDataViewer } from "./components/NodeDataViewer/NodeDataViewer";
import { ContentRenderer } from "./components/ContentRenderer";
import { useNodeOutput } from "./useNodeOutput";
import { ViewMoreData } from "./components/ViewMoreData";
export const NodeDataRenderer = ({ nodeId }: { nodeId: string }) => {
const { outputData, copiedKey, handleCopy, executionResultId, inputData } =
useNodeOutput(nodeId);
const {
outputData,
isExpanded,
setIsExpanded,
copiedKey,
handleCopy,
executionResultId,
inputData,
} = useNodeOutput(nodeId);
if (Object.keys(outputData).length === 0) {
return null;
@@ -24,117 +25,122 @@ export const NodeDataRenderer = ({ nodeId }: { nodeId: string }) => {
return (
<div
data-tutorial-id={`node-output`}
className="rounded-b-xl border-t border-zinc-200 px-4 py-2"
className="flex flex-col gap-3 rounded-b-xl border-t border-zinc-200 px-4 py-4"
>
<Accordion type="single" collapsible defaultValue="node-output">
<AccordionItem value="node-output" className="border-none">
<AccordionTrigger className="py-2 hover:no-underline">
<Text
variant="body-medium"
className="!font-semibold text-slate-700"
>
Node Output
</Text>
</AccordionTrigger>
<AccordionContent className="pt-2">
<div className="flex max-w-[350px] flex-col gap-4">
<div className="space-y-2">
<Text variant="small-medium">Input</Text>
<div className="flex items-center justify-between">
<Text variant="body-medium" className="!font-semibold text-slate-700">
Node Output
</Text>
<Button
variant="ghost"
size="small"
onClick={() => setIsExpanded(!isExpanded)}
className="h-fit min-w-0 p-1 text-slate-600 hover:text-slate-900"
>
<CaretDownIcon
size={16}
weight="bold"
className={`transition-transform ${isExpanded ? "rotate-180" : ""}`}
/>
</Button>
</div>
<ContentRenderer value={inputData} shortContent={false} />
{isExpanded && (
<>
<div className="flex max-w-[350px] flex-col gap-4">
<div className="space-y-2">
<Text variant="small-medium">Input</Text>
<div className="mt-1 flex justify-end gap-1">
<NodeDataViewer
data={inputData}
pinName="Input"
execId={executionResultId}
/>
<Button
variant="secondary"
size="small"
onClick={() => handleCopy("input", inputData)}
className={cn(
"h-fit min-w-0 gap-1.5 border border-zinc-200 p-2 text-black hover:text-slate-900",
copiedKey === "input" &&
"border-green-400 bg-green-100 hover:border-green-400 hover:bg-green-200",
)}
>
{copiedKey === "input" ? (
<CheckIcon size={12} className="text-green-600" />
) : (
<CopyIcon size={12} />
)}
</Button>
</div>
<ContentRenderer value={inputData} shortContent={false} />
<div className="mt-1 flex justify-end gap-1">
<NodeDataViewer
data={inputData}
pinName="Input"
execId={executionResultId}
/>
<Button
variant="secondary"
size="small"
onClick={() => handleCopy("input", inputData)}
className={cn(
"h-fit min-w-0 gap-1.5 border border-zinc-200 p-2 text-black hover:text-slate-900",
copiedKey === "input" &&
"border-green-400 bg-green-100 hover:border-green-400 hover:bg-green-200",
)}
>
{copiedKey === "input" ? (
<CheckIcon size={12} className="text-green-600" />
) : (
<CopyIcon size={12} />
)}
</Button>
</div>
</div>
{Object.entries(outputData)
.slice(0, 2)
.map(([key, value]) => (
<div key={key} className="flex flex-col gap-2">
<div className="flex items-center gap-2">
<Text
variant="small-medium"
className="!font-semibold text-slate-600"
>
Pin:
</Text>
<Text variant="small" className="text-slate-700">
{beautifyString(key)}
</Text>
</div>
<div className="w-full space-y-2">
<Text
variant="small"
className="!font-semibold text-slate-600"
>
Data:
</Text>
<div className="relative space-y-2">
{value.map((item, index) => (
<div key={index}>
<ContentRenderer value={item} shortContent={true} />
</div>
))}
<div className="mt-1 flex justify-end gap-1">
<NodeDataViewer
data={value}
pinName={key}
execId={executionResultId}
/>
<Button
variant="secondary"
size="small"
onClick={() => handleCopy(key, value)}
className={cn(
"h-fit min-w-0 gap-1.5 border border-zinc-200 p-2 text-black hover:text-slate-900",
copiedKey === key &&
"border-green-400 bg-green-100 hover:border-green-400 hover:bg-green-200",
)}
>
{copiedKey === key ? (
<CheckIcon size={12} className="text-green-600" />
) : (
<CopyIcon size={12} />
)}
</Button>
{Object.entries(outputData)
.slice(0, 2)
.map(([key, value]) => (
<div key={key} className="flex flex-col gap-2">
<div className="flex items-center gap-2">
<Text
variant="small-medium"
className="!font-semibold text-slate-600"
>
Pin:
</Text>
<Text variant="small" className="text-slate-700">
{beautifyString(key)}
</Text>
</div>
<div className="w-full space-y-2">
<Text
variant="small"
className="!font-semibold text-slate-600"
>
Data:
</Text>
<div className="relative space-y-2">
{value.map((item, index) => (
<div key={index}>
<ContentRenderer value={item} shortContent={true} />
</div>
))}
<div className="mt-1 flex justify-end gap-1">
<NodeDataViewer
data={value}
pinName={key}
execId={executionResultId}
/>
<Button
variant="secondary"
size="small"
onClick={() => handleCopy(key, value)}
className={cn(
"h-fit min-w-0 gap-1.5 border border-zinc-200 p-2 text-black hover:text-slate-900",
copiedKey === key &&
"border-green-400 bg-green-100 hover:border-green-400 hover:bg-green-200",
)}
>
{copiedKey === key ? (
<CheckIcon size={12} className="text-green-600" />
) : (
<CopyIcon size={12} />
)}
</Button>
</div>
</div>
</div>
))}
</div>
</div>
))}
</div>
{Object.keys(outputData).length > 2 && (
<ViewMoreData
outputData={outputData}
execId={executionResultId}
/>
)}
</AccordionContent>
</AccordionItem>
</Accordion>
{Object.keys(outputData).length > 2 && (
<ViewMoreData outputData={outputData} execId={executionResultId} />
)}
</>
)}
</div>
);
};

View File

@@ -4,6 +4,7 @@ import { useShallow } from "zustand/react/shallow";
import { useState } from "react";
export const useNodeOutput = (nodeId: string) => {
const [isExpanded, setIsExpanded] = useState(true);
const [copiedKey, setCopiedKey] = useState<string | null>(null);
const { toast } = useToast();
@@ -36,10 +37,13 @@ export const useNodeOutput = (nodeId: string) => {
}
};
return {
outputData,
inputData,
copiedKey,
handleCopy,
outputData: outputData,
inputData: inputData,
isExpanded: isExpanded,
setIsExpanded: setIsExpanded,
copiedKey: copiedKey,
setCopiedKey: setCopiedKey,
handleCopy: handleCopy,
executionResultId: nodeExecutionResult?.node_exec_id,
};
};

View File

@@ -187,38 +187,3 @@ export const getTypeDisplayInfo = (schema: any) => {
hexColor,
};
};
export function getEdgeColorFromOutputType(
outputSchema: RJSFSchema | undefined,
sourceHandle: string,
): { colorClass: string; hexColor: string } {
const defaultColor = {
colorClass: "stroke-zinc-500/50",
hexColor: "#6b7280",
};
if (!outputSchema?.properties) return defaultColor;
const properties = outputSchema.properties as Record<string, unknown>;
const handleParts = sourceHandle.split("_#_");
let currentSchema: Record<string, unknown> = properties;
for (let i = 0; i < handleParts.length; i++) {
const part = handleParts[i];
const fieldSchema = currentSchema[part] as Record<string, unknown>;
if (!fieldSchema) return defaultColor;
if (i === handleParts.length - 1) {
const { hexColor, colorClass } = getTypeDisplayInfo(fieldSchema);
return { colorClass: colorClass.replace("!text-", "stroke-"), hexColor };
}
if (fieldSchema.properties) {
currentSchema = fieldSchema.properties as Record<string, unknown>;
} else {
return defaultColor;
}
}
return defaultColor;
}
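A small usage sketch of the removed helper, under an assumed output schema (the schema and handle name below are made up for illustration, and the RJSFSchema type is assumed to come from @rjsf/utils): the source handle is split on "_#_" and walked one level per part, and the schema found at the last part supplies the edge's stroke class and hex color.

import type { RJSFSchema } from "@rjsf/utils";

const outputSchema: RJSFSchema = {
  properties: {
    result: {
      properties: {
        items: { type: "array" },
      },
    },
  },
};

// Resolves properties.result.properties.items and maps its type info to a
// color; unknown handles fall back to the default zinc stroke.
const { colorClass, hexColor } = getEdgeColorFromOutputType(
  outputSchema,
  "result_#_items",
);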

View File

@@ -1,32 +1,7 @@
type IconOptions = {
size?: number;
color?: string;
};
const DEFAULT_SIZE = 16;
const DEFAULT_COLOR = "#52525b"; // zinc-600
const iconPaths = {
ClickIcon: `M88,24V16a8,8,0,0,1,16,0v8a8,8,0,0,1-16,0ZM16,104h8a8,8,0,0,0,0-16H16a8,8,0,0,0,0,16ZM124.42,39.16a8,8,0,0,0,10.74-3.58l8-16a8,8,0,0,0-14.31-7.16l-8,16A8,8,0,0,0,124.42,39.16Zm-96,81.69-16,8a8,8,0,0,0,7.16,14.31l16-8a8,8,0,1,0-7.16-14.31ZM219.31,184a16,16,0,0,1,0,22.63l-12.68,12.68a16,16,0,0,1-22.63,0L132.7,168,115,214.09c0,.1-.08.21-.13.32a15.83,15.83,0,0,1-14.6,9.59l-.79,0a15.83,15.83,0,0,1-14.41-11L32.8,52.92A16,16,0,0,1,52.92,32.8L213,85.07a16,16,0,0,1,1.41,29.8l-.32.13L168,132.69ZM208,195.31,156.69,144h0a16,16,0,0,1,4.93-26l.32-.14,45.95-17.64L48,48l52.2,159.86,17.65-46c0-.11.08-.22.13-.33a16,16,0,0,1,11.69-9.34,16.72,16.72,0,0,1,3-.28,16,16,0,0,1,11.3,4.69L195.31,208Z`,
Keyboard: `M224,48H32A16,16,0,0,0,16,64V192a16,16,0,0,0,16,16H224a16,16,0,0,0,16-16V64A16,16,0,0,0,224,48Zm0,144H32V64H224V192Zm-16-64a8,8,0,0,1-8,8H56a8,8,0,0,1,0-16H200A8,8,0,0,1,208,128Zm0-32a8,8,0,0,1-8,8H56a8,8,0,0,1,0-16H200A8,8,0,0,1,208,96ZM72,160a8,8,0,0,1-8,8H56a8,8,0,0,1,0-16h8A8,8,0,0,1,72,160Zm96,0a8,8,0,0,1-8,8H96a8,8,0,0,1,0-16h64A8,8,0,0,1,168,160Zm40,0a8,8,0,0,1-8,8h-8a8,8,0,0,1,0-16h8A8,8,0,0,1,208,160Z`,
Drag: `M188,80a27.79,27.79,0,0,0-13.36,3.4,28,28,0,0,0-46.64-11A28,28,0,0,0,80,92v20H68a28,28,0,0,0-28,28v12a88,88,0,0,0,176,0V108A28,28,0,0,0,188,80Zm12,72a72,72,0,0,1-144,0V140a12,12,0,0,1,12-12H80v24a8,8,0,0,0,16,0V92a12,12,0,0,1,24,0v28a8,8,0,0,0,16,0V92a12,12,0,0,1,24,0v28a8,8,0,0,0,16,0V108a12,12,0,0,1,24,0Z`,
};
function createIcon(path: string, options: IconOptions = {}): string {
const size = options.size ?? DEFAULT_SIZE;
const color = options.color ?? DEFAULT_COLOR;
return `<svg xmlns="http://www.w3.org/2000/svg" width="${size}" height="${size}" fill="${color}" viewBox="0 0 256 256"><path d="${path}"></path></svg>`;
}
// These are SVG Phosphor icons
export const ICONS = {
ClickIcon: createIcon(iconPaths.ClickIcon),
Keyboard: createIcon(iconPaths.Keyboard),
Drag: createIcon(iconPaths.Drag),
ClickIcon: `<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="#000000" viewBox="0 0 256 256"><path d="M88,24V16a8,8,0,0,1,16,0v8a8,8,0,0,1-16,0ZM16,104h8a8,8,0,0,0,0-16H16a8,8,0,0,0,0,16ZM124.42,39.16a8,8,0,0,0,10.74-3.58l8-16a8,8,0,0,0-14.31-7.16l-8,16A8,8,0,0,0,124.42,39.16Zm-96,81.69-16,8a8,8,0,0,0,7.16,14.31l16-8a8,8,0,1,0-7.16-14.31ZM219.31,184a16,16,0,0,1,0,22.63l-12.68,12.68a16,16,0,0,1-22.63,0L132.7,168,115,214.09c0,.1-.08.21-.13.32a15.83,15.83,0,0,1-14.6,9.59l-.79,0a15.83,15.83,0,0,1-14.41-11L32.8,52.92A16,16,0,0,1,52.92,32.8L213,85.07a16,16,0,0,1,1.41,29.8l-.32.13L168,132.69ZM208,195.31,156.69,144h0a16,16,0,0,1,4.93-26l.32-.14,45.95-17.64L48,48l52.2,159.86,17.65-46c0-.11.08-.22.13-.33a16,16,0,0,1,11.69-9.34,16.72,16.72,0,0,1,3-.28,16,16,0,0,1,11.3,4.69L195.31,208Z"></path></svg>`,
Keyboard: `<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="#000000" viewBox="0 0 256 256"><path d="M224,48H32A16,16,0,0,0,16,64V192a16,16,0,0,0,16,16H224a16,16,0,0,0,16-16V64A16,16,0,0,0,224,48Zm0,144H32V64H224V192Zm-16-64a8,8,0,0,1-8,8H56a8,8,0,0,1,0-16H200A8,8,0,0,1,208,128Zm0-32a8,8,0,0,1-8,8H56a8,8,0,0,1,0-16H200A8,8,0,0,1,208,96ZM72,160a8,8,0,0,1-8,8H56a8,8,0,0,1,0-16h8A8,8,0,0,1,72,160Zm96,0a8,8,0,0,1-8,8H96a8,8,0,0,1,0-16h64A8,8,0,0,1,168,160Zm40,0a8,8,0,0,1-8,8h-8a8,8,0,0,1,0-16h8A8,8,0,0,1,208,160Z"></path></svg>`,
Drag: `<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="#000000" viewBox="0 0 256 256"><path d="M188,80a27.79,27.79,0,0,0-13.36,3.4,28,28,0,0,0-46.64-11A28,28,0,0,0,80,92v20H68a28,28,0,0,0-28,28v12a88,88,0,0,0,176,0V108A28,28,0,0,0,188,80Zm12,72a72,72,0,0,1-144,0V140a12,12,0,0,1,12-12H80v24a8,8,0,0,0,16,0V92a12,12,0,0,1,24,0v28a8,8,0,0,0,16,0V92a12,12,0,0,1,24,0v28a8,8,0,0,0,16,0V108a12,12,0,0,1,24,0Z"></path></svg>`,
};
export function getIcon(
name: keyof typeof iconPaths,
options?: IconOptions,
): string {
return createIcon(iconPaths[name], options);
}
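For reference, the removed factory was used roughly like this (values are illustrative); it simply stamps the requested size and color into an inline SVG markup string.

// 20px keyboard icon in black.
const keyboardSvg = getIcon("Keyboard", { size: 20, color: "#000000" });

// Defaults (16px, zinc-600) when no options are passed.
const dragSvg = getIcon("Drag");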

View File

@@ -11,7 +11,6 @@ import {
} from "./helpers";
import { useNodeStore } from "../../../stores/nodeStore";
import { useEdgeStore } from "../../../stores/edgeStore";
import { useTutorialStore } from "../../../stores/tutorialStore";
let isTutorialLoading = false;
let tutorialLoadingCallback: ((loading: boolean) => void) | null = null;
@@ -61,14 +60,12 @@ export const startTutorial = async () => {
handleTutorialComplete();
removeTutorialStyles();
clearPrefetchedBlocks();
useTutorialStore.getState().setIsTutorialRunning(false);
});
tour.on("cancel", () => {
handleTutorialCancel(tour);
removeTutorialStyles();
clearPrefetchedBlocks();
useTutorialStore.getState().setIsTutorialRunning(false);
});
for (const step of tour.steps) {

View File

@@ -61,18 +61,12 @@ export const convertNodesPlusBlockInfoIntoCustomNodes = (
return customNode;
};
const isToolSourceName = (sourceName: string): boolean =>
sourceName.startsWith("tools_^_");
const cleanupSourceName = (sourceName: string): string =>
isToolSourceName(sourceName) ? "tools" : sourceName;
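The removed helpers collapsed tool-scoped source names back to the shared "tools" handle; a quick illustration (handle names are examples only):

cleanupSourceName("tools_^_search_web"); // -> "tools"
cleanupSourceName("output"); // -> "output" (non-tool handles pass through)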
export const linkToCustomEdge = (link: Link): CustomEdge => ({
id: link.id ?? "",
type: "custom" as const,
source: link.source_id,
target: link.sink_id,
sourceHandle: cleanupSourceName(link.source_name),
sourceHandle: link.source_name,
targetHandle: link.sink_name,
data: {
isStatic: link.is_static,

View File

@@ -0,0 +1,134 @@
"use client";
import { Button } from "@/components/atoms/Button/Button";
import { Text } from "@/components/atoms/Text/Text";
import { cn } from "@/lib/utils";
import { List } from "@phosphor-icons/react";
import React, { useState } from "react";
import { ChatContainer } from "./components/ChatContainer/ChatContainer";
import { ChatErrorState } from "./components/ChatErrorState/ChatErrorState";
import { ChatLoadingState } from "./components/ChatLoadingState/ChatLoadingState";
import { SessionsDrawer } from "./components/SessionsDrawer/SessionsDrawer";
import { useChat } from "./useChat";
export interface ChatProps {
className?: string;
headerTitle?: React.ReactNode;
showHeader?: boolean;
showSessionInfo?: boolean;
showNewChatButton?: boolean;
onNewChat?: () => void;
headerActions?: React.ReactNode;
}
export function Chat({
className,
headerTitle = "AutoGPT Copilot",
showHeader = true,
showSessionInfo = true,
showNewChatButton = true,
onNewChat,
headerActions,
}: ChatProps) {
const {
messages,
isLoading,
isCreating,
error,
sessionId,
createSession,
clearSession,
loadSession,
} = useChat();
const [isSessionsDrawerOpen, setIsSessionsDrawerOpen] = useState(false);
const handleNewChat = () => {
clearSession();
onNewChat?.();
};
const handleSelectSession = async (sessionId: string) => {
try {
await loadSession(sessionId);
} catch (err) {
console.error("Failed to load session:", err);
}
};
return (
<div className={cn("flex h-full flex-col", className)}>
{/* Header */}
{showHeader && (
<header className="shrink-0 border-t border-zinc-200 bg-white p-3">
<div className="flex items-center justify-between">
<div className="flex items-center gap-3">
<button
aria-label="View sessions"
onClick={() => setIsSessionsDrawerOpen(true)}
className="flex size-8 items-center justify-center rounded hover:bg-zinc-100"
>
<List width="1.25rem" height="1.25rem" />
</button>
{typeof headerTitle === "string" ? (
<Text variant="h2" className="text-lg font-semibold">
{headerTitle}
</Text>
) : (
headerTitle
)}
</div>
<div className="flex items-center gap-3">
{showSessionInfo && sessionId && (
<>
{showNewChatButton && (
<Button
variant="outline"
size="small"
onClick={handleNewChat}
>
New Chat
</Button>
)}
</>
)}
{headerActions}
</div>
</div>
</header>
)}
{/* Main Content */}
<main className="flex min-h-0 flex-1 flex-col overflow-hidden">
{/* Loading State - show when explicitly loading/creating OR when we don't have a session yet and no error */}
{(isLoading || isCreating || (!sessionId && !error)) && (
<ChatLoadingState
message={isCreating ? "Creating session..." : "Loading..."}
/>
)}
{/* Error State */}
{error && !isLoading && (
<ChatErrorState error={error} onRetry={createSession} />
)}
{/* Session Content */}
{sessionId && !isLoading && !error && (
<ChatContainer
sessionId={sessionId}
initialMessages={messages}
className="flex-1"
/>
)}
</main>
{/* Sessions Drawer */}
<SessionsDrawer
isOpen={isSessionsDrawerOpen}
onClose={() => setIsSessionsDrawerOpen(false)}
onSelectSession={handleSelectSession}
currentSessionId={sessionId}
/>
</div>
);
}

View File

@@ -21,7 +21,7 @@ export function AuthPromptWidget({
message,
sessionId,
agentInfo,
returnUrl = "/copilot/chat",
returnUrl = "/chat",
className,
}: AuthPromptWidgetProps) {
const router = useRouter();

View File

@@ -0,0 +1,88 @@
import type { SessionDetailResponse } from "@/app/api/__generated__/models/sessionDetailResponse";
import { cn } from "@/lib/utils";
import { useCallback } from "react";
import { usePageContext } from "../../usePageContext";
import { ChatInput } from "../ChatInput/ChatInput";
import { MessageList } from "../MessageList/MessageList";
import { QuickActionsWelcome } from "../QuickActionsWelcome/QuickActionsWelcome";
import { useChatContainer } from "./useChatContainer";
export interface ChatContainerProps {
sessionId: string | null;
initialMessages: SessionDetailResponse["messages"];
className?: string;
}
export function ChatContainer({
sessionId,
initialMessages,
className,
}: ChatContainerProps) {
const { messages, streamingChunks, isStreaming, sendMessage } =
useChatContainer({
sessionId,
initialMessages,
});
const { capturePageContext } = usePageContext();
// Wrap sendMessage to automatically capture page context
const sendMessageWithContext = useCallback(
async (content: string, isUserMessage: boolean = true) => {
const context = capturePageContext();
await sendMessage(content, isUserMessage, context);
},
[sendMessage, capturePageContext],
);
const quickActions = [
"Find agents for social media management",
"Show me agents for content creation",
"Help me automate my business",
"What can you help me with?",
];
return (
<div
className={cn("flex h-full min-h-0 flex-col", className)}
style={{
backgroundColor: "#ffffff",
backgroundImage:
"radial-gradient(#e5e5e5 0.5px, transparent 0.5px), radial-gradient(#e5e5e5 0.5px, #ffffff 0.5px)",
backgroundSize: "20px 20px",
backgroundPosition: "0 0, 10px 10px",
}}
>
{/* Messages or Welcome Screen */}
<div className="flex min-h-0 flex-1 flex-col overflow-hidden pb-24">
{messages.length === 0 ? (
<QuickActionsWelcome
title="Welcome to AutoGPT Copilot"
description="Start a conversation to discover and run AI agents."
actions={quickActions}
onActionClick={sendMessageWithContext}
disabled={isStreaming || !sessionId}
/>
) : (
<MessageList
messages={messages}
streamingChunks={streamingChunks}
isStreaming={isStreaming}
onSendMessage={sendMessageWithContext}
className="flex-1"
/>
)}
</div>
{/* Input - Always visible */}
<div className="fixed bottom-0 left-0 right-0 z-50 border-t border-zinc-200 bg-white p-4">
<ChatInput
onSend={sendMessageWithContext}
disabled={isStreaming || !sessionId}
placeholder={
sessionId ? "Type your message..." : "Creating session..."
}
/>
</div>
</div>
);
}

View File

@@ -1,6 +1,6 @@
import { toast } from "sonner";
import { StreamChunk } from "../../useChatStream";
import type { HandlerDependencies } from "./handlers";
import type { HandlerDependencies } from "./useChatContainer.handlers";
import {
handleError,
handleLoginNeeded,
@@ -9,30 +9,12 @@ import {
handleTextEnded,
handleToolCallStart,
handleToolResponse,
isRegionBlockedError,
} from "./handlers";
} from "./useChatContainer.handlers";
export function createStreamEventDispatcher(
deps: HandlerDependencies,
): (chunk: StreamChunk) => void {
return function dispatchStreamEvent(chunk: StreamChunk): void {
if (
chunk.type === "text_chunk" ||
chunk.type === "tool_call_start" ||
chunk.type === "tool_response" ||
chunk.type === "login_needed" ||
chunk.type === "need_login" ||
chunk.type === "error"
) {
if (!deps.hasResponseRef.current) {
console.info("[ChatStream] First response chunk:", {
type: chunk.type,
sessionId: deps.sessionId,
});
}
deps.hasResponseRef.current = true;
}
switch (chunk.type) {
case "text_chunk":
handleTextChunk(chunk, deps);
@@ -56,23 +38,15 @@ export function createStreamEventDispatcher(
break;
case "stream_end":
console.info("[ChatStream] Stream ended:", {
sessionId: deps.sessionId,
hasResponse: deps.hasResponseRef.current,
chunkCount: deps.streamingChunksRef.current.length,
});
handleStreamEnd(chunk, deps);
break;
case "error":
const isRegionBlocked = isRegionBlockedError(chunk);
handleError(chunk, deps);
// Show toast at dispatcher level to avoid circular dependencies
if (!isRegionBlocked) {
toast.error("Chat Error", {
description: chunk.message || chunk.content || "An error occurred",
});
}
toast.error("Chat Error", {
description: chunk.message || chunk.content || "An error occurred",
});
break;
case "usage":

View File

@@ -1,33 +1,6 @@
import { SessionKey, sessionStorage } from "@/services/storage/session-storage";
import type { ToolResult } from "@/types/chat";
import type { ChatMessageData } from "../ChatMessage/useChatMessage";
export function hasSentInitialPrompt(sessionId: string): boolean {
try {
const sent = JSON.parse(
sessionStorage.get(SessionKey.CHAT_SENT_INITIAL_PROMPTS) || "{}",
);
return sent[sessionId] === true;
} catch {
return false;
}
}
export function markInitialPromptSent(sessionId: string): void {
try {
const sent = JSON.parse(
sessionStorage.get(SessionKey.CHAT_SENT_INITIAL_PROMPTS) || "{}",
);
sent[sessionId] = true;
sessionStorage.set(
SessionKey.CHAT_SENT_INITIAL_PROMPTS,
JSON.stringify(sent),
);
} catch {
// Ignore storage errors
}
}
export function removePageContext(content: string): string {
// Remove "Page URL: ..." pattern at start of line (case insensitive, handles various formats)
let cleaned = content.replace(/^\s*Page URL:\s*[^\n\r]*/gim, "");
@@ -234,22 +207,12 @@ export function parseToolResponse(
if (responseType === "setup_requirements") {
return null;
}
if (responseType === "understanding_updated") {
return {
type: "tool_response",
toolId,
toolName,
result: (parsedResult || result) as ToolResult,
success: true,
timestamp: timestamp || new Date(),
};
}
}
return {
type: "tool_response",
toolId,
toolName,
result: parsedResult ? (parsedResult as ToolResult) : result,
result,
success: true,
timestamp: timestamp || new Date(),
};
@@ -304,34 +267,23 @@ export function extractCredentialsNeeded(
| undefined;
if (missingCreds && Object.keys(missingCreds).length > 0) {
const agentName = (setupInfo?.agent_name as string) || "this block";
const credentials = Object.values(missingCreds).map((credInfo) => {
  // Normalize to array at boundary - prefer 'types' array, fall back to single 'type'
  const typesArray = credInfo.types as
    | Array<"api_key" | "oauth2" | "user_password" | "host_scoped">
    | undefined;
  const singleType =
    (credInfo.type as
      | "api_key"
      | "oauth2"
      | "user_password"
      | "host_scoped"
      | undefined) || "api_key";
  const credentialTypes =
    typesArray && typesArray.length > 0 ? typesArray : [singleType];
  return {
    provider: (credInfo.provider as string) || "unknown",
    providerName:
      (credInfo.provider_name as string) ||
      (credInfo.provider as string) ||
      "Unknown Provider",
    credentialTypes,
    title:
      (credInfo.title as string) ||
      `${(credInfo.provider_name as string) || (credInfo.provider as string)} credentials`,
    scopes: credInfo.scopes as string[] | undefined,
  };
});
const credentials = Object.values(missingCreds).map((credInfo) => ({
  provider: (credInfo.provider as string) || "unknown",
  providerName:
    (credInfo.provider_name as string) ||
    (credInfo.provider as string) ||
    "Unknown Provider",
  credentialType:
    (credInfo.type as
      | "api_key"
      | "oauth2"
      | "user_password"
      | "host_scoped") || "api_key",
  title:
    (credInfo.title as string) ||
    `${(credInfo.provider_name as string) || (credInfo.provider as string)} credentials`,
  scopes: credInfo.scopes as string[] | undefined,
}));
return {
type: "credentials_needed",
toolName,
@@ -406,14 +358,11 @@ export function extractInputsNeeded(
credentials.forEach((cred) => {
const id = cred.id as string;
if (id) {
const credentialTypes = Array.isArray(cred.types)
? cred.types
: [(cred.type as string) || "api_key"];
credentialsSchema[id] = {
type: "object",
properties: {},
credentials_provider: [cred.provider as string],
credentials_types: credentialTypes,
credentials_types: [(cred.type as string) || "api_key"],
credentials_scopes: cred.scopes as string[] | undefined,
};
}
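Both hunks in this file revolve around the same boundary decision: the removed code accepted either a `types` array or a single `type` and always handed an array onwards, while the restored code keeps a single `credentialType`. A standalone sketch of that normalization (the function and argument names are illustrative, not part of the codebase):

type CredentialType = "api_key" | "oauth2" | "user_password" | "host_scoped";

// Accept either shape and always return an array, defaulting to api_key.
function normalizeCredentialTypes(credInfo: {
  types?: CredentialType[];
  type?: CredentialType;
}): CredentialType[] {
  if (credInfo.types && credInfo.types.length > 0) return credInfo.types;
  return [credInfo.type ?? "api_key"];
}

normalizeCredentialTypes({ type: "oauth2" }); // ["oauth2"]
normalizeCredentialTypes({ types: ["api_key", "oauth2"] }); // ["api_key", "oauth2"]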

View File

@@ -7,30 +7,15 @@ import {
parseToolResponse,
} from "./helpers";
function isToolCallMessage(
message: ChatMessageData,
): message is Extract<ChatMessageData, { type: "tool_call" }> {
return message.type === "tool_call";
}
export interface HandlerDependencies {
setHasTextChunks: Dispatch<SetStateAction<boolean>>;
setStreamingChunks: Dispatch<SetStateAction<string[]>>;
streamingChunksRef: MutableRefObject<string[]>;
hasResponseRef: MutableRefObject<boolean>;
setMessages: Dispatch<SetStateAction<ChatMessageData[]>>;
setIsStreamingInitiated: Dispatch<SetStateAction<boolean>>;
setIsRegionBlockedModalOpen: Dispatch<SetStateAction<boolean>>;
sessionId: string;
}
export function isRegionBlockedError(chunk: StreamChunk): boolean {
if (chunk.code === "MODEL_NOT_AVAILABLE_REGION") return true;
const message = chunk.message || chunk.content;
if (typeof message !== "string") return false;
return message.toLowerCase().includes("not available in your region");
}
export function handleTextChunk(chunk: StreamChunk, deps: HandlerDependencies) {
if (!chunk.content) return;
deps.setHasTextChunks(true);
@@ -45,17 +30,16 @@ export function handleTextEnded(
_chunk: StreamChunk,
deps: HandlerDependencies,
) {
console.log("[Text Ended] Saving streamed text as assistant message");
const completedText = deps.streamingChunksRef.current.join("");
if (completedText.trim()) {
deps.setMessages((prev) => {
const assistantMessage: ChatMessageData = {
type: "message",
role: "assistant",
content: completedText,
timestamp: new Date(),
};
return [...prev, assistantMessage];
});
const assistantMessage: ChatMessageData = {
type: "message",
role: "assistant",
content: completedText,
timestamp: new Date(),
};
deps.setMessages((prev) => [...prev, assistantMessage]);
}
deps.setStreamingChunks([]);
deps.streamingChunksRef.current = [];
@@ -66,45 +50,30 @@ export function handleToolCallStart(
chunk: StreamChunk,
deps: HandlerDependencies,
) {
const toolCallMessage: Extract<ChatMessageData, { type: "tool_call" }> = {
const toolCallMessage: ChatMessageData = {
type: "tool_call",
toolId: chunk.tool_id || `tool-${Date.now()}-${chunk.idx || 0}`,
toolName: chunk.tool_name || "Executing",
toolName: chunk.tool_name || "Executing...",
arguments: chunk.arguments || {},
timestamp: new Date(),
};
function updateToolCallMessages(prev: ChatMessageData[]) {
const existingIndex = prev.findIndex(function findToolCallIndex(msg) {
return isToolCallMessage(msg) && msg.toolId === toolCallMessage.toolId;
});
if (existingIndex === -1) {
return [...prev, toolCallMessage];
}
const nextMessages = [...prev];
const existing = nextMessages[existingIndex];
if (!isToolCallMessage(existing)) return prev;
const nextArguments =
toolCallMessage.arguments &&
Object.keys(toolCallMessage.arguments).length > 0
? toolCallMessage.arguments
: existing.arguments;
nextMessages[existingIndex] = {
...existing,
toolName: toolCallMessage.toolName || existing.toolName,
arguments: nextArguments,
timestamp: toolCallMessage.timestamp,
};
return nextMessages;
}
deps.setMessages(updateToolCallMessages);
deps.setMessages((prev) => [...prev, toolCallMessage]);
console.log("[Tool Call Start]", {
toolId: toolCallMessage.toolId,
toolName: toolCallMessage.toolName,
timestamp: new Date().toISOString(),
});
}
export function handleToolResponse(
chunk: StreamChunk,
deps: HandlerDependencies,
) {
console.log("[Tool Response] Received:", {
toolId: chunk.tool_id,
toolName: chunk.tool_name,
timestamp: new Date().toISOString(),
});
let toolName = chunk.tool_name || "unknown";
if (!chunk.tool_name || chunk.tool_name === "unknown") {
deps.setMessages((prev) => {
@@ -158,15 +127,22 @@ export function handleToolResponse(
const toolCallIndex = prev.findIndex(
(msg) => msg.type === "tool_call" && msg.toolId === chunk.tool_id,
);
const hasResponse = prev.some(
(msg) => msg.type === "tool_response" && msg.toolId === chunk.tool_id,
);
if (hasResponse) return prev;
if (toolCallIndex !== -1) {
const newMessages = [...prev];
newMessages.splice(toolCallIndex + 1, 0, responseMessage);
newMessages[toolCallIndex] = responseMessage;
console.log(
"[Tool Response] Replaced tool_call with matching tool_id:",
chunk.tool_id,
"at index:",
toolCallIndex,
);
return newMessages;
}
console.warn(
"[Tool Response] No tool_call found with tool_id:",
chunk.tool_id,
"appending instead",
);
return [...prev, responseMessage];
});
}
@@ -191,38 +167,55 @@ export function handleStreamEnd(
deps: HandlerDependencies,
) {
const completedContent = deps.streamingChunksRef.current.join("");
if (!completedContent.trim() && !deps.hasResponseRef.current) {
deps.setMessages((prev) => [
...prev,
{
type: "message",
role: "assistant",
content: "No response received. Please try again.",
timestamp: new Date(),
},
]);
}
// Only save message if there are uncommitted chunks
// (text_ended already saved if there were tool calls)
if (completedContent.trim()) {
console.log(
"[Stream End] Saving remaining streamed text as assistant message",
);
const assistantMessage: ChatMessageData = {
type: "message",
role: "assistant",
content: completedContent,
timestamp: new Date(),
};
deps.setMessages((prev) => [...prev, assistantMessage]);
deps.setMessages((prev) => {
const updated = [...prev, assistantMessage];
console.log("[Stream End] Final state:", {
localMessages: updated.map((m) => ({
type: m.type,
...(m.type === "message" && {
role: m.role,
contentLength: m.content.length,
}),
...(m.type === "tool_call" && {
toolId: m.toolId,
toolName: m.toolName,
}),
...(m.type === "tool_response" && {
toolId: m.toolId,
toolName: m.toolName,
success: m.success,
}),
})),
streamingChunks: deps.streamingChunksRef.current,
timestamp: new Date().toISOString(),
});
return updated;
});
} else {
console.log("[Stream End] No uncommitted chunks, message already saved");
}
deps.setStreamingChunks([]);
deps.streamingChunksRef.current = [];
deps.setHasTextChunks(false);
deps.setIsStreamingInitiated(false);
console.log("[Stream End] Stream complete, messages in local state");
}
export function handleError(chunk: StreamChunk, deps: HandlerDependencies) {
const errorMessage = chunk.message || chunk.content || "An error occurred";
console.error("Stream error:", errorMessage);
if (isRegionBlockedError(chunk)) {
deps.setIsRegionBlockedModalOpen(true);
}
deps.setIsStreamingInitiated(false);
deps.setHasTextChunks(false);
deps.setStreamingChunks([]);

View File

@@ -1,17 +1,14 @@
import type { SessionDetailResponse } from "@/app/api/__generated__/models/sessionDetailResponse";
import { useCallback, useEffect, useMemo, useRef, useState } from "react";
import { useCallback, useMemo, useRef, useState } from "react";
import { toast } from "sonner";
import { useChatStream } from "../../useChatStream";
import { usePageContext } from "../../usePageContext";
import type { ChatMessageData } from "../ChatMessage/useChatMessage";
import { createStreamEventDispatcher } from "./createStreamEventDispatcher";
import {
createUserMessage,
filterAuthMessages,
hasSentInitialPrompt,
isToolCallArray,
isValidMessage,
markInitialPromptSent,
parseToolResponse,
removePageContext,
} from "./helpers";
@@ -19,45 +16,20 @@ import {
interface Args {
sessionId: string | null;
initialMessages: SessionDetailResponse["messages"];
initialPrompt?: string;
}
export function useChatContainer({
sessionId,
initialMessages,
initialPrompt,
}: Args) {
export function useChatContainer({ sessionId, initialMessages }: Args) {
const [messages, setMessages] = useState<ChatMessageData[]>([]);
const [streamingChunks, setStreamingChunks] = useState<string[]>([]);
const [hasTextChunks, setHasTextChunks] = useState(false);
const [isStreamingInitiated, setIsStreamingInitiated] = useState(false);
const [isRegionBlockedModalOpen, setIsRegionBlockedModalOpen] =
useState(false);
const hasResponseRef = useRef(false);
const streamingChunksRef = useRef<string[]>([]);
const previousSessionIdRef = useRef<string | null>(null);
const {
error,
sendMessage: sendStreamMessage,
stopStreaming,
} = useChatStream();
const { error, sendMessage: sendStreamMessage } = useChatStream();
const isStreaming = isStreamingInitiated || hasTextChunks;
useEffect(() => {
if (sessionId !== previousSessionIdRef.current) {
stopStreaming(previousSessionIdRef.current ?? undefined, true);
previousSessionIdRef.current = sessionId;
setMessages([]);
setStreamingChunks([]);
streamingChunksRef.current = [];
setHasTextChunks(false);
setIsStreamingInitiated(false);
hasResponseRef.current = false;
}
}, [sessionId, stopStreaming]);
const allMessages = useMemo(() => {
const processedInitialMessages: ChatMessageData[] = [];
// Map to track tool calls by their ID so we can look up tool names for tool responses
const toolCallMap = new Map<string, string>();
for (const msg of initialMessages) {
@@ -73,9 +45,13 @@ export function useChatContainer({
? new Date(msg.timestamp as string)
: undefined;
// Remove page context from user messages when loading existing sessions
if (role === "user") {
content = removePageContext(content);
if (!content.trim()) continue;
// Skip user messages that become empty after removing page context
if (!content.trim()) {
continue;
}
processedInitialMessages.push({
type: "message",
role: "user",
@@ -85,15 +61,19 @@ export function useChatContainer({
continue;
}
// Handle assistant messages first (before tool messages) to build tool call map
if (role === "assistant") {
// Strip <thinking> tags from content
content = content
.replace(/<thinking>[\s\S]*?<\/thinking>/gi, "")
.trim();
// If assistant has tool calls, create tool_call messages for each
if (toolCalls && isToolCallArray(toolCalls) && toolCalls.length > 0) {
for (const toolCall of toolCalls) {
const toolName = toolCall.function.name;
const toolId = toolCall.id;
// Store tool name for later lookup
toolCallMap.set(toolId, toolName);
try {
@@ -116,6 +96,7 @@ export function useChatContainer({
});
}
}
// Only add assistant message if there's content after stripping thinking tags
if (content.trim()) {
processedInitialMessages.push({
type: "message",
@@ -125,6 +106,7 @@ export function useChatContainer({
});
}
} else if (content.trim()) {
// Assistant message without tool calls, but with content
processedInitialMessages.push({
type: "message",
role: "assistant",
@@ -135,6 +117,7 @@ export function useChatContainer({
continue;
}
// Handle tool messages - look up tool name from tool call map
if (role === "tool") {
const toolCallId = (msg.tool_call_id as string) || "";
const toolName = toolCallMap.get(toolCallId) || "unknown";
@@ -150,6 +133,7 @@ export function useChatContainer({
continue;
}
// Handle other message types (system, etc.)
if (content.trim()) {
processedInitialMessages.push({
type: "message",
@@ -170,10 +154,9 @@ export function useChatContainer({
context?: { url: string; content: string },
) {
if (!sessionId) {
console.error("[useChatContainer] Cannot send message: no session ID");
console.error("Cannot send message: no session ID");
return;
}
setIsRegionBlockedModalOpen(false);
if (isUserMessage) {
const userMessage = createUserMessage(content);
setMessages((prev) => [...filterAuthMessages(prev), userMessage]);
@@ -184,19 +167,14 @@ export function useChatContainer({
streamingChunksRef.current = [];
setHasTextChunks(false);
setIsStreamingInitiated(true);
hasResponseRef.current = false;
const dispatcher = createStreamEventDispatcher({
setHasTextChunks,
setStreamingChunks,
streamingChunksRef,
hasResponseRef,
setMessages,
setIsRegionBlockedModalOpen,
sessionId,
setIsStreamingInitiated,
});
try {
await sendStreamMessage(
sessionId,
@@ -206,12 +184,8 @@ export function useChatContainer({
context,
);
} catch (err) {
console.error("[useChatContainer] Failed to send message:", err);
console.error("Failed to send message:", err);
setIsStreamingInitiated(false);
// Don't show error toast for AbortError (expected during cleanup)
if (err instanceof Error && err.name === "AbortError") return;
const errorMessage =
err instanceof Error ? err.message : "Failed to send message";
toast.error("Failed to send message", {
@@ -222,63 +196,11 @@ export function useChatContainer({
[sessionId, sendStreamMessage],
);
const handleStopStreaming = useCallback(() => {
stopStreaming();
setStreamingChunks([]);
streamingChunksRef.current = [];
setHasTextChunks(false);
setIsStreamingInitiated(false);
}, [stopStreaming]);
const { capturePageContext } = usePageContext();
// Send initial prompt if provided (for new sessions from homepage)
useEffect(
function handleInitialPrompt() {
if (!initialPrompt || !sessionId) return;
if (initialMessages.length > 0) return;
if (hasSentInitialPrompt(sessionId)) return;
markInitialPromptSent(sessionId);
const context = capturePageContext();
sendMessage(initialPrompt, true, context);
},
[
initialPrompt,
sessionId,
initialMessages.length,
sendMessage,
capturePageContext,
],
);
async function sendMessageWithContext(
content: string,
isUserMessage: boolean = true,
) {
const context = capturePageContext();
await sendMessage(content, isUserMessage, context);
}
function handleRegionModalOpenChange(open: boolean) {
setIsRegionBlockedModalOpen(open);
}
function handleRegionModalClose() {
setIsRegionBlockedModalOpen(false);
}
return {
messages: allMessages,
streamingChunks,
isStreaming,
error,
isRegionBlockedModalOpen,
setIsRegionBlockedModalOpen,
sendMessageWithContext,
handleRegionModalOpenChange,
handleRegionModalClose,
sendMessage,
stopStreaming: handleStopStreaming,
};
}

View File

@@ -9,9 +9,7 @@ import { useChatCredentialsSetup } from "./useChatCredentialsSetup";
export interface CredentialInfo {
provider: string;
providerName: string;
credentialTypes: Array<
"api_key" | "oauth2" | "user_password" | "host_scoped"
>;
credentialType: "api_key" | "oauth2" | "user_password" | "host_scoped";
title: string;
scopes?: string[];
}
@@ -32,7 +30,7 @@ function createSchemaFromCredentialInfo(
type: "object",
properties: {},
credentials_provider: [credential.provider],
credentials_types: credential.credentialTypes,
credentials_types: [credential.credentialType],
credentials_scopes: credential.scopes,
discriminator: undefined,
discriminator_mapping: undefined,

View File

@@ -0,0 +1,64 @@
import { Input } from "@/components/atoms/Input/Input";
import { cn } from "@/lib/utils";
import { ArrowUpIcon } from "@phosphor-icons/react";
import { useChatInput } from "./useChatInput";
export interface ChatInputProps {
onSend: (message: string) => void;
disabled?: boolean;
placeholder?: string;
className?: string;
}
export function ChatInput({
onSend,
disabled = false,
placeholder = "Type your message...",
className,
}: ChatInputProps) {
const inputId = "chat-input";
const { value, setValue, handleKeyDown, handleSend } = useChatInput({
onSend,
disabled,
maxRows: 5,
inputId,
});
return (
<div className={cn("relative flex-1", className)}>
<Input
id={inputId}
label="Chat message input"
hideLabel
type="textarea"
value={value}
onChange={(e) => setValue(e.target.value)}
onKeyDown={handleKeyDown}
placeholder={placeholder}
disabled={disabled}
rows={1}
wrapperClassName="mb-0 relative"
className="pr-12"
/>
<span id="chat-input-hint" className="sr-only">
Press Enter to send, Shift+Enter for new line
</span>
<button
onClick={handleSend}
disabled={disabled || !value.trim()}
className={cn(
"absolute right-3 top-1/2 flex h-8 w-8 -translate-y-1/2 items-center justify-center rounded-full",
"border border-zinc-800 bg-zinc-800 text-white",
"hover:border-zinc-900 hover:bg-zinc-900",
"disabled:border-zinc-200 disabled:bg-zinc-200 disabled:text-white disabled:opacity-50",
"transition-colors focus-visible:outline-none focus-visible:ring-1 focus-visible:ring-neutral-950",
"disabled:pointer-events-none",
)}
aria-label="Send message"
>
<ArrowUpIcon className="h-3 w-3" weight="bold" />
</button>
</div>
);
}

View File

@@ -0,0 +1,60 @@
import { KeyboardEvent, useCallback, useEffect, useState } from "react";
interface UseChatInputArgs {
onSend: (message: string) => void;
disabled?: boolean;
maxRows?: number;
inputId?: string;
}
export function useChatInput({
onSend,
disabled = false,
maxRows = 5,
inputId = "chat-input",
}: UseChatInputArgs) {
const [value, setValue] = useState("");
useEffect(() => {
const textarea = document.getElementById(inputId) as HTMLTextAreaElement;
if (!textarea) return;
textarea.style.height = "auto";
const lineHeight = parseInt(
window.getComputedStyle(textarea).lineHeight,
10,
);
const maxHeight = lineHeight * maxRows;
const newHeight = Math.min(textarea.scrollHeight, maxHeight);
textarea.style.height = `${newHeight}px`;
textarea.style.overflowY =
textarea.scrollHeight > maxHeight ? "auto" : "hidden";
}, [value, maxRows, inputId]);
const handleSend = useCallback(() => {
if (disabled || !value.trim()) return;
onSend(value.trim());
setValue("");
const textarea = document.getElementById(inputId) as HTMLTextAreaElement;
if (textarea) {
textarea.style.height = "auto";
}
}, [value, onSend, disabled, inputId]);
const handleKeyDown = useCallback(
(event: KeyboardEvent<HTMLInputElement | HTMLTextAreaElement>) => {
if (event.key === "Enter" && !event.shiftKey) {
event.preventDefault();
handleSend();
}
// Shift+Enter allows default behavior (new line) - no need to handle explicitly
},
[handleSend],
);
return {
value,
setValue,
handleKeyDown,
handleSend,
};
}
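Illustrative usage only (the component and id below are made up): the hook drives any plain textarea whose DOM id matches `inputId`, growing it line by line up to `maxRows` before switching to scrolling, with Enter (without Shift) submitting the trimmed value.

function MiniComposer() {
  const { value, setValue, handleKeyDown } = useChatInput({
    onSend: (message) => console.log("send", message),
    maxRows: 3,
    inputId: "mini-composer",
  });

  return (
    <textarea
      id="mini-composer"
      rows={1}
      value={value}
      onChange={(e) => setValue(e.target.value)}
      onKeyDown={handleKeyDown}
    />
  );
}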

View File

@@ -1,65 +1,48 @@
"use client";
import { useGetV2GetUserProfile } from "@/app/api/__generated__/endpoints/store/store";
import Avatar, {
AvatarFallback,
AvatarImage,
} from "@/components/atoms/Avatar/Avatar";
import { Button } from "@/components/atoms/Button/Button";
import { useSupabase } from "@/lib/supabase/hooks/useSupabase";
import { cn } from "@/lib/utils";
import {
ArrowsClockwiseIcon,
ArrowClockwise,
CheckCircleIcon,
CheckIcon,
CopyIcon,
RobotIcon,
} from "@phosphor-icons/react";
import { useRouter } from "next/navigation";
import { useCallback, useState } from "react";
import { getToolActionPhrase } from "../../helpers";
import { AgentCarouselMessage } from "../AgentCarouselMessage/AgentCarouselMessage";
import { AIChatBubble } from "../AIChatBubble/AIChatBubble";
import { AuthPromptWidget } from "../AuthPromptWidget/AuthPromptWidget";
import { ChatCredentialsSetup } from "../ChatCredentialsSetup/ChatCredentialsSetup";
import { ExecutionStartedMessage } from "../ExecutionStartedMessage/ExecutionStartedMessage";
import { MarkdownContent } from "../MarkdownContent/MarkdownContent";
import { MessageBubble } from "../MessageBubble/MessageBubble";
import { NoResultsMessage } from "../NoResultsMessage/NoResultsMessage";
import { ToolCallMessage } from "../ToolCallMessage/ToolCallMessage";
import { ToolResponseMessage } from "../ToolResponseMessage/ToolResponseMessage";
import { UserChatBubble } from "../UserChatBubble/UserChatBubble";
import { useChatMessage, type ChatMessageData } from "./useChatMessage";
function stripInternalReasoning(content: string): string {
const cleaned = content.replace(
/<internal_reasoning>[\s\S]*?<\/internal_reasoning>/gi,
"",
);
return cleaned.replace(/\n{3,}/g, "\n\n").trim();
}
function getDisplayContent(message: ChatMessageData, isUser: boolean): string {
if (message.type !== "message") return "";
if (isUser) return message.content;
return stripInternalReasoning(message.content);
}
export interface ChatMessageProps {
message: ChatMessageData;
messages?: ChatMessageData[];
index?: number;
isStreaming?: boolean;
className?: string;
onDismissLogin?: () => void;
onDismissCredentials?: () => void;
onSendMessage?: (content: string, isUserMessage?: boolean) => void;
agentOutput?: ChatMessageData;
isFinalMessage?: boolean;
}
export function ChatMessage({
message,
messages = [],
index = -1,
isStreaming = false,
className,
onDismissCredentials,
onSendMessage,
agentOutput,
isFinalMessage = true,
}: ChatMessageProps) {
const { user } = useSupabase();
const router = useRouter();
@@ -71,7 +54,14 @@ export function ChatMessage({
isLoginNeeded,
isCredentialsNeeded,
} = useChatMessage(message);
const displayContent = getDisplayContent(message, isUser);
const { data: profile } = useGetV2GetUserProfile({
query: {
select: (res) => (res.status === 200 ? res.data : null),
enabled: isUser && !!user,
queryKey: ["/api/store/profile", user?.id],
},
});
const handleAllCredentialsComplete = useCallback(
function handleAllCredentialsComplete() {
@@ -97,25 +87,17 @@ export function ChatMessage({
}
}
const handleCopy = useCallback(
  async function handleCopy() {
    if (message.type !== "message") return;
    if (!displayContent) return;
    try {
      await navigator.clipboard.writeText(displayContent);
      setCopied(true);
      setTimeout(() => setCopied(false), 2000);
    } catch (error) {
      console.error("Failed to copy:", error);
    }
  },
  [displayContent, message],
);
function isLongResponse(content: string): boolean {
  return content.split("\n").length > 5;
}
const handleCopy = useCallback(async () => {
  if (message.type !== "message") return;
  try {
    await navigator.clipboard.writeText(message.content);
    setCopied(true);
    setTimeout(() => setCopied(false), 2000);
  } catch (error) {
    console.error("Failed to copy:", error);
  }
}, [message]);
const handleTryAgain = useCallback(() => {
if (message.type !== "message" || !onSendMessage) return;
@@ -187,45 +169,9 @@ export function ChatMessage({
// Render tool call messages
if (isToolCall && message.type === "tool_call") {
// Check if this tool call is currently streaming
// A tool call is streaming if:
// 1. isStreaming is true
// 2. This is the last tool_call message
// 3. There's no tool_response for this tool call yet
const isToolCallStreaming =
isStreaming &&
index >= 0 &&
(() => {
// Find the last tool_call index
let lastToolCallIndex = -1;
for (let i = messages.length - 1; i >= 0; i--) {
if (messages[i].type === "tool_call") {
lastToolCallIndex = i;
break;
}
}
// Check if this is the last tool_call and there's no response yet
if (index === lastToolCallIndex) {
// Check if there's a tool_response for this tool call
const hasResponse = messages
.slice(index + 1)
.some(
(msg) =>
msg.type === "tool_response" && msg.toolId === message.toolId,
);
return !hasResponse;
}
return false;
})();
return (
<div className={cn("px-4 py-2", className)}>
<ToolCallMessage
toolId={message.toolId}
toolName={message.toolName}
arguments={message.arguments}
isStreaming={isToolCallStreaming}
/>
<ToolCallMessage toolName={message.toolName} />
</div>
);
}
@@ -272,11 +218,27 @@ export function ChatMessage({
// Render tool response messages (but skip agent_output if it's being rendered inside assistant message)
if (isToolResponse && message.type === "tool_response") {
// Check if this is an agent_output that should be rendered inside assistant message
if (message.result) {
let parsedResult: Record<string, unknown> | null = null;
try {
parsedResult =
typeof message.result === "string"
? JSON.parse(message.result)
: (message.result as Record<string, unknown>);
} catch {
parsedResult = null;
}
if (parsedResult?.type === "agent_output") {
// Skip rendering - this will be rendered inside the assistant message
return null;
}
}
return (
<div className={cn("px-4 py-2", className)}>
<ToolResponseMessage
toolId={message.toolId}
toolName={message.toolName}
toolName={getToolActionPhrase(message.toolName)}
result={message.result}
/>
</div>
@@ -294,33 +256,40 @@ export function ChatMessage({
)}
>
<div className="flex w-full max-w-3xl gap-3">
{!isUser && (
<div className="flex-shrink-0">
<div className="flex h-7 w-7 items-center justify-center rounded-lg bg-indigo-500">
<RobotIcon className="h-4 w-4 text-indigo-50" />
</div>
</div>
)}
<div
className={cn(
"flex min-w-0 flex-1 flex-col",
isUser && "items-end",
)}
>
{isUser ? (
<UserChatBubble>
<MarkdownContent content={displayContent} />
</UserChatBubble>
) : (
<AIChatBubble>
<MarkdownContent content={displayContent} />
{agentOutput && agentOutput.type === "tool_response" && (
<MessageBubble variant={isUser ? "user" : "assistant"}>
<MarkdownContent content={message.content} />
{agentOutput &&
agentOutput.type === "tool_response" &&
!isUser && (
<div className="mt-4">
<ToolResponseMessage
toolId={agentOutput.toolId}
toolName={agentOutput.toolName || "Agent Output"}
toolName={
agentOutput.toolName
? getToolActionPhrase(agentOutput.toolName)
: "Agent Output"
}
result={agentOutput.result}
/>
</div>
)}
</AIChatBubble>
)}
</MessageBubble>
<div
className={cn(
"flex gap-0",
"mt-1 flex gap-1",
isUser ? "justify-end" : "justify-start",
)}
>
@@ -331,25 +300,37 @@ export function ChatMessage({
onClick={handleTryAgain}
aria-label="Try again"
>
<ArrowsClockwiseIcon className="size-4 text-zinc-600" />
</Button>
)}
{!isUser && isFinalMessage && isLongResponse(displayContent) && (
<Button
variant="ghost"
size="icon"
onClick={handleCopy}
aria-label="Copy message"
>
{copied ? (
<CheckIcon className="size-4 text-green-600" />
) : (
<CopyIcon className="size-4 text-zinc-600" />
)}
<ArrowClockwise className="size-3 text-neutral-500" />
</Button>
)}
<Button
variant="ghost"
size="icon"
onClick={handleCopy}
aria-label="Copy message"
>
{copied ? (
<CheckIcon className="size-3 text-green-600" />
) : (
<CopyIcon className="size-3 text-neutral-500" />
)}
</Button>
</div>
</div>
{isUser && (
<div className="flex-shrink-0">
<Avatar className="h-7 w-7">
<AvatarImage
src={profile?.avatar_url ?? ""}
alt={profile?.username ?? "User"}
/>
<AvatarFallback className="rounded-lg bg-neutral-200 text-neutral-600">
{profile?.username?.charAt(0)?.toUpperCase() || "U"}
</AvatarFallback>
</Avatar>
</div>
)}
</div>
</div>
);

View File

@@ -41,9 +41,7 @@ export type ChatMessageData =
credentials: Array<{
provider: string;
providerName: string;
credentialTypes: Array<
"api_key" | "oauth2" | "user_password" | "host_scoped"
>;
credentialType: "api_key" | "oauth2" | "user_password" | "host_scoped";
title: string;
scopes?: string[];
}>;

Some files were not shown because too many files have changed in this diff.