mirror of https://github.com/Significant-Gravitas/AutoGPT.git
synced 2026-02-16 09:46:07 -05:00

Compare commits: chore/remo... → dev (5 commits)

| SHA1 |
|---|
| 9d4dcbd9e0 |
| 074be7aea6 |
| 39d28b24fc |
| bf79a7748a |
| 649d4ab7f5 |
.github/workflows/platform-backend-ci.yml (vendored): 9 lines changed
@@ -41,13 +41,18 @@ jobs:
         ports:
           - 6379:6379
       rabbitmq:
-        image: rabbitmq:3.12-management
+        image: rabbitmq:4.1.4
         ports:
           - 5672:5672
-          - 15672:15672
         env:
           RABBITMQ_DEFAULT_USER: ${{ env.RABBITMQ_DEFAULT_USER }}
           RABBITMQ_DEFAULT_PASS: ${{ env.RABBITMQ_DEFAULT_PASS }}
+        options: >-
+          --health-cmd "rabbitmq-diagnostics -q ping"
+          --health-interval 30s
+          --health-timeout 10s
+          --health-retries 5
+          --health-start-period 10s
       clamav:
         image: clamav/clamav-debian:latest
         ports:
.github/workflows/platform-frontend-ci.yml (vendored): 6 lines changed
@@ -6,10 +6,16 @@ on:
     paths:
       - ".github/workflows/platform-frontend-ci.yml"
      - "autogpt_platform/frontend/**"
+      - "autogpt_platform/backend/Dockerfile"
+      - "autogpt_platform/docker-compose.yml"
+      - "autogpt_platform/docker-compose.platform.yml"
   pull_request:
     paths:
       - ".github/workflows/platform-frontend-ci.yml"
       - "autogpt_platform/frontend/**"
+      - "autogpt_platform/backend/Dockerfile"
+      - "autogpt_platform/docker-compose.yml"
+      - "autogpt_platform/docker-compose.platform.yml"
   merge_group:
   workflow_dispatch:

autogpt_platform/backend/Dockerfile

@@ -53,63 +53,6 @@ jobs:
 COPY autogpt_platform/backend/backend/data/partial_types.py ./backend/data/partial_types.py
 COPY autogpt_platform/backend/gen_prisma_types_stub.py ./
 RUN poetry run prisma generate && poetry run gen-prisma-stub

-# ============================== BACKEND SERVER ============================== #
-
-FROM debian:13-slim AS server
-
-WORKDIR /app
-
-ENV POETRY_HOME=/opt/poetry \
-    POETRY_NO_INTERACTION=1 \
-    POETRY_VIRTUALENVS_CREATE=true \
-    POETRY_VIRTUALENVS_IN_PROJECT=true \
-    DEBIAN_FRONTEND=noninteractive
-ENV PATH=/opt/poetry/bin:$PATH
-
-# Install Python, FFmpeg, ImageMagick, and CLI tools for agent use.
-# bubblewrap provides OS-level sandbox (whitelist-only FS + no network)
-# for the bash_exec MCP tool.
-# Using --no-install-recommends saves ~650MB by skipping unnecessary deps like llvm, mesa, etc.
-RUN apt-get update && apt-get install -y --no-install-recommends \
-    python3.13 \
-    python3-pip \
-    ffmpeg \
-    imagemagick \
-    jq \
-    ripgrep \
-    tree \
-    bubblewrap \
-    && rm -rf /var/lib/apt/lists/*
-
-COPY --from=builder /usr/local/lib/python3* /usr/local/lib/python3*
-COPY --from=builder /usr/local/bin/poetry /usr/local/bin/poetry
-# Copy Node.js installation for Prisma
-COPY --from=builder /usr/bin/node /usr/bin/node
-COPY --from=builder /usr/lib/node_modules /usr/lib/node_modules
-COPY --from=builder /usr/bin/npm /usr/bin/npm
-COPY --from=builder /usr/bin/npx /usr/bin/npx
-COPY --from=builder /root/.cache/prisma-python/binaries /root/.cache/prisma-python/binaries
-
-WORKDIR /app/autogpt_platform/backend
-
-# Copy only the .venv from builder (not the entire /app directory)
-# The .venv includes the generated Prisma client
-COPY --from=builder /app/autogpt_platform/backend/.venv ./.venv
-ENV PATH="/app/autogpt_platform/backend/.venv/bin:$PATH"
-
-# Copy dependency files + autogpt_libs (path dependency)
-COPY autogpt_platform/autogpt_libs /app/autogpt_platform/autogpt_libs
-COPY autogpt_platform/backend/poetry.lock autogpt_platform/backend/pyproject.toml ./
-
-# Copy backend code + docs (for Copilot docs search)
-COPY autogpt_platform/backend ./
-COPY docs /app/docs
-RUN poetry install --no-ansi --only-root
-
-ENV PORT=8000
-
-CMD ["poetry", "run", "rest"]
-
 # =============================== DB MIGRATOR =============================== #

 # Lightweight migrate stage - only needs Prisma CLI, not full Python environment
@@ -141,3 +84,59 @@ COPY autogpt_platform/backend/schema.prisma ./
 COPY autogpt_platform/backend/backend/data/partial_types.py ./backend/data/partial_types.py
 COPY autogpt_platform/backend/gen_prisma_types_stub.py ./
 COPY autogpt_platform/backend/migrations ./migrations
+
+# ============================== BACKEND SERVER ============================== #
+
+FROM debian:13-slim AS server
+
+WORKDIR /app
+
+ENV DEBIAN_FRONTEND=noninteractive
+
+# Install Python, FFmpeg, ImageMagick, and CLI tools for agent use.
+# bubblewrap provides OS-level sandbox (whitelist-only FS + no network)
+# for the bash_exec MCP tool.
+# Using --no-install-recommends saves ~650MB by skipping unnecessary deps like llvm, mesa, etc.
+RUN apt-get update && apt-get install -y --no-install-recommends \
+    python3.13 \
+    python3-pip \
+    ffmpeg \
+    imagemagick \
+    jq \
+    ripgrep \
+    tree \
+    bubblewrap \
+    && rm -rf /var/lib/apt/lists/*
+
+# Copy poetry (build-time only, for `poetry install --only-root` to create entry points)
+COPY --from=builder /usr/local/lib/python3* /usr/local/lib/python3*
+COPY --from=builder /usr/local/bin/poetry /usr/local/bin/poetry
+# Copy Node.js installation for Prisma
+COPY --from=builder /usr/bin/node /usr/bin/node
+COPY --from=builder /usr/lib/node_modules /usr/lib/node_modules
+COPY --from=builder /usr/bin/npm /usr/bin/npm
+COPY --from=builder /usr/bin/npx /usr/bin/npx
+COPY --from=builder /root/.cache/prisma-python/binaries /root/.cache/prisma-python/binaries
+
+WORKDIR /app/autogpt_platform/backend
+
+# Copy only the .venv from builder (not the entire /app directory)
+# The .venv includes the generated Prisma client
+COPY --from=builder /app/autogpt_platform/backend/.venv ./.venv
+ENV PATH="/app/autogpt_platform/backend/.venv/bin:$PATH"
+
+# Copy dependency files + autogpt_libs (path dependency)
+COPY autogpt_platform/autogpt_libs /app/autogpt_platform/autogpt_libs
+COPY autogpt_platform/backend/poetry.lock autogpt_platform/backend/pyproject.toml ./
+
+# Copy backend code + docs (for Copilot docs search)
+COPY autogpt_platform/backend ./
+COPY docs /app/docs
+# Install the project package to create entry point scripts in .venv/bin/
+# (e.g., rest, executor, ws, db, scheduler, notification - see [tool.poetry.scripts])
+RUN POETRY_VIRTUALENVS_CREATE=true POETRY_VIRTUALENVS_IN_PROJECT=true \
+    poetry install --no-ansi --only-root
+
+ENV PORT=8000
+
+CMD ["rest"]
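The new server stage starts via a Poetry-generated console script rather than `poetry run`. For illustration, a minimal sketch of what such an entry point resolves to, assuming a `[tool.poetry.scripts]` mapping like `rest = "backend.rest:main"` (the target module and function are assumptions; the diff only names the scripts):

```python
# Hypothetical illustration of a generated console script.
# Assumes pyproject.toml declares: [tool.poetry.scripts] rest = "backend.rest:main"
# `poetry install --only-root` then writes .venv/bin/rest, roughly equivalent to:
import sys

from backend.rest import main  # assumed target; not confirmed by this diff

if __name__ == "__main__":
    sys.exit(main())
```

Because `.venv/bin` is prepended to `PATH`, `CMD ["rest"]` finds this script directly, so the runtime image no longer needs Poetry on the execution path.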
@@ -23,6 +23,7 @@ from .model import (
     ChatSession,
     append_and_save_message,
     create_chat_session,
+    delete_chat_session,
     get_chat_session,
     get_user_sessions,
 )
@@ -211,6 +212,43 @@ async def create_session(
     )


+@router.delete(
+    "/sessions/{session_id}",
+    dependencies=[Security(auth.requires_user)],
+    status_code=204,
+    responses={404: {"description": "Session not found or access denied"}},
+)
+async def delete_session(
+    session_id: str,
+    user_id: Annotated[str, Security(auth.get_user_id)],
+) -> Response:
+    """
+    Delete a chat session.
+
+    Permanently removes a chat session and all its messages.
+    Only the owner can delete their sessions.
+
+    Args:
+        session_id: The session ID to delete.
+        user_id: The authenticated user's ID.
+
+    Returns:
+        204 No Content on success.
+
+    Raises:
+        HTTPException: 404 if session not found or not owned by user.
+    """
+    deleted = await delete_chat_session(session_id, user_id)
+
+    if not deleted:
+        raise HTTPException(
+            status_code=404,
+            detail=f"Session {session_id} not found or access denied",
+        )
+
+    return Response(status_code=204)
+
+
 @router.get(
     "/sessions/{session_id}",
 )
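A minimal client sketch for the new endpoint; the path, bearer-token auth, and status codes match the route above and the OpenAPI entry later in this diff, while the base URL and token handling are placeholders:

```python
# Minimal sketch of calling the new delete endpoint (base URL is a placeholder).
import httpx

def delete_chat_session_remote(session_id: str, jwt: str) -> None:
    resp = httpx.delete(
        f"http://localhost:8000/api/chat/sessions/{session_id}",
        headers={"Authorization": f"Bearer {jwt}"},
    )
    if resp.status_code == 404:
        raise LookupError(f"Session {session_id} not found or access denied")
    resp.raise_for_status()  # success is 204 No Content
```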
@@ -106,6 +106,8 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta):
     GPT41_MINI = "gpt-4.1-mini-2025-04-14"
     GPT4O_MINI = "gpt-4o-mini"
     GPT4O = "gpt-4o"
+    GPT4_TURBO = "gpt-4-turbo"
+    GPT3_5_TURBO = "gpt-3.5-turbo"
     # Anthropic models
     CLAUDE_4_1_OPUS = "claude-opus-4-1-20250805"
     CLAUDE_4_OPUS = "claude-opus-4-20250514"
@@ -253,6 +255,12 @@ MODEL_METADATA = {
     LlmModel.GPT4O: ModelMetadata(
         "openai", 128000, 16384, "GPT-4o", "OpenAI", "OpenAI", 2
     ), # gpt-4o-2024-08-06
+    LlmModel.GPT4_TURBO: ModelMetadata(
+        "openai", 128000, 4096, "GPT-4 Turbo", "OpenAI", "OpenAI", 3
+    ), # gpt-4-turbo-2024-04-09
+    LlmModel.GPT3_5_TURBO: ModelMetadata(
+        "openai", 16385, 4096, "GPT-3.5 Turbo", "OpenAI", "OpenAI", 1
+    ), # gpt-3.5-turbo-0125
     # https://docs.anthropic.com/en/docs/about-claude/models
     LlmModel.CLAUDE_4_1_OPUS: ModelMetadata(
         "anthropic", 200000, 32000, "Claude Opus 4.1", "Anthropic", "Anthropic", 3
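The `ModelMetadata` positional arguments are not defined in this diff. Reading them off the surrounding entries, they appear to be provider slug, context window, max output tokens, display name, two provider/creator labels, and a tier number. A sketch of that reading; every field name here is an assumption:

```python
# Assumed field order, inferred only from the values in the diff above.
from typing import NamedTuple

class ModelMetadataSketch(NamedTuple):
    provider: str           # "openai"
    context_window: int     # 128000 tokens for gpt-4-turbo-2024-04-09
    max_output_tokens: int  # 4096
    display_name: str       # "GPT-4 Turbo"
    provider_label: str     # "OpenAI"
    creator_label: str      # "OpenAI"
    tier: int               # 3 here; GPT-3.5 Turbo gets 1, GPT-4o gets 2

GPT4_TURBO_META = ModelMetadataSketch(
    "openai", 128000, 4096, "GPT-4 Turbo", "OpenAI", "OpenAI", 3
)
```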
@@ -75,6 +75,8 @@ MODEL_COST: dict[LlmModel, int] = {
     LlmModel.GPT41_MINI: 1,
     LlmModel.GPT4O_MINI: 1,
     LlmModel.GPT4O: 3,
+    LlmModel.GPT4_TURBO: 10,
+    LlmModel.GPT3_5_TURBO: 1,
     LlmModel.CLAUDE_4_1_OPUS: 21,
     LlmModel.CLAUDE_4_OPUS: 21,
     LlmModel.CLAUDE_4_SONNET: 5,
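With these entries restored, credit lookups for the two models resolve again, which is what the test changes below exercise. A minimal usage sketch; both import paths are assumptions, only the names come from the diff:

```python
# Minimal sketch: resolving per-call credit cost for the restored models.
from backend.blocks.llm import LlmModel              # assumed location of the enum
from backend.data.block_cost_config import MODEL_COST  # assumed location of the table

assert MODEL_COST[LlmModel.GPT4_TURBO] == 10
assert MODEL_COST[LlmModel.GPT3_5_TURBO] == 1
```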
@@ -79,7 +79,7 @@ async def test_block_credit_usage(server: SpinTestServer):
             node_exec_id="test_node_exec",
             block_id=AITextGeneratorBlock().id,
             inputs={
-                "model": "gpt-4o",
+                "model": "gpt-4-turbo",
                 "credentials": {
                     "id": openai_credentials.id,
                     "provider": openai_credentials.provider,
@@ -100,7 +100,7 @@ async def test_block_credit_usage(server: SpinTestServer):
             graph_exec_id="test_graph_exec",
             node_exec_id="test_node_exec",
             block_id=AITextGeneratorBlock().id,
-            inputs={"model": "gpt-4o", "api_key": "owned_api_key"},
+            inputs={"model": "gpt-4-turbo", "api_key": "owned_api_key"},
             execution_context=ExecutionContext(user_timezone="UTC"),
         ),
     )
@@ -53,7 +53,7 @@ services:

   rabbitmq:
     <<: *agpt-services
-    image: rabbitmq:management
+    image: rabbitmq:4.1.4
     container_name: rabbitmq
     healthcheck:
       test: rabbitmq-diagnostics -q ping
@@ -66,7 +66,6 @@ services:
       - RABBITMQ_DEFAULT_PASS=k0VMxyIJF9S35f3x2uaw5IWAl6Y536O7
     ports:
       - "5672:5672"
-      - "15672:15672"
   clamav:
     image: clamav/clamav-debian:latest
     ports:
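Both compose files move to the plain `rabbitmq:4.1.4` image and drop the 15672 management-UI port, while readiness is gated on `rabbitmq-diagnostics -q ping`. A dependent service can apply the same wait-and-retry logic client-side; a minimal sketch using the pika library (host and credentials are placeholders, not taken from the files above):

```python
# Minimal readiness probe mirroring the healthcheck's retry behavior.
import time

import pika

def wait_for_rabbitmq(host: str = "localhost", port: int = 5672, retries: int = 5) -> None:
    creds = pika.PlainCredentials("guest", "guest")  # placeholder credentials
    params = pika.ConnectionParameters(host=host, port=port, credentials=creds)
    for attempt in range(retries):
        try:
            pika.BlockingConnection(params).close()  # broker accepted a connection
            return
        except pika.exceptions.AMQPConnectionError:
            time.sleep(2**attempt)  # simple backoff between retries
    raise RuntimeError("RabbitMQ did not become ready in time")
```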
@@ -1,42 +0,0 @@
--- Migrate deprecated OpenAI GPT-4-turbo and GPT-3.5-turbo models
--- This updates all AgentNode blocks that use deprecated models
--- OpenAI is retiring these models:
--- - gpt-4-turbo: March 26, 2026 -> migrate to gpt-4o
--- - gpt-3.5-turbo: September 28, 2026 -> migrate to gpt-4o-mini
-
--- Update gpt-4-turbo to gpt-4o (staying in same capability tier)
-UPDATE "AgentNode"
-SET "constantInput" = JSONB_SET(
-    "constantInput"::jsonb,
-    '{model}',
-    '"gpt-4o"'::jsonb
-)
-WHERE "constantInput"::jsonb->>'model' = 'gpt-4-turbo';
-
--- Update gpt-3.5-turbo to gpt-4o-mini (appropriate replacement for lightweight model)
-UPDATE "AgentNode"
-SET "constantInput" = JSONB_SET(
-    "constantInput"::jsonb,
-    '{model}',
-    '"gpt-4o-mini"'::jsonb
-)
-WHERE "constantInput"::jsonb->>'model' = 'gpt-3.5-turbo';
-
--- Update AgentPreset input overrides (stored in AgentNodeExecutionInputOutput)
-UPDATE "AgentNodeExecutionInputOutput"
-SET "data" = JSONB_SET(
-    "data"::jsonb,
-    '{model}',
-    '"gpt-4o"'::jsonb
-)
-WHERE "agentPresetId" IS NOT NULL
-  AND "data"::jsonb->>'model' = 'gpt-4-turbo';
-
-UPDATE "AgentNodeExecutionInputOutput"
-SET "data" = JSONB_SET(
-    "data"::jsonb,
-    '{model}',
-    '"gpt-4o-mini"'::jsonb
-)
-WHERE "agentPresetId" IS NOT NULL
-  AND "data"::jsonb->>'model' = 'gpt-3.5-turbo';
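This migration file is deleted because gpt-4-turbo and gpt-3.5-turbo are reinstated in `LlmModel` above, so the forced rewrite to gpt-4o/gpt-4o-mini no longer applies. For context, a pre-flight check like the following shows how many rows such a JSONB rewrite would touch; a hypothetical sketch using psycopg, with the connection string taken from an assumed `DATABASE_URL` env var:

```python
# Hypothetical pre-flight count of AgentNode rows referencing the two models.
# Table and column names come from the SQL above; everything else is assumed.
import os

import psycopg

DEPRECATED = ["gpt-4-turbo", "gpt-3.5-turbo"]

with psycopg.connect(os.environ["DATABASE_URL"]) as conn:
    with conn.cursor() as cur:
        cur.execute(
            """
            SELECT "constantInput"::jsonb->>'model' AS model, COUNT(*)
            FROM "AgentNode"
            WHERE "constantInput"::jsonb->>'model' = ANY(%s)
            GROUP BY 1
            """,
            (DEPRECATED,),
        )
        for model, count in cur.fetchall():
            print(f"{model}: {count} nodes")
```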
@@ -75,7 +75,7 @@ services:
       timeout: 5s
       retries: 5
   rabbitmq:
-    image: rabbitmq:management
+    image: rabbitmq:4.1.4
     container_name: rabbitmq
     healthcheck:
       test: rabbitmq-diagnostics -q ping
@@ -88,14 +88,13 @@ services:
       <<: *backend-env
     ports:
       - "5672:5672"
-      - "15672:15672"

   rest_server:
     build:
       context: ../
       dockerfile: autogpt_platform/backend/Dockerfile
       target: server
-    command: ["python", "-m", "backend.rest"]
+    command: ["rest"] # points to entry in [tool.poetry.scripts] in pyproject.toml
     develop:
       watch:
         - path: ./
@@ -128,7 +127,7 @@ services:
       context: ../
       dockerfile: autogpt_platform/backend/Dockerfile
       target: server
-    command: ["python", "-m", "backend.exec"]
+    command: ["executor"] # points to entry in [tool.poetry.scripts] in pyproject.toml
     develop:
       watch:
         - path: ./
@@ -163,7 +162,7 @@ services:
       context: ../
       dockerfile: autogpt_platform/backend/Dockerfile
       target: server
-    command: ["python", "-m", "backend.ws"]
+    command: ["ws"] # points to entry in [tool.poetry.scripts] in pyproject.toml
     develop:
       watch:
         - path: ./
@@ -196,7 +195,7 @@ services:
       context: ../
       dockerfile: autogpt_platform/backend/Dockerfile
       target: server
-    command: ["python", "-m", "backend.db"]
+    command: ["db"] # points to entry in [tool.poetry.scripts] in pyproject.toml
     develop:
       watch:
         - path: ./
@@ -225,7 +224,7 @@ services:
       context: ../
       dockerfile: autogpt_platform/backend/Dockerfile
       target: server
-    command: ["python", "-m", "backend.scheduler"]
+    command: ["scheduler"] # points to entry in [tool.poetry.scripts] in pyproject.toml
     develop:
       watch:
         - path: ./
@@ -273,7 +272,7 @@ services:
       context: ../
       dockerfile: autogpt_platform/backend/Dockerfile
       target: server
-    command: ["python", "-m", "backend.notification"]
+    command: ["notification"] # points to entry in [tool.poetry.scripts] in pyproject.toml
     develop:
       watch:
         - path: ./
@@ -1,6 +1,8 @@
 "use client";

 import { SidebarProvider } from "@/components/ui/sidebar";
+// TODO: Replace with modern Dialog component when available
+import DeleteConfirmDialog from "@/components/__legacy__/delete-confirm-dialog";
 import { ChatContainer } from "./components/ChatContainer/ChatContainer";
 import { ChatSidebar } from "./components/ChatSidebar/ChatSidebar";
 import { MobileDrawer } from "./components/MobileDrawer/MobileDrawer";
@@ -31,6 +33,12 @@ export function CopilotPage() {
     handleDrawerOpenChange,
     handleSelectSession,
     handleNewChat,
+    // Delete functionality
+    sessionToDelete,
+    isDeleting,
+    handleDeleteClick,
+    handleConfirmDelete,
+    handleCancelDelete,
   } = useCopilotPage();

   if (isUserLoading || !isLoggedIn) {
@@ -48,7 +56,19 @@ export function CopilotPage() {
       >
         {!isMobile && <ChatSidebar />}
         <div className="relative flex h-full w-full flex-col overflow-hidden bg-[#f8f8f9] px-0">
-          {isMobile && <MobileHeader onOpenDrawer={handleOpenDrawer} />}
+          {isMobile && (
+            <MobileHeader
+              onOpenDrawer={handleOpenDrawer}
+              showDelete={!!sessionId}
+              isDeleting={isDeleting}
+              onDelete={() => {
+                const session = sessions.find((s) => s.id === sessionId);
+                if (session) {
+                  handleDeleteClick(session.id, session.title);
+                }
+              }}
+            />
+          )}
           <div className="flex-1 overflow-hidden">
             <ChatContainer
               messages={messages}
@@ -75,6 +95,16 @@ export function CopilotPage() {
           onOpenChange={handleDrawerOpenChange}
         />
       )}
+      {/* Delete confirmation dialog - rendered at top level for proper z-index on mobile */}
+      {isMobile && (
+        <DeleteConfirmDialog
+          entityType="chat"
+          entityName={sessionToDelete?.title || "Untitled chat"}
+          open={!!sessionToDelete}
+          onOpenChange={(open) => !open && handleCancelDelete()}
+          onDoDelete={handleConfirmDelete}
+        />
+      )}
     </SidebarProvider>
   );
 }
@@ -1,8 +1,15 @@
 "use client";
-import { useGetV2ListSessions } from "@/app/api/__generated__/endpoints/chat/chat";
+import {
+  getGetV2ListSessionsQueryKey,
+  useDeleteV2DeleteSession,
+  useGetV2ListSessions,
+} from "@/app/api/__generated__/endpoints/chat/chat";
 import { Button } from "@/components/atoms/Button/Button";
 import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner";
 import { Text } from "@/components/atoms/Text/Text";
+import { toast } from "@/components/molecules/Toast/use-toast";
+// TODO: Replace with modern Dialog component when available
+import DeleteConfirmDialog from "@/components/__legacy__/delete-confirm-dialog";
 import {
   Sidebar,
   SidebarContent,
@@ -12,18 +19,52 @@ import {
   useSidebar,
 } from "@/components/ui/sidebar";
 import { cn } from "@/lib/utils";
-import { PlusCircleIcon, PlusIcon } from "@phosphor-icons/react";
+import { PlusCircleIcon, PlusIcon, TrashIcon } from "@phosphor-icons/react";
+import { useQueryClient } from "@tanstack/react-query";
 import { motion } from "framer-motion";
+import { useState } from "react";
 import { parseAsString, useQueryState } from "nuqs";

 export function ChatSidebar() {
   const { state } = useSidebar();
   const isCollapsed = state === "collapsed";
   const [sessionId, setSessionId] = useQueryState("sessionId", parseAsString);
+  const [sessionToDelete, setSessionToDelete] = useState<{
+    id: string;
+    title: string | null | undefined;
+  } | null>(null);
+
+  const queryClient = useQueryClient();
+
   const { data: sessionsResponse, isLoading: isLoadingSessions } =
     useGetV2ListSessions({ limit: 50 });
+
+  const { mutate: deleteSession, isPending: isDeleting } =
+    useDeleteV2DeleteSession({
+      mutation: {
+        onSuccess: () => {
+          // Invalidate sessions list to refetch
+          queryClient.invalidateQueries({
+            queryKey: getGetV2ListSessionsQueryKey(),
+          });
+          // If we deleted the current session, clear selection
+          if (sessionToDelete?.id === sessionId) {
+            setSessionId(null);
+          }
+          setSessionToDelete(null);
+        },
+        onError: (error) => {
+          toast({
+            title: "Failed to delete chat",
+            description:
+              error instanceof Error ? error.message : "An error occurred",
+            variant: "destructive",
+          });
+          setSessionToDelete(null);
+        },
+      },
+    });
+
   const sessions =
     sessionsResponse?.status === 200 ? sessionsResponse.data.sessions : [];

@@ -35,6 +76,22 @@ export function ChatSidebar() {
     setSessionId(id);
   }

+  function handleDeleteClick(
+    e: React.MouseEvent,
+    id: string,
+    title: string | null | undefined,
+  ) {
+    e.stopPropagation(); // Prevent session selection
+    if (isDeleting) return; // Prevent double-click during deletion
+    setSessionToDelete({ id, title });
+  }
+
+  function handleConfirmDelete() {
+    if (sessionToDelete) {
+      deleteSession({ sessionId: sessionToDelete.id });
+    }
+  }
+
   function formatDate(dateString: string) {
     const date = new Date(dateString);
     const now = new Date();
@@ -61,128 +118,152 @@ export function ChatSidebar() {
   }

   return (
-    <Sidebar
-      variant="inset"
-      collapsible="icon"
-      className="!top-[50px] !h-[calc(100vh-50px)] border-r border-zinc-100 px-0"
-    >
-      {isCollapsed && (
-        <SidebarHeader
-          className={cn(
-            "flex",
-            isCollapsed
-              ? "flex-row items-center justify-between gap-y-4 md:flex-col md:items-start md:justify-start"
-              : "flex-row items-center justify-between",
-          )}
-        >
-          <motion.div
-            key={isCollapsed ? "header-collapsed" : "header-expanded"}
-            className="flex flex-col items-center gap-3 pt-4"
-            initial={{ opacity: 0, filter: "blur(3px)" }}
-            animate={{ opacity: 1, filter: "blur(0px)" }}
-            transition={{ type: "spring", bounce: 0.2 }}
-          >
-            <div className="flex flex-col items-center gap-2">
-              <SidebarTrigger />
-              <Button
-                variant="ghost"
-                onClick={handleNewChat}
-                style={{ minWidth: "auto", width: "auto" }}
-              >
-                <PlusCircleIcon className="!size-5" />
-                <span className="sr-only">New Chat</span>
-              </Button>
-            </div>
-          </motion.div>
-        </SidebarHeader>
-      )}
-      <SidebarContent className="gap-4 overflow-y-auto px-4 py-4 [-ms-overflow-style:none] [scrollbar-width:none] [&::-webkit-scrollbar]:hidden">
-        {!isCollapsed && (
-          <motion.div
-            initial={{ opacity: 0 }}
-            animate={{ opacity: 1 }}
-            transition={{ duration: 0.2, delay: 0.1 }}
-            className="flex items-center justify-between px-3"
-          >
-            <Text variant="h3" size="body-medium">
-              Your chats
-            </Text>
-            <div className="relative left-6">
-              <SidebarTrigger />
-            </div>
-          </motion.div>
-        )}
-
-        {!isCollapsed && (
-          <motion.div
-            initial={{ opacity: 0 }}
-            animate={{ opacity: 1 }}
-            transition={{ duration: 0.2, delay: 0.15 }}
-            className="mt-4 flex flex-col gap-1"
-          >
-            {isLoadingSessions ? (
-              <div className="flex min-h-[30rem] items-center justify-center py-4">
-                <LoadingSpinner size="small" className="text-neutral-600" />
-              </div>
-            ) : sessions.length === 0 ? (
-              <p className="py-4 text-center text-sm text-neutral-500">
-                No conversations yet
-              </p>
-            ) : (
-              sessions.map((session) => (
-                <button
-                  key={session.id}
-                  onClick={() => handleSelectSession(session.id)}
-                  className={cn(
-                    "w-full rounded-lg px-3 py-2.5 text-left transition-colors",
-                    session.id === sessionId
-                      ? "bg-zinc-100"
-                      : "hover:bg-zinc-50",
-                  )}
-                >
-                  <div className="flex min-w-0 max-w-full flex-col overflow-hidden">
-                    <div className="min-w-0 max-w-full">
-                      <Text
-                        variant="body"
-                        className={cn(
-                          "truncate font-normal",
-                          session.id === sessionId
-                            ? "text-zinc-600"
-                            : "text-zinc-800",
-                        )}
-                      >
-                        {session.title || `Untitled chat`}
-                      </Text>
-                    </div>
-                    <Text variant="small" className="text-neutral-400">
-                      {formatDate(session.updated_at)}
-                    </Text>
-                  </div>
-                </button>
-              ))
-            )}
-          </motion.div>
-        )}
-      </SidebarContent>
-      {!isCollapsed && sessionId && (
-        <SidebarFooter className="shrink-0 bg-zinc-50 p-3 pb-1 shadow-[0_-4px_6px_-1px_rgba(0,0,0,0.05)]">
-          <motion.div
-            initial={{ opacity: 0 }}
-            animate={{ opacity: 1 }}
-            transition={{ duration: 0.2, delay: 0.2 }}
-          >
-            <Button
-              variant="primary"
-              size="small"
-              onClick={handleNewChat}
-              className="w-full"
-              leftIcon={<PlusIcon className="h-4 w-4" weight="bold" />}
-            >
-              New Chat
-            </Button>
-          </motion.div>
-        </SidebarFooter>
-      )}
-    </Sidebar>
+    <>
+      <Sidebar
+        variant="inset"
+        collapsible="icon"
+        className="!top-[50px] !h-[calc(100vh-50px)] border-r border-zinc-100 px-0"
+      >
+        {isCollapsed && (
+          <SidebarHeader
+            className={cn(
+              "flex",
+              isCollapsed
+                ? "flex-row items-center justify-between gap-y-4 md:flex-col md:items-start md:justify-start"
+                : "flex-row items-center justify-between",
+            )}
+          >
+            <motion.div
+              key={isCollapsed ? "header-collapsed" : "header-expanded"}
+              className="flex flex-col items-center gap-3 pt-4"
+              initial={{ opacity: 0, filter: "blur(3px)" }}
+              animate={{ opacity: 1, filter: "blur(0px)" }}
+              transition={{ type: "spring", bounce: 0.2 }}
+            >
+              <div className="flex flex-col items-center gap-2">
+                <SidebarTrigger />
+                <Button
+                  variant="ghost"
+                  onClick={handleNewChat}
+                  style={{ minWidth: "auto", width: "auto" }}
+                >
+                  <PlusCircleIcon className="!size-5" />
+                  <span className="sr-only">New Chat</span>
+                </Button>
+              </div>
+            </motion.div>
+          </SidebarHeader>
+        )}
+        <SidebarContent className="gap-4 overflow-y-auto px-4 py-4 [-ms-overflow-style:none] [scrollbar-width:none] [&::-webkit-scrollbar]:hidden">
+          {!isCollapsed && (
+            <motion.div
+              initial={{ opacity: 0 }}
+              animate={{ opacity: 1 }}
+              transition={{ duration: 0.2, delay: 0.1 }}
+              className="flex items-center justify-between px-3"
+            >
+              <Text variant="h3" size="body-medium">
+                Your chats
+              </Text>
+              <div className="relative left-6">
+                <SidebarTrigger />
+              </div>
+            </motion.div>
+          )}
+
+          {!isCollapsed && (
+            <motion.div
+              initial={{ opacity: 0 }}
+              animate={{ opacity: 1 }}
+              transition={{ duration: 0.2, delay: 0.15 }}
+              className="mt-4 flex flex-col gap-1"
+            >
+              {isLoadingSessions ? (
+                <div className="flex min-h-[30rem] items-center justify-center py-4">
+                  <LoadingSpinner size="small" className="text-neutral-600" />
+                </div>
+              ) : sessions.length === 0 ? (
+                <p className="py-4 text-center text-sm text-neutral-500">
+                  No conversations yet
+                </p>
+              ) : (
+                sessions.map((session) => (
+                  <div
+                    key={session.id}
+                    className={cn(
+                      "group relative w-full rounded-lg transition-colors",
+                      session.id === sessionId
+                        ? "bg-zinc-100"
+                        : "hover:bg-zinc-50",
+                    )}
+                  >
+                    <button
+                      onClick={() => handleSelectSession(session.id)}
+                      className="w-full px-3 py-2.5 pr-10 text-left"
+                    >
+                      <div className="flex min-w-0 max-w-full flex-col overflow-hidden">
+                        <div className="min-w-0 max-w-full">
+                          <Text
+                            variant="body"
+                            className={cn(
+                              "truncate font-normal",
+                              session.id === sessionId
+                                ? "text-zinc-600"
+                                : "text-zinc-800",
+                            )}
+                          >
+                            {session.title || `Untitled chat`}
+                          </Text>
+                        </div>
+                        <Text variant="small" className="text-neutral-400">
+                          {formatDate(session.updated_at)}
+                        </Text>
+                      </div>
+                    </button>
+                    <button
+                      onClick={(e) =>
+                        handleDeleteClick(e, session.id, session.title)
+                      }
+                      disabled={isDeleting}
+                      className="absolute right-2 top-1/2 -translate-y-1/2 rounded p-1.5 text-zinc-400 opacity-0 transition-all group-hover:opacity-100 hover:bg-red-100 hover:text-red-600 focus-visible:opacity-100 disabled:cursor-not-allowed disabled:opacity-50"
+                      aria-label="Delete chat"
+                    >
+                      <TrashIcon className="h-4 w-4" />
+                    </button>
+                  </div>
+                ))
+              )}
+            </motion.div>
+          )}
+        </SidebarContent>
+        {!isCollapsed && sessionId && (
+          <SidebarFooter className="shrink-0 bg-zinc-50 p-3 pb-1 shadow-[0_-4px_6px_-1px_rgba(0,0,0,0.05)]">
+            <motion.div
+              initial={{ opacity: 0 }}
+              animate={{ opacity: 1 }}
+              transition={{ duration: 0.2, delay: 0.2 }}
+            >
+              <Button
+                variant="primary"
+                size="small"
+                onClick={handleNewChat}
+                className="w-full"
+                leftIcon={<PlusIcon className="h-4 w-4" weight="bold" />}
+              >
+                New Chat
+              </Button>
+            </motion.div>
+          </SidebarFooter>
+        )}
+      </Sidebar>
+
+      <DeleteConfirmDialog
+        entityType="chat"
+        entityName={sessionToDelete?.title || "Untitled chat"}
+        open={!!sessionToDelete}
+        onOpenChange={(open) => !open && setSessionToDelete(null)}
+        onDoDelete={handleConfirmDelete}
+      />
+    </>
   );
 }
@@ -1,22 +1,46 @@
 import { Button } from "@/components/atoms/Button/Button";
 import { NAVBAR_HEIGHT_PX } from "@/lib/constants";
-import { ListIcon } from "@phosphor-icons/react";
+import { ListIcon, TrashIcon } from "@phosphor-icons/react";

 interface Props {
   onOpenDrawer: () => void;
+  showDelete?: boolean;
+  isDeleting?: boolean;
+  onDelete?: () => void;
 }

-export function MobileHeader({ onOpenDrawer }: Props) {
+export function MobileHeader({
+  onOpenDrawer,
+  showDelete,
+  isDeleting,
+  onDelete,
+}: Props) {
   return (
-    <Button
-      variant="icon"
-      size="icon"
-      aria-label="Open sessions"
-      onClick={onOpenDrawer}
-      className="fixed z-50 bg-white shadow-md"
+    <div
+      className="fixed z-50 flex gap-2"
       style={{ left: "1rem", top: `${NAVBAR_HEIGHT_PX + 20}px` }}
     >
-      <ListIcon width="1.25rem" height="1.25rem" />
-    </Button>
+      <Button
+        variant="icon"
+        size="icon"
+        aria-label="Open sessions"
+        onClick={onOpenDrawer}
+        className="bg-white shadow-md"
+      >
+        <ListIcon width="1.25rem" height="1.25rem" />
+      </Button>
+      {showDelete && onDelete && (
+        <Button
+          variant="icon"
+          size="icon"
+          aria-label="Delete current chat"
+          onClick={onDelete}
+          disabled={isDeleting}
+          className="bg-white text-red-500 shadow-md hover:bg-red-50 hover:text-red-600 disabled:opacity-50"
+        >
+          <TrashIcon width="1.25rem" height="1.25rem" />
+        </Button>
+      )}
+    </div>
   );
 }
@@ -1,10 +1,15 @@
-import { useGetV2ListSessions } from "@/app/api/__generated__/endpoints/chat/chat";
+import {
+  getGetV2ListSessionsQueryKey,
+  useDeleteV2DeleteSession,
+  useGetV2ListSessions,
+} from "@/app/api/__generated__/endpoints/chat/chat";
 import { toast } from "@/components/molecules/Toast/use-toast";
 import { useBreakpoint } from "@/lib/hooks/useBreakpoint";
 import { useSupabase } from "@/lib/supabase/hooks/useSupabase";
 import { useChat } from "@ai-sdk/react";
+import { useQueryClient } from "@tanstack/react-query";
 import { DefaultChatTransport } from "ai";
-import { useEffect, useMemo, useRef, useState } from "react";
+import { useCallback, useEffect, useMemo, useRef, useState } from "react";
 import { useChatSession } from "./useChatSession";
 import { useLongRunningToolPolling } from "./hooks/useLongRunningToolPolling";

@@ -14,6 +19,11 @@ export function useCopilotPage() {
   const { isUserLoading, isLoggedIn } = useSupabase();
   const [isDrawerOpen, setIsDrawerOpen] = useState(false);
   const [pendingMessage, setPendingMessage] = useState<string | null>(null);
+  const [sessionToDelete, setSessionToDelete] = useState<{
+    id: string;
+    title: string | null | undefined;
+  } | null>(null);
+  const queryClient = useQueryClient();

   const {
     sessionId,
@@ -24,6 +34,30 @@ export function useCopilotPage() {
     isCreatingSession,
   } = useChatSession();

+  const { mutate: deleteSessionMutation, isPending: isDeleting } =
+    useDeleteV2DeleteSession({
+      mutation: {
+        onSuccess: () => {
+          queryClient.invalidateQueries({
+            queryKey: getGetV2ListSessionsQueryKey(),
+          });
+          if (sessionToDelete?.id === sessionId) {
+            setSessionId(null);
+          }
+          setSessionToDelete(null);
+        },
+        onError: (error) => {
+          toast({
+            title: "Failed to delete chat",
+            description:
+              error instanceof Error ? error.message : "An error occurred",
+            variant: "destructive",
+          });
+          setSessionToDelete(null);
+        },
+      },
+    });
+
   const breakpoint = useBreakpoint();
   const isMobile =
     breakpoint === "base" || breakpoint === "sm" || breakpoint === "md";
@@ -143,6 +177,24 @@ export function useCopilotPage() {
     if (isMobile) setIsDrawerOpen(false);
   }

+  const handleDeleteClick = useCallback(
+    (id: string, title: string | null | undefined) => {
+      if (isDeleting) return;
+      setSessionToDelete({ id, title });
+    },
+    [isDeleting],
+  );
+
+  const handleConfirmDelete = useCallback(() => {
+    if (sessionToDelete) {
+      deleteSessionMutation({ sessionId: sessionToDelete.id });
+    }
+  }, [sessionToDelete, deleteSessionMutation]);
+
+  const handleCancelDelete = useCallback(() => {
+    setSessionToDelete(null);
+  }, []);
+
   return {
     sessionId,
     messages,
@@ -165,5 +217,11 @@ export function useCopilotPage() {
     handleDrawerOpenChange,
     handleSelectSession,
     handleNewChat,
+    // Delete functionality
+    sessionToDelete,
+    isDeleting,
+    handleDeleteClick,
+    handleConfirmDelete,
+    handleCancelDelete,
   };
 }
@@ -1151,6 +1151,36 @@
       }
     },
     "/api/chat/sessions/{session_id}": {
+      "delete": {
+        "tags": ["v2", "chat", "chat"],
+        "summary": "Delete Session",
+        "description": "Delete a chat session.\n\nPermanently removes a chat session and all its messages.\nOnly the owner can delete their sessions.\n\nArgs:\n    session_id: The session ID to delete.\n    user_id: The authenticated user's ID.\n\nReturns:\n    204 No Content on success.\n\nRaises:\n    HTTPException: 404 if session not found or not owned by user.",
+        "operationId": "deleteV2DeleteSession",
+        "security": [{ "HTTPBearerJWT": [] }],
+        "parameters": [
+          {
+            "name": "session_id",
+            "in": "path",
+            "required": true,
+            "schema": { "type": "string", "title": "Session Id" }
+          }
+        ],
+        "responses": {
+          "204": { "description": "Successful Response" },
+          "401": {
+            "$ref": "#/components/responses/HTTP401NotAuthenticatedError"
+          },
+          "404": { "description": "Session not found or access denied" },
+          "422": {
+            "description": "Validation Error",
+            "content": {
+              "application/json": {
+                "schema": { "$ref": "#/components/schemas/HTTPValidationError" }
+              }
+            }
+          }
+        }
+      },
       "get": {
         "tags": ["v2", "chat", "chat"],
         "summary": "Get Session",
@@ -115,7 +115,7 @@ const DialogFooter = ({
 }: React.HTMLAttributes<HTMLDivElement>) => (
   <div
     className={cn(
-      "flex flex-col-reverse sm:flex-row sm:justify-end sm:space-x-2",
+      "flex flex-col-reverse gap-2 sm:flex-row sm:justify-end",
       className,
     )}
     {...props}
@@ -65,7 +65,7 @@ The result routes data to yes_output or no_output, enabling intelligent branchin
|
|||||||
| condition | A plaintext English description of the condition to evaluate | str | Yes |
|
| condition | A plaintext English description of the condition to evaluate | str | Yes |
|
||||||
| yes_value | (Optional) Value to output if the condition is true. If not provided, input_value will be used. | Yes Value | No |
|
| yes_value | (Optional) Value to output if the condition is true. If not provided, input_value will be used. | Yes Value | No |
|
||||||
| no_value | (Optional) Value to output if the condition is false. If not provided, input_value will be used. | No Value | No |
|
| no_value | (Optional) Value to output if the condition is false. If not provided, input_value will be used. | No Value | No |
|
||||||
| model | The language model to use for evaluating the condition. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
|
| model | The language model to use for evaluating the condition. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
|
||||||
|
|
||||||
### Outputs
|
### Outputs
|
||||||
|
|
||||||
@@ -103,7 +103,7 @@ The block sends the entire conversation history to the chosen LLM, including sys
|
|||||||
|-------|-------------|------|----------|
|
|-------|-------------|------|----------|
|
||||||
| prompt | The prompt to send to the language model. | str | No |
|
| prompt | The prompt to send to the language model. | str | No |
|
||||||
| messages | List of messages in the conversation. | List[Any] | Yes |
|
| messages | List of messages in the conversation. | List[Any] | Yes |
|
||||||
| model | The language model to use for the conversation. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
|
| model | The language model to use for the conversation. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
|
||||||
| max_tokens | The maximum number of tokens to generate in the chat completion. | int | No |
|
| max_tokens | The maximum number of tokens to generate in the chat completion. | int | No |
|
||||||
| ollama_host | Ollama host for local models | str | No |
|
| ollama_host | Ollama host for local models | str | No |
|
||||||
|
|
||||||
@@ -257,7 +257,7 @@ The block formulates a prompt based on the given focus or source data, sends it
|
|||||||
|-------|-------------|------|----------|
|
|-------|-------------|------|----------|
|
||||||
| focus | The focus of the list to generate. | str | No |
|
| focus | The focus of the list to generate. | str | No |
|
||||||
| source_data | The data to generate the list from. | str | No |
|
| source_data | The data to generate the list from. | str | No |
|
||||||
| model | The language model to use for generating the list. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| model | The language model to use for generating the list. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| max_retries | Maximum number of retries for generating a valid list. | int | No |
| force_json_output | Whether to force the LLM to produce a JSON-only response. This can increase the block's reliability, but may also reduce the quality of the response because it prohibits the LLM from reasoning before providing its JSON response. | bool | No |
| max_tokens | The maximum number of tokens to generate in the chat completion. | int | No |
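A sketch of the retry semantics that `max_retries` and `force_json_output` describe: keep calling the model until it returns a parseable JSON list, up to the retry budget. `call_llm` is a stand-in for the actual model call, not block code from this repo.

```python
# Hypothetical sketch of max_retries: retry until the model emits a valid JSON list.
# call_llm is a stand-in callable (prompt -> str), not the real block internals.
import json
from typing import Callable


def generate_list(call_llm: Callable[[str], str], prompt: str, max_retries: int = 3) -> list:
    last_error: Exception = ValueError("no attempts made")
    for _ in range(max_retries + 1):  # first attempt plus max_retries retries
        raw = call_llm(prompt)
        try:
            result = json.loads(raw)
        except json.JSONDecodeError as err:
            last_error = err
            continue
        if isinstance(result, list):
            return result
        last_error = ValueError(f"expected a JSON list, got {type(result).__name__}")
    raise RuntimeError(f"no valid list after {max_retries} retries") from last_error


print(generate_list(lambda p: '["a", "b", "c"]', "List three letters."))
```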
@@ -424,7 +424,7 @@ The block sends the input prompt to a chosen LLM, along with any system prompts
| prompt | The prompt to send to the language model. | str | Yes |
| expected_format | Expected format of the response. If provided, the response will be validated against this format. The keys should be the expected fields in the response, and the values should be the description of the field. | Dict[str, str] | Yes |
| list_result | Whether the response should be a list of objects in the expected format. | bool | No |
| model | The language model to use for answering the prompt. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| model | The language model to use for answering the prompt. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| force_json_output | Whether to force the LLM to produce a JSON-only response. This can increase the block's reliability, but may also reduce the quality of the response because it prohibits the LLM from reasoning before providing its JSON response. | bool | No |
| sys_prompt | The system prompt to provide additional context to the model. | str | No |
| conversation_history | The conversation history to provide context for the prompt. | List[Dict[str, Any]] | No |
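A sketch of how the `expected_format` input could drive validation: its keys name the required response fields, while its values are only human-readable descriptions shown to the model. The function below is illustrative, not the block's actual validator.

```python
# Hypothetical sketch: validate a JSON response against expected_format, whose keys
# are the required fields (the values are just human-readable field descriptions).
import json


def validate_structured_response(raw: str, expected_format: dict[str, str]) -> dict:
    data = json.loads(raw)
    missing = [field for field in expected_format if field not in data]
    if missing:
        raise ValueError(f"response is missing expected fields: {missing}")
    return data


expected = {"title": "A short headline", "summary": "A one-paragraph summary"}
print(validate_structured_response('{"title": "Hello", "summary": "World"}', expected))
```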
@@ -464,7 +464,7 @@ The block sends the input prompt to a chosen LLM, processes the response, and re
| Input | Description | Type | Required |
|-------|-------------|------|----------|
| prompt | The prompt to send to the language model. You can use any of the {keys} from Prompt Values to fill in the prompt with values from the prompt values dictionary by putting them in curly braces. | str | Yes |
| model | The language model to use for answering the prompt. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| model | The language model to use for answering the prompt. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| sys_prompt | The system prompt to provide additional context to the model. | str | No |
| retry | Number of times to retry the LLM call if the response does not match the expected format. | int | No |
| prompt_values | Values used to fill in the prompt. The values can be used in the prompt by putting them in double curly braces, e.g. {{variable_name}}. | Dict[str, str] | No |
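The `prompt_values` mechanic above is plain string templating: every `{{variable_name}}` placeholder in the prompt is replaced by the matching dictionary value before the prompt is sent. A minimal sketch (illustrative only, not the block's implementation):

```python
# Hypothetical sketch of prompt_values substitution as documented above:
# {{key}} placeholders in the prompt are replaced from the values dictionary.
def fill_prompt(prompt: str, prompt_values: dict[str, str]) -> str:
    for key, value in prompt_values.items():
        prompt = prompt.replace("{{" + key + "}}", value)
    return prompt


print(fill_prompt(
    "Summarize {{topic}} in a {{style}} style.",
    {"topic": "the RabbitMQ health-check change", "style": "concise"},
))
```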
@@ -501,7 +501,7 @@ The block splits the input text into smaller chunks, sends each chunk to an LLM
| Input | Description | Type | Required |
|-------|-------------|------|----------|
| text | The text to summarize. | str | Yes |
| model | The language model to use for summarizing the text. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| model | The language model to use for summarizing the text. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| focus | The topic to focus on in the summary. | str | No |
| style | The style of the summary to generate. | "concise" \| "detailed" \| "bullet points" \| "numbered list" | No |
| max_tokens | The maximum number of tokens to generate in the chat completion. | int | No |
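The hunk header above notes that this block "splits the input text into smaller chunks, sends each chunk to an LLM". A sketch of that map-then-reduce shape, with a hypothetical `summarize` callable standing in for the per-chunk model call; the chunk size and overlap are made-up defaults, not the block's real ones.

```python
# Hypothetical sketch of chunked summarization: summarize each chunk, then
# summarize the concatenated partial summaries. Sizes/overlap are illustrative.
from typing import Callable


def chunk_text(text: str, chunk_size: int = 4000, overlap: int = 200) -> list[str]:
    chunks, start = [], 0
    while start < len(text):
        chunks.append(text[start:start + chunk_size])
        start += chunk_size - overlap  # overlap preserves context across boundaries
    return chunks


def summarize_long_text(text: str, summarize: Callable[[str], str]) -> str:
    partials = [summarize(chunk) for chunk in chunk_text(text)]
    return summarize("\n".join(partials))  # final pass merges the partial summaries
```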
@@ -763,7 +763,7 @@ Configure agent_mode_max_iterations to control loop behavior: 0 for single decis
| Input | Description | Type | Required |
|-------|-------------|------|----------|
| prompt | The prompt to send to the language model. | str | Yes |
| model | The language model to use for answering the prompt. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| model | The language model to use for answering the prompt. | "o3-mini" \| "o3-2025-04-16" \| "o1" \| "o1-mini" \| "gpt-5.2-2025-12-11" \| "gpt-5.1-2025-11-13" \| "gpt-5-2025-08-07" \| "gpt-5-mini-2025-08-07" \| "gpt-5-nano-2025-08-07" \| "gpt-5-chat-latest" \| "gpt-4.1-2025-04-14" \| "gpt-4.1-mini-2025-04-14" \| "gpt-4o-mini" \| "gpt-4o" \| "gpt-4-turbo" \| "gpt-3.5-turbo" \| "claude-opus-4-1-20250805" \| "claude-opus-4-20250514" \| "claude-sonnet-4-20250514" \| "claude-opus-4-5-20251101" \| "claude-sonnet-4-5-20250929" \| "claude-haiku-4-5-20251001" \| "claude-opus-4-6" \| "claude-3-haiku-20240307" \| "Qwen/Qwen2.5-72B-Instruct-Turbo" \| "nvidia/llama-3.1-nemotron-70b-instruct" \| "meta-llama/Llama-3.3-70B-Instruct-Turbo" \| "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" \| "meta-llama/Llama-3.2-3B-Instruct-Turbo" \| "llama-3.3-70b-versatile" \| "llama-3.1-8b-instant" \| "llama3.3" \| "llama3.2" \| "llama3" \| "llama3.1:405b" \| "dolphin-mistral:latest" \| "openai/gpt-oss-120b" \| "openai/gpt-oss-20b" \| "google/gemini-2.5-pro-preview-03-25" \| "google/gemini-3-pro-preview" \| "google/gemini-2.5-flash" \| "google/gemini-2.0-flash-001" \| "google/gemini-2.5-flash-lite-preview-06-17" \| "google/gemini-2.0-flash-lite-001" \| "mistralai/mistral-nemo" \| "cohere/command-r-08-2024" \| "cohere/command-r-plus-08-2024" \| "deepseek/deepseek-chat" \| "deepseek/deepseek-r1-0528" \| "perplexity/sonar" \| "perplexity/sonar-pro" \| "perplexity/sonar-deep-research" \| "nousresearch/hermes-3-llama-3.1-405b" \| "nousresearch/hermes-3-llama-3.1-70b" \| "amazon/nova-lite-v1" \| "amazon/nova-micro-v1" \| "amazon/nova-pro-v1" \| "microsoft/wizardlm-2-8x22b" \| "gryphe/mythomax-l2-13b" \| "meta-llama/llama-4-scout" \| "meta-llama/llama-4-maverick" \| "x-ai/grok-4" \| "x-ai/grok-4-fast" \| "x-ai/grok-4.1-fast" \| "x-ai/grok-code-fast-1" \| "moonshotai/kimi-k2" \| "qwen/qwen3-235b-a22b-thinking-2507" \| "qwen/qwen3-coder" \| "Llama-4-Scout-17B-16E-Instruct-FP8" \| "Llama-4-Maverick-17B-128E-Instruct-FP8" \| "Llama-3.3-8B-Instruct" \| "Llama-3.3-70B-Instruct" \| "v0-1.5-md" \| "v0-1.5-lg" \| "v0-1.0-md" | No |
| multiple_tool_calls | Whether to allow multiple tool calls in a single response. | bool | No |
| sys_prompt | The system prompt to provide additional context to the model. | str | No |
| conversation_history | The conversation history to provide context for the prompt. | List[Dict[str, Any]] | No |
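The hunk header mentions `agent_mode_max_iterations` controlling loop behavior, with 0 meaning a single decision. A speculative sketch of that contract, assuming a positive value caps a decide/act loop; `decide` and `act` are stand-ins, not the Smart Decision Maker internals.

```python
# Speculative sketch of agent_mode_max_iterations: 0 = one decision, no loop;
# a positive value caps the decide/act cycle. decide/act are stand-in callables.
from typing import Any, Callable, Optional


def run_agent_mode(
    decide: Callable[[Optional[Any]], dict],
    act: Callable[[dict], Any],
    max_iterations: int = 0,
) -> dict:
    decision = decide(None)
    if max_iterations == 0:
        return decision  # single decision, tools are not executed in a loop
    for _ in range(max_iterations):
        if decision.get("finished"):
            break
        observation = act(decision)     # execute the chosen tool call
        decision = decide(observation)  # feed the result back for the next decision
    return decision
```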