mirror of
https://github.com/Significant-Gravitas/AutoGPT.git
synced 2026-03-17 03:00:27 -04:00
Compare commits
32 Commits
fix/agent-
...
swiftyos/m
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
726542472a | ||
|
|
757ec1f064 | ||
|
|
9442c648a4 | ||
|
|
1c51dd18aa | ||
|
|
6f4f80871d | ||
|
|
e8cca6cd9a | ||
|
|
bf6308e87c | ||
|
|
4e59143d16 | ||
|
|
d5efb6915b | ||
|
|
b9aac42056 | ||
|
|
95651d33da | ||
|
|
b30418d833 | ||
|
|
ed729ddbe2 | ||
|
|
8c7030af0b | ||
|
|
195b14286a | ||
|
|
29ca034e40 | ||
|
|
1d9dd782a8 | ||
|
|
a1cb3d2a91 | ||
|
|
1b91327034 | ||
|
|
c7cdb40c5b | ||
|
|
77fb4419d0 | ||
|
|
9f002ce8f6 | ||
|
|
74691076c6 | ||
|
|
b15ad0df9b | ||
|
|
2136defea8 | ||
|
|
6e61cb103c | ||
|
|
0e72e1f5e7 | ||
|
|
163b0b3c9d | ||
|
|
ef42b17e3b | ||
|
|
a18ffd0b21 | ||
|
|
e40c8c70ce | ||
|
|
9cdcd6793f |
4
.gitignore
vendored
4
.gitignore
vendored
@@ -180,4 +180,6 @@ autogpt_platform/backend/settings.py
|
||||
.claude/settings.local.json
|
||||
CLAUDE.local.md
|
||||
/autogpt_platform/backend/logs
|
||||
.next
|
||||
.next
|
||||
# Implementation plans (generated by AI agents)
|
||||
plans/
|
||||
|
||||
@@ -1,3 +1,10 @@
|
||||
default_install_hook_types:
|
||||
- pre-commit
|
||||
- pre-push
|
||||
- post-checkout
|
||||
|
||||
default_stages: [pre-commit]
|
||||
|
||||
repos:
|
||||
- repo: https://github.com/pre-commit/pre-commit-hooks
|
||||
rev: v4.4.0
|
||||
@@ -17,6 +24,7 @@ repos:
|
||||
name: Detect secrets
|
||||
description: Detects high entropy strings that are likely to be passwords.
|
||||
files: ^autogpt_platform/
|
||||
exclude: pnpm-lock\.yaml$
|
||||
stages: [pre-push]
|
||||
|
||||
- repo: local
|
||||
@@ -26,49 +34,106 @@ repos:
|
||||
- id: poetry-install
|
||||
name: Check & Install dependencies - AutoGPT Platform - Backend
|
||||
alias: poetry-install-platform-backend
|
||||
entry: poetry -C autogpt_platform/backend install
|
||||
# include autogpt_libs source (since it's a path dependency)
|
||||
files: ^autogpt_platform/(backend|autogpt_libs)/poetry\.lock$
|
||||
types: [file]
|
||||
entry: >
|
||||
bash -c '
|
||||
if [ -n "$PRE_COMMIT_FROM_REF" ]; then
|
||||
git diff --name-only "$PRE_COMMIT_FROM_REF" "$PRE_COMMIT_TO_REF"
|
||||
else
|
||||
git diff --cached --name-only
|
||||
fi | grep -qE "^autogpt_platform/(backend|autogpt_libs)/poetry\.lock$" || exit 0;
|
||||
poetry -C autogpt_platform/backend install
|
||||
'
|
||||
always_run: true
|
||||
language: system
|
||||
pass_filenames: false
|
||||
stages: [pre-commit, post-checkout]
|
||||
|
||||
- id: poetry-install
|
||||
name: Check & Install dependencies - AutoGPT Platform - Libs
|
||||
alias: poetry-install-platform-libs
|
||||
entry: poetry -C autogpt_platform/autogpt_libs install
|
||||
files: ^autogpt_platform/autogpt_libs/poetry\.lock$
|
||||
types: [file]
|
||||
entry: >
|
||||
bash -c '
|
||||
if [ -n "$PRE_COMMIT_FROM_REF" ]; then
|
||||
git diff --name-only "$PRE_COMMIT_FROM_REF" "$PRE_COMMIT_TO_REF"
|
||||
else
|
||||
git diff --cached --name-only
|
||||
fi | grep -qE "^autogpt_platform/autogpt_libs/poetry\.lock$" || exit 0;
|
||||
poetry -C autogpt_platform/autogpt_libs install
|
||||
'
|
||||
always_run: true
|
||||
language: system
|
||||
pass_filenames: false
|
||||
stages: [pre-commit, post-checkout]
|
||||
|
||||
- id: pnpm-install
|
||||
name: Check & Install dependencies - AutoGPT Platform - Frontend
|
||||
alias: pnpm-install-platform-frontend
|
||||
entry: >
|
||||
bash -c '
|
||||
if [ -n "$PRE_COMMIT_FROM_REF" ]; then
|
||||
git diff --name-only "$PRE_COMMIT_FROM_REF" "$PRE_COMMIT_TO_REF"
|
||||
else
|
||||
git diff --cached --name-only
|
||||
fi | grep -qE "^autogpt_platform/frontend/pnpm-lock\.yaml$" || exit 0;
|
||||
pnpm --prefix autogpt_platform/frontend install
|
||||
'
|
||||
always_run: true
|
||||
language: system
|
||||
pass_filenames: false
|
||||
stages: [pre-commit, post-checkout]
|
||||
|
||||
- id: poetry-install
|
||||
name: Check & Install dependencies - Classic - AutoGPT
|
||||
alias: poetry-install-classic-autogpt
|
||||
entry: poetry -C classic/original_autogpt install
|
||||
entry: >
|
||||
bash -c '
|
||||
if [ -n "$PRE_COMMIT_FROM_REF" ]; then
|
||||
git diff --name-only "$PRE_COMMIT_FROM_REF" "$PRE_COMMIT_TO_REF"
|
||||
else
|
||||
git diff --cached --name-only
|
||||
fi | grep -qE "^classic/(original_autogpt|forge)/poetry\.lock$" || exit 0;
|
||||
poetry -C classic/original_autogpt install
|
||||
'
|
||||
# include forge source (since it's a path dependency)
|
||||
files: ^classic/(original_autogpt|forge)/poetry\.lock$
|
||||
types: [file]
|
||||
always_run: true
|
||||
language: system
|
||||
pass_filenames: false
|
||||
stages: [pre-commit, post-checkout]
|
||||
|
||||
- id: poetry-install
|
||||
name: Check & Install dependencies - Classic - Forge
|
||||
alias: poetry-install-classic-forge
|
||||
entry: poetry -C classic/forge install
|
||||
files: ^classic/forge/poetry\.lock$
|
||||
types: [file]
|
||||
entry: >
|
||||
bash -c '
|
||||
if [ -n "$PRE_COMMIT_FROM_REF" ]; then
|
||||
git diff --name-only "$PRE_COMMIT_FROM_REF" "$PRE_COMMIT_TO_REF"
|
||||
else
|
||||
git diff --cached --name-only
|
||||
fi | grep -qE "^classic/forge/poetry\.lock$" || exit 0;
|
||||
poetry -C classic/forge install
|
||||
'
|
||||
always_run: true
|
||||
language: system
|
||||
pass_filenames: false
|
||||
stages: [pre-commit, post-checkout]
|
||||
|
||||
- id: poetry-install
|
||||
name: Check & Install dependencies - Classic - Benchmark
|
||||
alias: poetry-install-classic-benchmark
|
||||
entry: poetry -C classic/benchmark install
|
||||
files: ^classic/benchmark/poetry\.lock$
|
||||
types: [file]
|
||||
entry: >
|
||||
bash -c '
|
||||
if [ -n "$PRE_COMMIT_FROM_REF" ]; then
|
||||
git diff --name-only "$PRE_COMMIT_FROM_REF" "$PRE_COMMIT_TO_REF"
|
||||
else
|
||||
git diff --cached --name-only
|
||||
fi | grep -qE "^classic/benchmark/poetry\.lock$" || exit 0;
|
||||
poetry -C classic/benchmark install
|
||||
'
|
||||
always_run: true
|
||||
language: system
|
||||
pass_filenames: false
|
||||
stages: [pre-commit, post-checkout]
|
||||
|
||||
- repo: local
|
||||
# For proper type checking, Prisma client must be up-to-date.
|
||||
@@ -76,12 +141,54 @@ repos:
|
||||
- id: prisma-generate
|
||||
name: Prisma Generate - AutoGPT Platform - Backend
|
||||
alias: prisma-generate-platform-backend
|
||||
entry: bash -c 'cd autogpt_platform/backend && poetry run prisma generate'
|
||||
entry: >
|
||||
bash -c '
|
||||
if [ -n "$PRE_COMMIT_FROM_REF" ]; then
|
||||
git diff --name-only "$PRE_COMMIT_FROM_REF" "$PRE_COMMIT_TO_REF"
|
||||
else
|
||||
git diff --cached --name-only
|
||||
fi | grep -qE "^autogpt_platform/((backend|autogpt_libs)/poetry\.lock|backend/schema\.prisma)$" || exit 0;
|
||||
cd autogpt_platform/backend
|
||||
&& poetry run prisma generate
|
||||
&& poetry run gen-prisma-stub
|
||||
'
|
||||
# include everything that triggers poetry install + the prisma schema
|
||||
files: ^autogpt_platform/((backend|autogpt_libs)/poetry\.lock|backend/schema.prisma)$
|
||||
types: [file]
|
||||
always_run: true
|
||||
language: system
|
||||
pass_filenames: false
|
||||
stages: [pre-commit, post-checkout]
|
||||
|
||||
- id: export-api-schema
|
||||
name: Export API schema - AutoGPT Platform - Backend -> Frontend
|
||||
alias: export-api-schema-platform
|
||||
entry: >
|
||||
bash -c '
|
||||
cd autogpt_platform/backend
|
||||
&& poetry run export-api-schema --output ../frontend/src/app/api/openapi.json
|
||||
&& cd ../frontend
|
||||
&& pnpm prettier --write ./src/app/api/openapi.json
|
||||
'
|
||||
files: ^autogpt_platform/backend/
|
||||
language: system
|
||||
pass_filenames: false
|
||||
|
||||
- id: generate-api-client
|
||||
name: Generate API client - AutoGPT Platform - Frontend
|
||||
alias: generate-api-client-platform-frontend
|
||||
entry: >
|
||||
bash -c '
|
||||
SCHEMA=autogpt_platform/frontend/src/app/api/openapi.json;
|
||||
if [ -n "$PRE_COMMIT_FROM_REF" ]; then
|
||||
git diff --quiet "$PRE_COMMIT_FROM_REF" "$PRE_COMMIT_TO_REF" -- "$SCHEMA" && exit 0
|
||||
else
|
||||
git diff --quiet HEAD -- "$SCHEMA" && exit 0
|
||||
fi;
|
||||
cd autogpt_platform/frontend && pnpm generate:api
|
||||
'
|
||||
always_run: true
|
||||
language: system
|
||||
pass_filenames: false
|
||||
stages: [pre-commit, post-checkout]
|
||||
|
||||
- repo: https://github.com/astral-sh/ruff-pre-commit
|
||||
rev: v0.7.2
|
||||
|
||||
@@ -1,572 +0,0 @@
|
||||
2026-02-21 20:31:19,811 [34mINFO[0m Initializing LaunchDarkly Client 9.15.0
|
||||
2026-02-21 20:31:19,812 [34mINFO[0m Starting event processor
|
||||
2026-02-21 20:31:19,812 [34mINFO[0m Starting StreamingUpdateProcessor connecting to uri: https://stream.launchdarkly.com/all
|
||||
2026-02-21 20:31:19,812 [34mINFO[0m Waiting up to 5 seconds for LaunchDarkly client to initialize...
|
||||
2026-02-21 20:31:19,812 [34mINFO[0m Connecting to stream at https://stream.launchdarkly.com/all
|
||||
2026-02-21 20:31:20,051 [34mINFO[0m StreamingUpdateProcessor initialized ok.
|
||||
2026-02-21 20:31:20,051 [34mINFO[0m Started LaunchDarkly Client: OK
|
||||
2026-02-21 20:31:20,051 [34mINFO[0m LaunchDarkly client initialized successfully
|
||||
2026-02-21 20:31:21,578 [33mWARNING[0m [33mProvider LINEAR implements OAuth but the required env vars LINEAR_CLIENT_ID and LINEAR_CLIENT_SECRET are not both set[0m
|
||||
2026-02-21 20:31:21,623 [33mWARNING[0m [33mAuthentication error: Langfuse client initialized without public_key. Client will be disabled. Provide a public_key parameter or set LANGFUSE_PUBLIC_KEY environment variable. [0m
|
||||
2026-02-21 20:31:21,796 [34mINFO[0m Metrics endpoint exposed at /metrics for external-api
|
||||
2026-02-21 20:31:21,800 [34mINFO[0m Metrics endpoint exposed at /metrics for rest-api
|
||||
2026-02-21 20:31:21,881 [34mINFO[0m Metrics endpoint exposed at /metrics for websocket-server
|
||||
2026-02-21 20:31:21,913 [33mWARNING[0m [33mPostmark server API token not found, email sending disabled[0m
|
||||
2026-02-21 20:31:21,956 [34mINFO[0m [DatabaseManager] started with PID 6089
|
||||
2026-02-21 20:31:21,958 [34mINFO[0m [Scheduler] started with PID 6090
|
||||
2026-02-21 20:31:21,959 [34mINFO[0m [NotificationManager] started with PID 6091
|
||||
2026-02-21 20:31:21,960 [34mINFO[0m [WebsocketServer] started with PID 6092
|
||||
2026-02-21 20:31:21,961 [34mINFO[0m [AgentServer] started with PID 6093
|
||||
2026-02-21 20:31:21,962 [34mINFO[0m [ExecutionManager] started with PID 6094
|
||||
2026-02-21 20:31:21,963 [34mINFO[0m [CoPilotExecutor] Starting...
|
||||
2026-02-21 20:31:21,963 [34mINFO[0m [CoPilotExecutor] Pod assigned executor_id: fb7d76b3-8dc3-40a4-947e-a93bfad207da
|
||||
2026-02-21 20:31:21,963 [34mINFO[0m [CoPilotExecutor] Spawn max-5 workers...
|
||||
2026-02-21 20:31:21,970 [34mINFO[0m [PID-6048|THREAD-77685505|CoPilotExecutor|RabbitMQ-124e33d7-4877-4745-9778-6b6b06de92d2] Acquiring connection started...
|
||||
2026-02-21 20:31:21,971 [34mINFO[0m [PID-6048|THREAD-77685506|CoPilotExecutor|RabbitMQ-124e33d7-4877-4745-9778-6b6b06de92d2] Acquiring connection started...
|
||||
2026-02-21 20:31:21,973 [34mINFO[0m Pika version 1.3.2 connecting to ('::1', 5672, 0, 0)
|
||||
2026-02-21 20:31:21,973 [34mINFO[0m Pika version 1.3.2 connecting to ('::1', 5672, 0, 0)
|
||||
2026-02-21 20:31:21,974 [34mINFO[0m Socket connected: <socket.socket fd=30, family=30, type=1, proto=6, laddr=('::1', 55999, 0, 0), raddr=('::1', 5672, 0, 0)>
|
||||
2026-02-21 20:31:21,975 [34mINFO[0m Socket connected: <socket.socket fd=29, family=30, type=1, proto=6, laddr=('::1', 55998, 0, 0), raddr=('::1', 5672, 0, 0)>
|
||||
2026-02-21 20:31:21,975 [34mINFO[0m Streaming transport linked up: (<pika.adapters.utils.io_services_utils._AsyncPlaintextTransport object at 0x120f5eba0>, _StreamingProtocolShim: <SelectConnection PROTOCOL transport=<pika.adapters.utils.io_services_utils._AsyncPlaintextTransport object at 0x120f5eba0> params=<ConnectionParameters host=localhost port=5672 virtual_host=/ ssl=False>>).
|
||||
2026-02-21 20:31:21,976 [34mINFO[0m Streaming transport linked up: (<pika.adapters.utils.io_services_utils._AsyncPlaintextTransport object at 0x120fa0410>, _StreamingProtocolShim: <SelectConnection PROTOCOL transport=<pika.adapters.utils.io_services_utils._AsyncPlaintextTransport object at 0x120fa0410> params=<ConnectionParameters host=localhost port=5672 virtual_host=/ ssl=False>>).
|
||||
2026-02-21 20:31:21,990 [34mINFO[0m AMQPConnector - reporting success: <SelectConnection OPEN transport=<pika.adapters.utils.io_services_utils._AsyncPlaintextTransport object at 0x120fa0410> params=<ConnectionParameters host=localhost port=5672 virtual_host=/ ssl=False>>
|
||||
2026-02-21 20:31:21,991 [34mINFO[0m AMQPConnectionWorkflow - reporting success: <SelectConnection OPEN transport=<pika.adapters.utils.io_services_utils._AsyncPlaintextTransport object at 0x120fa0410> params=<ConnectionParameters host=localhost port=5672 virtual_host=/ ssl=False>>
|
||||
2026-02-21 20:31:21,991 [34mINFO[0m AMQPConnector - reporting success: <SelectConnection OPEN transport=<pika.adapters.utils.io_services_utils._AsyncPlaintextTransport object at 0x120f5eba0> params=<ConnectionParameters host=localhost port=5672 virtual_host=/ ssl=False>>
|
||||
2026-02-21 20:31:21,991 [34mINFO[0m Connection workflow succeeded: <SelectConnection OPEN transport=<pika.adapters.utils.io_services_utils._AsyncPlaintextTransport object at 0x120fa0410> params=<ConnectionParameters host=localhost port=5672 virtual_host=/ ssl=False>>
|
||||
2026-02-21 20:31:21,991 [34mINFO[0m AMQPConnectionWorkflow - reporting success: <SelectConnection OPEN transport=<pika.adapters.utils.io_services_utils._AsyncPlaintextTransport object at 0x120f5eba0> params=<ConnectionParameters host=localhost port=5672 virtual_host=/ ssl=False>>
|
||||
2026-02-21 20:31:21,991 [34mINFO[0m Created channel=1
|
||||
2026-02-21 20:31:21,992 [34mINFO[0m Connection workflow succeeded: <SelectConnection OPEN transport=<pika.adapters.utils.io_services_utils._AsyncPlaintextTransport object at 0x120f5eba0> params=<ConnectionParameters host=localhost port=5672 virtual_host=/ ssl=False>>
|
||||
2026-02-21 20:31:21,992 [34mINFO[0m Created channel=1
|
||||
2026-02-21 20:31:22,005 [34mINFO[0m [PID-6048|THREAD-77685505|CoPilotExecutor|RabbitMQ-124e33d7-4877-4745-9778-6b6b06de92d2] Acquiring connection completed successfully.
|
||||
2026-02-21 20:31:22,005 [34mINFO[0m [PID-6048|THREAD-77685506|CoPilotExecutor|RabbitMQ-124e33d7-4877-4745-9778-6b6b06de92d2] Acquiring connection completed successfully.
|
||||
2026-02-21 20:31:22,007 [34mINFO[0m [CoPilotExecutor] Starting to consume cancel messages...
|
||||
2026-02-21 20:31:22,008 [34mINFO[0m [CoPilotExecutor] Starting to consume run messages...
|
||||
2026-02-21 20:31:23,199 [34mINFO[0m Initializing LaunchDarkly Client 9.15.0
|
||||
2026-02-21 20:31:23,201 [34mINFO[0m Starting event processor
|
||||
2026-02-21 20:31:23,202 [34mINFO[0m Starting StreamingUpdateProcessor connecting to uri: https://stream.launchdarkly.com/all
|
||||
2026-02-21 20:31:23,202 [34mINFO[0m Waiting up to 5 seconds for LaunchDarkly client to initialize...
|
||||
2026-02-21 20:31:23,202 [34mINFO[0m Connecting to stream at https://stream.launchdarkly.com/all
|
||||
2026-02-21 20:31:23,331 [34mINFO[0m StreamingUpdateProcessor initialized ok.
|
||||
2026-02-21 20:31:23,331 [34mINFO[0m Started LaunchDarkly Client: OK
|
||||
2026-02-21 20:31:23,332 [34mINFO[0m LaunchDarkly client initialized successfully
|
||||
2026-02-21 20:31:23,891 [34mINFO[0m Initializing LaunchDarkly Client 9.15.0
|
||||
2026-02-21 20:31:23,892 [34mINFO[0m Starting event processor
|
||||
2026-02-21 20:31:23,893 [34mINFO[0m Starting StreamingUpdateProcessor connecting to uri: https://stream.launchdarkly.com/all
|
||||
2026-02-21 20:31:23,893 [34mINFO[0m Waiting up to 5 seconds for LaunchDarkly client to initialize...
|
||||
2026-02-21 20:31:23,893 [34mINFO[0m Connecting to stream at https://stream.launchdarkly.com/all
|
||||
2026-02-21 20:31:23,946 [34mINFO[0m Initializing LaunchDarkly Client 9.15.0
|
||||
2026-02-21 20:31:23,947 [34mINFO[0m Starting event processor
|
||||
2026-02-21 20:31:23,947 [34mINFO[0m Starting StreamingUpdateProcessor connecting to uri: https://stream.launchdarkly.com/all
|
||||
2026-02-21 20:31:23,947 [34mINFO[0m Waiting up to 5 seconds for LaunchDarkly client to initialize...
|
||||
2026-02-21 20:31:23,948 [34mINFO[0m Connecting to stream at https://stream.launchdarkly.com/all
|
||||
2026-02-21 20:31:24,017 [34mINFO[0m StreamingUpdateProcessor initialized ok.
|
||||
2026-02-21 20:31:24,017 [34mINFO[0m Started LaunchDarkly Client: OK
|
||||
2026-02-21 20:31:24,017 [34mINFO[0m LaunchDarkly client initialized successfully
|
||||
2026-02-21 20:31:24,065 [34mINFO[0m StreamingUpdateProcessor initialized ok.
|
||||
2026-02-21 20:31:24,065 [34mINFO[0m Started LaunchDarkly Client: OK
|
||||
2026-02-21 20:31:24,065 [34mINFO[0m LaunchDarkly client initialized successfully
|
||||
2026-02-21 20:31:24,707 [34mINFO[0m [NotificationManager] Starting...
|
||||
2026-02-21 20:31:24,750 [34mINFO[0m Metrics endpoint exposed at /metrics for NotificationManager
|
||||
2026-02-21 20:31:24,754 [34mINFO[0m [PID-6091|THREAD-77685702|NotificationManager|FastAPI server-d17271ed-e3a2-4e93-900b-a0d3bd2b8100] Running FastAPI server started...
|
||||
2026-02-21 20:31:24,755 [34mINFO[0m [NotificationManager] Starting RPC server at http://localhost:8007
|
||||
2026-02-21 20:31:24,756 [34mINFO[0m [NotificationManager] [NotificationManager] ⏳ Configuring RabbitMQ...
|
||||
2026-02-21 20:31:24,757 [34mINFO[0m [PID-6091|THREAD-77685703|NotificationManager|AsyncRabbitMQ-7963c91c-c443-4479-a55e-5e9a8d7d942d] Acquiring async connection started...
|
||||
2026-02-21 20:31:24,775 [34mINFO[0m Started server process [6091]
|
||||
2026-02-21 20:31:24,775 [34mINFO[0m Waiting for application startup.
|
||||
2026-02-21 20:31:24,776 [34mINFO[0m Application startup complete.
|
||||
2026-02-21 20:31:24,777 [31mERROR[0m [31m[Errno 48] error while attempting to bind on address ('::1', 8007, 0, 0): [errno 48] address already in use[0m
|
||||
2026-02-21 20:31:24,781 [34mINFO[0m Waiting for application shutdown.
|
||||
2026-02-21 20:31:24,781 [34mINFO[0m [NotificationManager] ✅ FastAPI has finished
|
||||
2026-02-21 20:31:24,782 [34mINFO[0m Application shutdown complete.
|
||||
2026-02-21 20:31:24,783 [34mINFO[0m [NotificationManager] 🛑 Shared event loop stopped
|
||||
2026-02-21 20:31:24,783 [34mINFO[0m [NotificationManager] 🧹 Running cleanup
|
||||
2026-02-21 20:31:24,783 [34mINFO[0m [NotificationManager] ⏳ Disconnecting RabbitMQ...
|
||||
Process NotificationManager:
|
||||
Traceback (most recent call last):
|
||||
File "/opt/homebrew/Cellar/python@3.13/3.13.1/Frameworks/Python.framework/Versions/3.13/lib/python3.13/multiprocessing/process.py", line 313, in _bootstrap
|
||||
self.run()
|
||||
~~~~~~~~^^
|
||||
File "/opt/homebrew/Cellar/python@3.13/3.13.1/Frameworks/Python.framework/Versions/3.13/lib/python3.13/multiprocessing/process.py", line 108, in run
|
||||
self._target(*self._args, **self._kwargs)
|
||||
~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
File "/Users/majdyz/Code/AutoGPT/autogpt_platform/backend/backend/util/process.py", line 83, in execute_run_command
|
||||
self.cleanup()
|
||||
~~~~~~~~~~~~^^
|
||||
File "/Users/majdyz/Code/AutoGPT/autogpt_platform/backend/backend/notifications/notifications.py", line 1094, in cleanup
|
||||
self.run_and_wait(self.rabbitmq_service.disconnect())
|
||||
~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
File "/Users/majdyz/Code/AutoGPT/autogpt_platform/backend/backend/util/service.py", line 136, in run_and_wait
|
||||
return asyncio.run_coroutine_threadsafe(coro, self.shared_event_loop).result()
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
File "/opt/homebrew/Cellar/python@3.13/3.13.1/Frameworks/Python.framework/Versions/3.13/lib/python3.13/asyncio/tasks.py", line 1003, in run_coroutine_threadsafe
|
||||
loop.call_soon_threadsafe(callback)
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^^
|
||||
File "/opt/homebrew/Cellar/python@3.13/3.13.1/Frameworks/Python.framework/Versions/3.13/lib/python3.13/asyncio/base_events.py", line 873, in call_soon_threadsafe
|
||||
self._check_closed()
|
||||
~~~~~~~~~~~~~~~~~~^^
|
||||
File "/opt/homebrew/Cellar/python@3.13/3.13.1/Frameworks/Python.framework/Versions/3.13/lib/python3.13/asyncio/base_events.py", line 551, in _check_closed
|
||||
raise RuntimeError('Event loop is closed')
|
||||
RuntimeError: Event loop is closed
|
||||
/opt/homebrew/Cellar/python@3.13/3.13.1/Frameworks/Python.framework/Versions/3.13/lib/python3.13/multiprocessing/process.py:327: RuntimeWarning: coroutine 'AsyncRabbitMQ.disconnect' was never awaited
|
||||
traceback.print_exc()
|
||||
RuntimeWarning: Enable tracemalloc to get the object allocation traceback
|
||||
2026-02-21 20:31:24,846 [34mINFO[0m Initializing LaunchDarkly Client 9.15.0
|
||||
2026-02-21 20:31:24,848 [34mINFO[0m Starting event processor
|
||||
2026-02-21 20:31:24,848 [34mINFO[0m Starting StreamingUpdateProcessor connecting to uri: https://stream.launchdarkly.com/all
|
||||
2026-02-21 20:31:24,849 [34mINFO[0m Waiting up to 5 seconds for LaunchDarkly client to initialize...
|
||||
2026-02-21 20:31:24,849 [34mINFO[0m Connecting to stream at https://stream.launchdarkly.com/all
|
||||
2026-02-21 20:31:24,857 [34mINFO[0m Initializing LaunchDarkly Client 9.15.0
|
||||
2026-02-21 20:31:24,858 [34mINFO[0m Starting event processor
|
||||
2026-02-21 20:31:24,858 [34mINFO[0m Starting StreamingUpdateProcessor connecting to uri: https://stream.launchdarkly.com/all
|
||||
2026-02-21 20:31:24,858 [34mINFO[0m Waiting up to 5 seconds for LaunchDarkly client to initialize...
|
||||
2026-02-21 20:31:24,858 [34mINFO[0m Connecting to stream at https://stream.launchdarkly.com/all
|
||||
2026-02-21 20:31:24,862 [34mINFO[0m Initializing LaunchDarkly Client 9.15.0
|
||||
2026-02-21 20:31:24,863 [34mINFO[0m Starting event processor
|
||||
2026-02-21 20:31:24,864 [34mINFO[0m Starting StreamingUpdateProcessor connecting to uri: https://stream.launchdarkly.com/all
|
||||
2026-02-21 20:31:24,864 [34mINFO[0m Waiting up to 5 seconds for LaunchDarkly client to initialize...
|
||||
2026-02-21 20:31:24,864 [34mINFO[0m Connecting to stream at https://stream.launchdarkly.com/all
|
||||
2026-02-21 20:31:24,966 [34mINFO[0m StreamingUpdateProcessor initialized ok.
|
||||
2026-02-21 20:31:24,967 [34mINFO[0m Started LaunchDarkly Client: OK
|
||||
2026-02-21 20:31:24,967 [34mINFO[0m LaunchDarkly client initialized successfully
|
||||
2026-02-21 20:31:24,976 [34mINFO[0m StreamingUpdateProcessor initialized ok.
|
||||
2026-02-21 20:31:24,976 [34mINFO[0m Started LaunchDarkly Client: OK
|
||||
2026-02-21 20:31:24,976 [34mINFO[0m LaunchDarkly client initialized successfully
|
||||
2026-02-21 20:31:24,989 [34mINFO[0m StreamingUpdateProcessor initialized ok.
|
||||
2026-02-21 20:31:24,989 [34mINFO[0m Started LaunchDarkly Client: OK
|
||||
2026-02-21 20:31:24,989 [34mINFO[0m LaunchDarkly client initialized successfully
|
||||
2026-02-21 20:31:25,035 [34mINFO[0m Metrics endpoint exposed at /metrics for websocket-server
|
||||
2026-02-21 20:31:25,036 [34mINFO[0m [WebsocketServer] Starting...
|
||||
2026-02-21 20:31:25,036 [34mINFO[0m CORS allow origins: ['http://localhost:3000', 'http://127.0.0.1:3000']
|
||||
2026-02-21 20:31:25,076 [34mINFO[0m Started server process [6092]
|
||||
2026-02-21 20:31:25,076 [34mINFO[0m Waiting for application startup.
|
||||
2026-02-21 20:31:25,077 [34mINFO[0m Application startup complete.
|
||||
2026-02-21 20:31:25,077 [34mINFO[0m [PID-6092|THREAD-77685501|WebsocketServer|AsyncRedis-b6fb3c5c-0070-4c5c-90eb-922d4f2152c2] Acquiring connection started...
|
||||
2026-02-21 20:31:25,077 [34mINFO[0m [PID-6092|THREAD-77685501|WebsocketServer|AsyncRedis-b6fb3c5c-0070-4c5c-90eb-922d4f2152c2] Acquiring connection started...
|
||||
2026-02-21 20:31:25,078 [31mERROR[0m [31m[Errno 48] error while attempting to bind on address ('0.0.0.0', 8001): address already in use[0m
|
||||
2026-02-21 20:31:25,080 [34mINFO[0m Waiting for application shutdown.
|
||||
2026-02-21 20:31:25,080 [34mINFO[0m Application shutdown complete.
|
||||
2026-02-21 20:31:25,080 [34mINFO[0m Event broadcaster stopped
|
||||
2026-02-21 20:31:25,081 [33mWARNING[0m [33m[WebsocketServer] 🛑 Terminating because of SystemExit: 1[0m
|
||||
2026-02-21 20:31:25,081 [34mINFO[0m [WebsocketServer] 🧹 Running cleanup
|
||||
2026-02-21 20:31:25,081 [34mINFO[0m [WebsocketServer] ✅ Cleanup done
|
||||
2026-02-21 20:31:25,081 [34mINFO[0m [WebsocketServer] 🛑 Terminated
|
||||
2026-02-21 20:31:25,915 [34mINFO[0m [DatabaseManager] Starting...
|
||||
2026-02-21 20:31:25,947 [34mINFO[0m Metrics endpoint exposed at /metrics for DatabaseManager
|
||||
2026-02-21 20:31:25,970 [34mINFO[0m [ExecutionManager] Starting...
|
||||
2026-02-21 20:31:25,970 [34mINFO[0m [GraphExecutor] [ExecutionManager] 🆔 Pod assigned executor_id: 90ff5962-bdc8-456d-a864-01c5f4f199bd
|
||||
2026-02-21 20:31:25,971 [34mINFO[0m [GraphExecutor] [ExecutionManager] ⏳ Spawn max-10 workers...
|
||||
2026-02-21 20:31:25,973 [34mINFO[0m [Scheduler] Starting...
|
||||
2026-02-21 20:31:25,971 [33mWARNING[0m [33m[ExecutionManager] 🛑 Terminating because of OSError: [Errno 48] Address already in use[0m
|
||||
Traceback (most recent call last):
|
||||
File "/Users/majdyz/Code/AutoGPT/autogpt_platform/backend/backend/util/process.py", line 65, in execute_run_command
|
||||
self.run()
|
||||
~~~~~~~~^^
|
||||
File "/Users/majdyz/Code/AutoGPT/autogpt_platform/backend/backend/executor/manager.py", line 1554, in run
|
||||
start_http_server(settings.config.execution_manager_port)
|
||||
~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
File "/Users/majdyz/Code/AutoGPT/autogpt_platform/backend/.venv/lib/python3.13/site-packages/prometheus_client/exposition.py", line 251, in start_wsgi_server
|
||||
httpd = make_server(addr, port, app, TmpServer, handler_class=_SilentHandler)
|
||||
File "/opt/homebrew/Cellar/python@3.13/3.13.1/Frameworks/Python.framework/Versions/3.13/lib/python3.13/wsgiref/simple_server.py", line 150, in make_server
|
||||
server = server_class((host, port), handler_class)
|
||||
File "/opt/homebrew/Cellar/python@3.13/3.13.1/Frameworks/Python.framework/Versions/3.13/lib/python3.13/socketserver.py", line 457, in __init__
|
||||
self.server_bind()
|
||||
~~~~~~~~~~~~~~~~^^
|
||||
File "/opt/homebrew/Cellar/python@3.13/3.13.1/Frameworks/Python.framework/Versions/3.13/lib/python3.13/wsgiref/simple_server.py", line 50, in server_bind
|
||||
HTTPServer.server_bind(self)
|
||||
~~~~~~~~~~~~~~~~~~~~~~^^^^^^
|
||||
File "/opt/homebrew/Cellar/python@3.13/3.13.1/Frameworks/Python.framework/Versions/3.13/lib/python3.13/http/server.py", line 136, in server_bind
|
||||
socketserver.TCPServer.server_bind(self)
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^
|
||||
File "/opt/homebrew/Cellar/python@3.13/3.13.1/Frameworks/Python.framework/Versions/3.13/lib/python3.13/socketserver.py", line 473, in server_bind
|
||||
self.socket.bind(self.server_address)
|
||||
~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^
|
||||
OSError: [Errno 48] Address already in use
|
||||
2026-02-21 20:31:25,978 [34mINFO[0m [ExecutionManager] 🧹 Running cleanup
|
||||
2026-02-21 20:31:25,978 [34mINFO[0m [GraphExecutor] [ExecutionManager][on_graph_executor_stop 6094] 🧹 Starting graceful shutdown...
|
||||
2026-02-21 20:31:25,978 [34mINFO[0m [PID-6094|THREAD-77685503|ExecutionManager|RabbitMQ-5b203f2b-8b80-46b1-8e47-481497e68a82] Acquiring connection started...
|
||||
2026-02-21 20:31:25,980 [34mINFO[0m Pika version 1.3.2 connecting to ('::1', 5672, 0, 0)
|
||||
2026-02-21 20:31:25,981 [34mINFO[0m Socket connected: <socket.socket fd=14, family=30, type=1, proto=6, laddr=('::1', 56040, 0, 0), raddr=('::1', 5672, 0, 0)>
|
||||
2026-02-21 20:31:25,982 [34mINFO[0m Streaming transport linked up: (<pika.adapters.utils.io_services_utils._AsyncPlaintextTransport object at 0x1316cd550>, _StreamingProtocolShim: <SelectConnection PROTOCOL transport=<pika.adapters.utils.io_services_utils._AsyncPlaintextTransport object at 0x1316cd550> params=<ConnectionParameters host=localhost port=5672 virtual_host=/ ssl=False>>).
|
||||
2026-02-21 20:31:25,991 [34mINFO[0m AMQPConnector - reporting success: <SelectConnection OPEN transport=<pika.adapters.utils.io_services_utils._AsyncPlaintextTransport object at 0x1316cd550> params=<ConnectionParameters host=localhost port=5672 virtual_host=/ ssl=False>>
|
||||
2026-02-21 20:31:25,991 [34mINFO[0m AMQPConnectionWorkflow - reporting success: <SelectConnection OPEN transport=<pika.adapters.utils.io_services_utils._AsyncPlaintextTransport object at 0x1316cd550> params=<ConnectionParameters host=localhost port=5672 virtual_host=/ ssl=False>>
|
||||
2026-02-21 20:31:25,991 [34mINFO[0m Connection workflow succeeded: <SelectConnection OPEN transport=<pika.adapters.utils.io_services_utils._AsyncPlaintextTransport object at 0x1316cd550> params=<ConnectionParameters host=localhost port=5672 virtual_host=/ ssl=False>>
|
||||
2026-02-21 20:31:25,991 [34mINFO[0m Created channel=1
|
||||
2026-02-21 20:31:26,001 [34mINFO[0m [PID-6094|THREAD-77685503|ExecutionManager|RabbitMQ-5b203f2b-8b80-46b1-8e47-481497e68a82] Acquiring connection completed successfully.
|
||||
2026-02-21 20:31:26,001 [34mINFO[0m [GraphExecutor] [ExecutionManager][on_graph_executor_stop 6094] ✅ Exec consumer has been signaled to stop
|
||||
2026-02-21 20:31:26,001 [34mINFO[0m [GraphExecutor] [ExecutionManager][on_graph_executor_stop 6094] ✅ Executor shutdown completed
|
||||
2026-02-21 20:31:26,001 [34mINFO[0m [GraphExecutor] [ExecutionManager][on_graph_executor_stop 6094] ✅ Released execution locks
|
||||
2026-02-21 20:31:26,001 [31mERROR[0m [31m[GraphExecutor] [ExecutionManager][on_graph_executor_stop 6094] [run-consumer] ⚠️ Error disconnecting run client: <class 'RuntimeError'> cannot join thread before it is started [0m
|
||||
2026-02-21 20:31:26,003 [34mINFO[0m [PID-6094|THREAD-77685503|ExecutionManager|RabbitMQ-5b203f2b-8b80-46b1-8e47-481497e68a82] Acquiring connection started...
|
||||
2026-02-21 20:31:26,005 [34mINFO[0m Pika version 1.3.2 connecting to ('::1', 5672, 0, 0)
|
||||
2026-02-21 20:31:26,005 [34mINFO[0m Socket connected: <socket.socket fd=20, family=30, type=1, proto=6, laddr=('::1', 56043, 0, 0), raddr=('::1', 5672, 0, 0)>
|
||||
2026-02-21 20:31:26,006 [34mINFO[0m Streaming transport linked up: (<pika.adapters.utils.io_services_utils._AsyncPlaintextTransport object at 0x1318e4cd0>, _StreamingProtocolShim: <SelectConnection PROTOCOL transport=<pika.adapters.utils.io_services_utils._AsyncPlaintextTransport object at 0x1318e4cd0> params=<ConnectionParameters host=localhost port=5672 virtual_host=/ ssl=False>>).
|
||||
2026-02-21 20:31:26,009 [34mINFO[0m Metrics endpoint exposed at /metrics for Scheduler
|
||||
2026-02-21 20:31:26,010 [34mINFO[0m AMQPConnector - reporting success: <SelectConnection OPEN transport=<pika.adapters.utils.io_services_utils._AsyncPlaintextTransport object at 0x1318e4cd0> params=<ConnectionParameters host=localhost port=5672 virtual_host=/ ssl=False>>
|
||||
2026-02-21 20:31:26,010 [34mINFO[0m AMQPConnectionWorkflow - reporting success: <SelectConnection OPEN transport=<pika.adapters.utils.io_services_utils._AsyncPlaintextTransport object at 0x1318e4cd0> params=<ConnectionParameters host=localhost port=5672 virtual_host=/ ssl=False>>
|
||||
2026-02-21 20:31:26,010 [34mINFO[0m Connection workflow succeeded: <SelectConnection OPEN transport=<pika.adapters.utils.io_services_utils._AsyncPlaintextTransport object at 0x1318e4cd0> params=<ConnectionParameters host=localhost port=5672 virtual_host=/ ssl=False>>
|
||||
2026-02-21 20:31:26,011 [34mINFO[0m Created channel=1
|
||||
2026-02-21 20:31:26,015 [34mINFO[0m [PID-6090|THREAD-77685897|Scheduler|FastAPI server-6caca9cc-c4c1-417f-8b83-d96f02472df9] Running FastAPI server started...
|
||||
2026-02-21 20:31:26,016 [34mINFO[0m [Scheduler] Starting RPC server at http://localhost:8003
|
||||
2026-02-21 20:31:26,016 [34mINFO[0m [PID-6094|THREAD-77685503|ExecutionManager|RabbitMQ-5b203f2b-8b80-46b1-8e47-481497e68a82] Acquiring connection completed successfully.
|
||||
2026-02-21 20:31:26,016 [31mERROR[0m [31m[GraphExecutor] [ExecutionManager][on_graph_executor_stop 6094] [cancel-consumer] ⚠️ Error disconnecting run client: <class 'RuntimeError'> cannot join thread before it is started [0m
|
||||
2026-02-21 20:31:26,019 [34mINFO[0m [GraphExecutor] [ExecutionManager][on_graph_executor_stop 6094] ✅ Finished GraphExec cleanup
|
||||
2026-02-21 20:31:26,019 [34mINFO[0m [ExecutionManager] ✅ Cleanup done
|
||||
2026-02-21 20:31:26,019 [34mINFO[0m [ExecutionManager] 🛑 Terminated
|
||||
2026-02-21 20:31:26,188 [34mINFO[0m [PID-6089|THREAD-77685901|DatabaseManager|FastAPI server-7019e67b-30c1-4d08-a0ec-4f0175629d0e] Running FastAPI server started...
|
||||
2026-02-21 20:31:26,189 [34mINFO[0m [DatabaseManager] Starting RPC server at http://localhost:8005
|
||||
2026-02-21 20:31:26,197 [34mINFO[0m [DatabaseManager] ⏳ Connecting to Database...
|
||||
2026-02-21 20:31:26,197 [34mINFO[0m [PID-6089|THREAD-77685902|DatabaseManager|Prisma-64fcde85-3de3-4783-b2c6-789775451cd0] Acquiring connection started...
|
||||
2026-02-21 20:31:26,254 [34mINFO[0m [Scheduler] [APScheduler] Adding job tentatively -- it will be properly scheduled when the scheduler starts
|
||||
2026-02-21 20:31:26,255 [34mINFO[0m [Scheduler] [APScheduler] Adding job tentatively -- it will be properly scheduled when the scheduler starts
|
||||
2026-02-21 20:31:26,255 [34mINFO[0m [Scheduler] [APScheduler] Adding job tentatively -- it will be properly scheduled when the scheduler starts
|
||||
2026-02-21 20:31:26,255 [34mINFO[0m [Scheduler] [APScheduler] Adding job tentatively -- it will be properly scheduled when the scheduler starts
|
||||
2026-02-21 20:31:26,255 [34mINFO[0m [Scheduler] [APScheduler] Adding job tentatively -- it will be properly scheduled when the scheduler starts
|
||||
2026-02-21 20:31:26,255 [34mINFO[0m [Scheduler] [APScheduler] Adding job tentatively -- it will be properly scheduled when the scheduler starts
|
||||
2026-02-21 20:31:26,256 [34mINFO[0m [Scheduler] [APScheduler] Adding job tentatively -- it will be properly scheduled when the scheduler starts
|
||||
2026-02-21 20:31:26,346 [34mINFO[0m [PID-6089|THREAD-77685902|DatabaseManager|Prisma-64fcde85-3de3-4783-b2c6-789775451cd0] Acquiring connection completed successfully.
|
||||
2026-02-21 20:31:26,346 [34mINFO[0m [DatabaseManager] ✅ Ready
|
||||
2026-02-21 20:31:26,347 [31mERROR[0m [31m[Errno 48] error while attempting to bind on address ('::1', 8005, 0, 0): [errno 48] address already in use[0m
|
||||
2026-02-21 20:31:26,349 [34mINFO[0m [DatabaseManager] ⏳ Disconnecting Database...
|
||||
2026-02-21 20:31:26,349 [34mINFO[0m [PID-6089|THREAD-77685902|DatabaseManager|Prisma-2397ec31-7da6-4598-a012-6c48f17ea97f] Releasing connection started...
|
||||
2026-02-21 20:31:26,350 [34mINFO[0m [PID-6089|THREAD-77685902|DatabaseManager|Prisma-2397ec31-7da6-4598-a012-6c48f17ea97f] Releasing connection completed successfully.
|
||||
2026-02-21 20:31:26,351 [34mINFO[0m [DatabaseManager] ✅ FastAPI has finished
|
||||
2026-02-21 20:31:26,351 [34mINFO[0m [DatabaseManager] 🛑 Shared event loop stopped
|
||||
2026-02-21 20:31:26,351 [34mINFO[0m [DatabaseManager] 🧹 Running cleanup
|
||||
Process DatabaseManager:
|
||||
Traceback (most recent call last):
|
||||
File "/opt/homebrew/Cellar/python@3.13/3.13.1/Frameworks/Python.framework/Versions/3.13/lib/python3.13/multiprocessing/process.py", line 313, in _bootstrap
|
||||
self.run()
|
||||
~~~~~~~~^^
|
||||
File "/opt/homebrew/Cellar/python@3.13/3.13.1/Frameworks/Python.framework/Versions/3.13/lib/python3.13/multiprocessing/process.py", line 108, in run
|
||||
self._target(*self._args, **self._kwargs)
|
||||
~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
File "/Users/majdyz/Code/AutoGPT/autogpt_platform/backend/backend/util/process.py", line 83, in execute_run_command
|
||||
self.cleanup()
|
||||
~~~~~~~~~~~~^^
|
||||
File "/Users/majdyz/Code/AutoGPT/autogpt_platform/backend/backend/util/service.py", line 153, in cleanup
|
||||
self.shared_event_loop.call_soon_threadsafe(self.shared_event_loop.stop)
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
File "/opt/homebrew/Cellar/python@3.13/3.13.1/Frameworks/Python.framework/Versions/3.13/lib/python3.13/asyncio/base_events.py", line 873, in call_soon_threadsafe
|
||||
self._check_closed()
|
||||
~~~~~~~~~~~~~~~~~~^^
|
||||
File "/opt/homebrew/Cellar/python@3.13/3.13.1/Frameworks/Python.framework/Versions/3.13/lib/python3.13/asyncio/base_events.py", line 551, in _check_closed
|
||||
raise RuntimeError('Event loop is closed')
|
||||
RuntimeError: Event loop is closed
|
||||
2026-02-21 20:31:26,382 [34mINFO[0m [Scheduler] [APScheduler] Added job "process_weekly_summary" to job store "weekly_notifications"
|
||||
2026-02-21 20:31:26,390 [34mINFO[0m [Scheduler] [APScheduler] Added job "report_late_executions" to job store "execution"
|
||||
2026-02-21 20:31:26,392 [34mINFO[0m [Scheduler] [APScheduler] Added job "report_block_error_rates" to job store "execution"
|
||||
2026-02-21 20:31:26,395 [34mINFO[0m [Scheduler] [APScheduler] Added job "cleanup_expired_files" to job store "execution"
|
||||
2026-02-21 20:31:26,397 [34mINFO[0m [Scheduler] [APScheduler] Added job "cleanup_oauth_tokens" to job store "execution"
|
||||
2026-02-21 20:31:26,399 [34mINFO[0m [Scheduler] [APScheduler] Added job "execution_accuracy_alerts" to job store "execution"
|
||||
2026-02-21 20:31:26,401 [34mINFO[0m [Scheduler] [APScheduler] Added job "ensure_embeddings_coverage" to job store "execution"
|
||||
2026-02-21 20:31:26,401 [34mINFO[0m [Scheduler] [APScheduler] Scheduler started
|
||||
2026-02-21 20:31:26,402 [34mINFO[0m [Scheduler] Running embedding backfill on startup...
|
||||
2026-02-21 20:31:26,440 [33mWARNING[0m [33mProvider LINEAR implements OAuth but the required env vars LINEAR_CLIENT_ID and LINEAR_CLIENT_SECRET are not both set[0m
|
||||
2026-02-21 20:31:26,468 [34mINFO[0m [PID-6090|THREAD-77685499|Scheduler|AppService client-24942e64-d380-4d36-a245-5c41172e5293] Creating service client started...
|
||||
2026-02-21 20:31:26,468 [34mINFO[0m [PID-6090|THREAD-77685499|Scheduler|AppService client-24942e64-d380-4d36-a245-5c41172e5293] Creating service client completed successfully.
|
||||
2026-02-21 20:31:26,485 [33mWARNING[0m [33mAuthentication error: Langfuse client initialized without public_key. Client will be disabled. Provide a public_key parameter or set LANGFUSE_PUBLIC_KEY environment variable. [0m
|
||||
2026-02-21 20:31:26,652 [34mINFO[0m Metrics endpoint exposed at /metrics for external-api
|
||||
2026-02-21 20:31:26,655 [34mINFO[0m Metrics endpoint exposed at /metrics for rest-api
|
||||
2026-02-21 20:31:26,735 [34mINFO[0m [AgentServer] Starting...
|
||||
2026-02-21 20:31:26,745 [34mINFO[0m Started server process [6093]
|
||||
2026-02-21 20:31:26,745 [34mINFO[0m Waiting for application startup.
|
||||
2026-02-21 20:31:26,746 [33mWARNING[0m [33m⚠️ JWT_SIGN_ALGORITHM is set to 'HS256', a symmetric shared-key signature algorithm. We highly recommend using an asymmetric algorithm such as ES256, because when leaked, a shared secret would allow anyone to forge valid tokens and impersonate users. More info: https://supabase.com/docs/guides/auth/signing-keys#choosing-the-right-signing-algorithm[0m
|
||||
2026-02-21 20:31:26,747 [34mINFO[0m [PID-6093|THREAD-77685502|AgentServer|Prisma-9d930243-0262-4697-b4af-e0bcbec281c4] Acquiring connection started...
|
||||
2026-02-21 20:31:26,812 [34mINFO[0m [PID-6093|THREAD-77685502|AgentServer|Prisma-9d930243-0262-4697-b4af-e0bcbec281c4] Acquiring connection completed successfully.
|
||||
2026-02-21 20:31:26,825 [34mINFO[0m Thread pool size set to 60 for sync endpoint/dependency performance
|
||||
2026-02-21 20:31:26,825 [34mINFO[0m Successfully patched IntegrationCredentialsStore.get_all_creds
|
||||
2026-02-21 20:31:26,825 [34mINFO[0m Syncing provider costs to blocks...
|
||||
2026-02-21 20:31:27,576 [33mWARNING[0m [33mProvider WORDPRESS implements OAuth but the required env vars WORDPRESS_CLIENT_ID and WORDPRESS_CLIENT_SECRET are not both set[0m
|
||||
2026-02-21 20:31:27,631 [34mINFO[0m Registered 1 custom costs for block FirecrawlExtractBlock
|
||||
/Users/majdyz/Code/AutoGPT/autogpt_platform/backend/backend/blocks/exa/helpers.py:56: UserWarning: Field name "schema" in "SummarySettings" shadows an attribute in parent "BaseModel"
|
||||
class SummarySettings(BaseModel):
|
||||
2026-02-21 20:31:27,954 [33mWARNING[0m [33mProvider AIRTABLE implements OAuth but the required env vars AIRTABLE_CLIENT_ID and AIRTABLE_CLIENT_SECRET are not both set[0m
|
||||
2026-02-21 20:31:29,238 [34mINFO[0m Successfully patched IntegrationCredentialsStore.get_all_creds
|
||||
2026-02-21 20:31:29,397 [33mWARNING[0m [33mBlock WordPressCreatePostBlock credential input 'credentials' provider 'wordpress' has no authentication methods configured - Disabling[0m
|
||||
2026-02-21 20:31:29,397 [33mWARNING[0m [33mBlock WordPressGetAllPostsBlock credential input 'credentials' provider 'wordpress' has no authentication methods configured - Disabling[0m
|
||||
2026-02-21 20:31:29,465 [34mINFO[0m Synced 82 costs to 82 blocks
|
||||
2026-02-21 20:31:29,466 [33mWARNING[0m [33mExecuting <Task pending name='Task-2' coro=<LifespanOn.main() running at /Users/majdyz/Code/AutoGPT/autogpt_platform/backend/.venv/lib/python3.13/site-packages/uvicorn/lifespan/on.py:86> created at /Users/majdyz/Code/AutoGPT/autogpt_platform/backend/.venv/lib/python3.13/site-packages/uvicorn/lifespan/on.py:51> took 2.654 seconds[0m
|
||||
2026-02-21 20:31:29,511 [34mINFO[0m [Scheduler] All content has embeddings, skipping backfill
|
||||
2026-02-21 20:31:29,512 [34mINFO[0m [Scheduler] Running cleanup for orphaned embeddings (blocks/docs)...
|
||||
2026-02-21 20:31:29,542 [34mINFO[0m [Scheduler] Cleanup completed: no orphaned embeddings found
|
||||
2026-02-21 20:31:29,542 [34mINFO[0m [Scheduler] Startup embedding backfill complete: {'backfill': {'processed': 0, 'success': 0, 'failed': 0}, 'cleanup': {'deleted': 0}}
|
||||
2026-02-21 20:31:29,553 [34mINFO[0m Started server process [6090]
|
||||
2026-02-21 20:31:29,553 [34mINFO[0m Waiting for application startup.
|
||||
2026-02-21 20:31:29,554 [34mINFO[0m Application startup complete.
|
||||
2026-02-21 20:31:29,555 [34mINFO[0m Uvicorn running on http://localhost:8003 (Press CTRL+C to quit)
|
||||
2026-02-21 20:31:31,074 [34mINFO[0m Migrating integration credentials for 0 users
|
||||
2026-02-21 20:31:31,087 [34mINFO[0m Fixing LLM credential inputs on 0 nodes
|
||||
2026-02-21 20:31:31,087 [34mINFO[0m Migrating LLM models
|
||||
2026-02-21 20:31:31,107 [34mINFO[0m Migrated 0 node triggers to triggered presets
|
||||
2026-02-21 20:31:31,107 [34mINFO[0m [PID-6093|THREAD-77685502|AgentServer|AsyncRedis-f8b888fc-8b03-4807-adfd-c93710c11c85] Acquiring connection started...
|
||||
2026-02-21 20:31:31,114 [34mINFO[0m [PID-6093|THREAD-77685502|AgentServer|AsyncRedis-f8b888fc-8b03-4807-adfd-c93710c11c85] Acquiring connection completed successfully.
|
||||
2026-02-21 20:31:31,115 [34mINFO[0m Created consumer group 'chat_consumers' on stream 'chat:completions'
|
||||
2026-02-21 20:31:31,115 [34mINFO[0m Chat completion consumer started (consumer: consumer-2f92959a)
|
||||
2026-02-21 20:31:31,116 [34mINFO[0m Application startup complete.
|
||||
2026-02-21 20:31:31,117 [34mINFO[0m Uvicorn running on http://0.0.0.0:8006 (Press CTRL+C to quit)
|
||||
2026-02-21 20:31:45,616 [34mINFO[0m 127.0.0.1:56174 - "GET /api/health HTTP/1.1" 404
|
||||
2026-02-21 20:32:07,632 [34mINFO[0m 127.0.0.1:56317 - "GET /openapi.json HTTP/1.1" 200
|
||||
2026-02-21 20:32:07,635 [33mWARNING[0m [33mExecuting <Task finished name='Task-7' coro=<RequestResponseCycle.run_asgi() done, defined at /Users/majdyz/Code/AutoGPT/autogpt_platform/backend/.venv/lib/python3.13/site-packages/uvicorn/protocols/http/httptools_impl.py:414> result=None created at /Users/majdyz/Code/AutoGPT/autogpt_platform/backend/.venv/lib/python3.13/site-packages/uvicorn/protocols/http/httptools_impl.py:295> took 0.346 seconds[0m
|
||||
2026-02-21 20:32:41,502 [34mINFO[0m 127.0.0.1:56681 - "POST /api/v2/chat/sessions HTTP/1.1" 404
|
||||
2026-02-21 20:32:50,005 [34mINFO[0m 127.0.0.1:56736 - "GET /api/docs HTTP/1.1" 404
|
||||
2026-02-21 20:33:10,267 [34mINFO[0m 127.0.0.1:56898 - "GET /openapi.json HTTP/1.1" 200
|
||||
2026-02-21 20:33:28,399 [34mINFO[0m 127.0.0.1:56993 - "POST /api/chat/sessions HTTP/1.1" 401
|
||||
2026-02-21 20:34:20,913 [34mINFO[0m 127.0.0.1:57313 - "GET /openapi.json HTTP/1.1" 200
|
||||
2026-02-21 20:36:26,326 [34mINFO[0m Running job "report_late_executions (trigger: interval[0:05:00], next run at: 2026-02-21 13:36:26 UTC)" (scheduled at 2026-02-21 13:36:26.255260+00:00)
|
||||
2026-02-21 20:36:26,333 [34mINFO[0m [PID-6090|THREAD-77695300|Scheduler|AppService client-24942e64-d380-4d36-a245-5c41172e5293] Creating service client started...
|
||||
2026-02-21 20:36:26,336 [34mINFO[0m [PID-6090|THREAD-77695300|Scheduler|AppService client-24942e64-d380-4d36-a245-5c41172e5293] Creating service client completed successfully.
|
||||
2026-02-21 20:36:26,336 [34mINFO[0m [PID-6090|THREAD-77695300|Scheduler|AppService client-24942e64-d380-4d36-a245-5c41172e5293] Creating service client started...
|
||||
2026-02-21 20:36:26,340 [34mINFO[0m [PID-6090|THREAD-77695300|Scheduler|AppService client-24942e64-d380-4d36-a245-5c41172e5293] Creating service client completed successfully.
|
||||
2026-02-21 20:36:26,439 [33mWARNING[0m [33mService communication: Retry attempt 1 for '_call_method_sync': HTTPServerError: HTTP 500: Server error '500 Internal Server Error' for url 'http://localhost:8005/get_graph_executions'
|
||||
For more information check: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/500[0m
|
||||
2026-02-21 20:36:27,802 [33mWARNING[0m [33mService communication: Retry attempt 2 for '_call_method_sync': HTTPServerError: HTTP 500: Server error '500 Internal Server Error' for url 'http://localhost:8005/get_graph_executions'
|
||||
For more information check: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/500[0m
|
||||
2026-02-21 20:36:30,362 [33mWARNING[0m [33mService communication: Retry attempt 3 for '_call_method_sync': HTTPServerError: HTTP 500: Server error '500 Internal Server Error' for url 'http://localhost:8005/get_graph_executions'
|
||||
For more information check: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/500[0m
|
||||
2026-02-21 20:36:34,885 [33mWARNING[0m [33mService communication: Retry attempt 4 for '_call_method_sync': HTTPServerError: HTTP 500: Server error '500 Internal Server Error' for url 'http://localhost:8005/get_graph_executions'
|
||||
For more information check: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/500[0m
|
||||
2026-02-21 20:36:43,438 [33mWARNING[0m [33mService communication: Retry attempt 5 for '_call_method_sync': HTTPServerError: HTTP 500: Server error '500 Internal Server Error' for url 'http://localhost:8005/get_graph_executions'
|
||||
For more information check: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/500[0m
|
||||
2026-02-21 20:36:59,905 [33mWARNING[0m [33mService communication: Retry attempt 6 for '_call_method_sync': HTTPServerError: HTTP 500: Server error '500 Internal Server Error' for url 'http://localhost:8005/get_graph_executions'
|
||||
For more information check: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/500[0m
|
||||
2026-02-21 20:37:12,581 [33mWARNING[0m [33mExecuting <Task pending name='Task-13' coro=<RequestResponseCycle.run_asgi() running at /Users/majdyz/Code/AutoGPT/autogpt_platform/backend/.venv/lib/python3.13/site-packages/uvicorn/protocols/http/httptools_impl.py:416> cb=[set.discard()] created at /Users/majdyz/Code/AutoGPT/autogpt_platform/backend/.venv/lib/python3.13/site-packages/uvicorn/protocols/http/httptools_impl.py:295> took 0.109 seconds[0m
|
||||
2026-02-21 20:37:12,767 [34mINFO[0m 127.0.0.1:58472 - "GET /api/store/profile HTTP/1.1" 404
|
||||
2026-02-21 20:37:12,886 [34mINFO[0m 127.0.0.1:58469 - "GET /api/chat/sessions?limit=50 HTTP/1.1" 200
|
||||
/Users/majdyz/Code/AutoGPT/autogpt_platform/backend/.venv/lib/python3.13/site-packages/pyiceberg/expressions/parser.py:72: PyparsingDeprecationWarning: 'enablePackrat' deprecated - use 'enable_packrat'
|
||||
ParserElement.enablePackrat()
|
||||
/Users/majdyz/Code/AutoGPT/autogpt_platform/backend/.venv/lib/python3.13/site-packages/pyiceberg/expressions/parser.py:85: PyparsingDeprecationWarning: 'escChar' argument is deprecated, use 'esc_char'
|
||||
quoted_identifier = QuotedString('"', escChar="\\", unquoteResults=True)
|
||||
/Users/majdyz/Code/AutoGPT/autogpt_platform/backend/.venv/lib/python3.13/site-packages/pyiceberg/expressions/parser.py:85: PyparsingDeprecationWarning: 'unquoteResults' argument is deprecated, use 'unquote_results'
|
||||
quoted_identifier = QuotedString('"', escChar="\\", unquoteResults=True)
|
||||
/Users/majdyz/Code/AutoGPT/autogpt_platform/backend/.venv/lib/python3.13/site-packages/pyiceberg/table/metadata.py:365: PydanticDeprecatedSince212: Using `@model_validator` with mode='after' on a classmethod is deprecated. Instead, use an instance method. See the documentation at https://docs.pydantic.dev/2.12/concepts/validators/#model-after-validator. Deprecated in Pydantic V2.12 to be removed in V3.0.
|
||||
@model_validator(mode="after")
|
||||
/Users/majdyz/Code/AutoGPT/autogpt_platform/backend/.venv/lib/python3.13/site-packages/pyiceberg/table/metadata.py:494: PydanticDeprecatedSince212: Using `@model_validator` with mode='after' on a classmethod is deprecated. Instead, use an instance method. See the documentation at https://docs.pydantic.dev/2.12/concepts/validators/#model-after-validator. Deprecated in Pydantic V2.12 to be removed in V3.0.
|
||||
@model_validator(mode="after")
|
||||
/Users/majdyz/Code/AutoGPT/autogpt_platform/backend/.venv/lib/python3.13/site-packages/pyiceberg/table/metadata.py:498: PydanticDeprecatedSince212: Using `@model_validator` with mode='after' on a classmethod is deprecated. Instead, use an instance method. See the documentation at https://docs.pydantic.dev/2.12/concepts/validators/#model-after-validator. Deprecated in Pydantic V2.12 to be removed in V3.0.
|
||||
@model_validator(mode="after")
|
||||
/Users/majdyz/Code/AutoGPT/autogpt_platform/backend/.venv/lib/python3.13/site-packages/pyiceberg/table/metadata.py:502: PydanticDeprecatedSince212: Using `@model_validator` with mode='after' on a classmethod is deprecated. Instead, use an instance method. See the documentation at https://docs.pydantic.dev/2.12/concepts/validators/#model-after-validator. Deprecated in Pydantic V2.12 to be removed in V3.0.
|
||||
@model_validator(mode="after")
|
||||
/Users/majdyz/Code/AutoGPT/autogpt_platform/backend/.venv/lib/python3.13/site-packages/pyiceberg/table/metadata.py:506: PydanticDeprecatedSince212: Using `@model_validator` with mode='after' on a classmethod is deprecated. Instead, use an instance method. See the documentation at https://docs.pydantic.dev/2.12/concepts/validators/#model-after-validator. Deprecated in Pydantic V2.12 to be removed in V3.0.
|
||||
@model_validator(mode="after")
|
||||
/Users/majdyz/Code/AutoGPT/autogpt_platform/backend/.venv/lib/python3.13/site-packages/pyiceberg/table/metadata.py:538: PydanticDeprecatedSince212: Using `@model_validator` with mode='after' on a classmethod is deprecated. Instead, use an instance method. See the documentation at https://docs.pydantic.dev/2.12/concepts/validators/#model-after-validator. Deprecated in Pydantic V2.12 to be removed in V3.0.
|
||||
@model_validator(mode="after")
|
||||
/Users/majdyz/Code/AutoGPT/autogpt_platform/backend/.venv/lib/python3.13/site-packages/pyiceberg/table/metadata.py:542: PydanticDeprecatedSince212: Using `@model_validator` with mode='after' on a classmethod is deprecated. Instead, use an instance method. See the documentation at https://docs.pydantic.dev/2.12/concepts/validators/#model-after-validator. Deprecated in Pydantic V2.12 to be removed in V3.0.
|
||||
@model_validator(mode="after")
|
||||
/Users/majdyz/Code/AutoGPT/autogpt_platform/backend/.venv/lib/python3.13/site-packages/pyiceberg/table/metadata.py:546: PydanticDeprecatedSince212: Using `@model_validator` with mode='after' on a classmethod is deprecated. Instead, use an instance method. See the documentation at https://docs.pydantic.dev/2.12/concepts/validators/#model-after-validator. Deprecated in Pydantic V2.12 to be removed in V3.0.
|
||||
@model_validator(mode="after")
|
||||
/Users/majdyz/Code/AutoGPT/autogpt_platform/backend/.venv/lib/python3.13/site-packages/pyiceberg/table/metadata.py:550: PydanticDeprecatedSince212: Using `@model_validator` with mode='after' on a classmethod is deprecated. Instead, use an instance method. See the documentation at https://docs.pydantic.dev/2.12/concepts/validators/#model-after-validator. Deprecated in Pydantic V2.12 to be removed in V3.0.
|
||||
@model_validator(mode="after")
|
||||
2026-02-21 20:37:14,074 [34mINFO[0m 127.0.0.1:58470 - "GET /api/executions HTTP/1.1" 200
|
||||
2026-02-21 20:37:14,081 [33mWARNING[0m [33mExecuting <Task finished name='Task-14' coro=<RequestResponseCycle.run_asgi() done, defined at /Users/majdyz/Code/AutoGPT/autogpt_platform/backend/.venv/lib/python3.13/site-packages/uvicorn/protocols/http/httptools_impl.py:414> result=None created at /Users/majdyz/Code/AutoGPT/autogpt_platform/backend/.venv/lib/python3.13/site-packages/uvicorn/protocols/http/httptools_impl.py:295> took 1.169 seconds[0m
|
||||
2026-02-21 20:37:15,102 [33mWARNING[0m [33mExecuting <Task pending name='Task-1' coro=<Server.serve() running at /Users/majdyz/Code/AutoGPT/autogpt_platform/backend/.venv/lib/python3.13/site-packages/uvicorn/server.py:71> wait_for=<Future pending cb=[Task.task_wakeup()] created at /opt/homebrew/Cellar/python@3.13/3.13.1/Frameworks/Python.framework/Versions/3.13/lib/python3.13/asyncio/tasks.py:713> cb=[run_until_complete.<locals>.done_cb()] created at /opt/homebrew/Cellar/python@3.13/3.13.1/Frameworks/Python.framework/Versions/3.13/lib/python3.13/asyncio/runners.py:100> took 0.224 seconds[0m
|
||||
2026-02-21 20:37:17,085 [34mINFO[0m 127.0.0.1:58530 - "GET /api/store/profile HTTP/1.1" 404
|
||||
2026-02-21 20:37:20,772 [33mWARNING[0m [33mExecuting <Task pending name='Task-1' coro=<Server.serve() running at /Users/majdyz/Code/AutoGPT/autogpt_platform/backend/.venv/lib/python3.13/site-packages/uvicorn/server.py:71> wait_for=<Future pending cb=[Task.task_wakeup()] created at /opt/homebrew/Cellar/python@3.13/3.13.1/Frameworks/Python.framework/Versions/3.13/lib/python3.13/asyncio/tasks.py:713> cb=[run_until_complete.<locals>.done_cb()] created at /opt/homebrew/Cellar/python@3.13/3.13.1/Frameworks/Python.framework/Versions/3.13/lib/python3.13/asyncio/runners.py:100> took 0.261 seconds[0m
|
||||
2026-02-21 20:37:21,276 [34mINFO[0m 127.0.0.1:58568 - "GET /api/integrations/providers/system HTTP/1.1" 200
|
||||
2026-02-21 20:37:21,309 [33mWARNING[0m [33mExecuting <Task finished name='Task-23' coro=<RequestResponseCycle.run_asgi() done, defined at /Users/majdyz/Code/AutoGPT/autogpt_platform/backend/.venv/lib/python3.13/site-packages/uvicorn/protocols/http/httptools_impl.py:414> result=None created at /Users/majdyz/Code/AutoGPT/autogpt_platform/backend/.venv/lib/python3.13/site-packages/uvicorn/protocols/http/httptools_impl.py:295> took 0.158 seconds[0m
|
||||
2026-02-21 20:37:21,329 [34mINFO[0m 127.0.0.1:58570 - "GET /api/integrations/providers HTTP/1.1" 200
|
||||
2026-02-21 20:37:21,421 [33mWARNING[0m [33mExecuting <Task finished name='Task-24' coro=<RequestResponseCycle.run_asgi() done, defined at /Users/majdyz/Code/AutoGPT/autogpt_platform/backend/.venv/lib/python3.13/site-packages/uvicorn/protocols/http/httptools_impl.py:414> result=None created at /Users/majdyz/Code/AutoGPT/autogpt_platform/backend/.venv/lib/python3.13/site-packages/uvicorn/protocols/http/httptools_impl.py:295> took 0.110 seconds[0m
|
||||
2026-02-21 20:37:22,406 [34mINFO[0m 127.0.0.1:58590 - "GET /api/store/profile HTTP/1.1" 404
|
||||
2026-02-21 20:37:22,430 [34mINFO[0m 127.0.0.1:58588 - "GET /api/onboarding HTTP/1.1" 200
|
||||
2026-02-21 20:37:22,453 [34mINFO[0m 127.0.0.1:58570 - "GET /api/executions HTTP/1.1" 200
|
||||
2026-02-21 20:37:22,476 [34mINFO[0m Loaded session 322af5c3-70fc-4a06-9443-8c5df0aa0c9f from DB: has_messages=True, message_count=11, roles=['user', 'assistant', 'tool', 'assistant', 'tool', 'assistant', 'tool', 'tool', 'assistant', 'tool', 'tool']
|
||||
2026-02-21 20:37:22,485 [34mINFO[0m Cached session 322af5c3-70fc-4a06-9443-8c5df0aa0c9f from database
|
||||
2026-02-21 20:37:22,510 [34mINFO[0m 127.0.0.1:58568 - "GET /api/library/agents?page=1&page_size=100 HTTP/1.1" 200
|
||||
2026-02-21 20:37:22,515 [34mINFO[0m [GET_SESSION] session=322af5c3-70fc-4a06-9443-8c5df0aa0c9f, active_task=False, msg_count=11, last_role=tool
|
||||
2026-02-21 20:37:22,524 [34mINFO[0m 127.0.0.1:58599 - "GET /api/chat/sessions/322af5c3-70fc-4a06-9443-8c5df0aa0c9f HTTP/1.1" 200
|
||||
2026-02-21 20:37:22,535 [34mINFO[0m 127.0.0.1:58607 - "GET /api/chat/sessions?limit=50 HTTP/1.1" 200
|
||||
2026-02-21 20:37:22,608 [34mINFO[0m 127.0.0.1:58568 - "GET /api/integrations/credentials HTTP/1.1" 200
|
||||
2026-02-21 20:37:23,531 [34mINFO[0m 127.0.0.1:58568 - "GET /api/store/profile HTTP/1.1" 404
|
||||
2026-02-21 20:37:25,612 [34mINFO[0m 127.0.0.1:58568 - "GET /api/store/profile HTTP/1.1" 404
|
||||
2026-02-21 20:37:29,708 [34mINFO[0m 127.0.0.1:58671 - "GET /api/store/profile HTTP/1.1" 404
|
||||
2026-02-21 20:37:29,975 [33mWARNING[0m [33mService communication: Retry attempt 7 for '_call_method_sync': HTTPServerError: HTTP 500: Server error '500 Internal Server Error' for url 'http://localhost:8005/get_graph_executions'
|
||||
For more information check: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/500[0m
|
||||
2026-02-21 20:37:34,125 [34mINFO[0m [TIMING] stream_chat_post STARTED, session=322af5c3-70fc-4a06-9443-8c5df0aa0c9f, user=68383665-d3d9-41f3-b10c-fca0dc6080ed, message_len=36
|
||||
2026-02-21 20:37:34,134 [34mINFO[0m Loading session 322af5c3-70fc-4a06-9443-8c5df0aa0c9f from cache: message_count=11, roles=['user', 'assistant', 'tool', 'assistant', 'tool', 'assistant', 'tool', 'tool', 'assistant', 'tool', 'tool']
|
||||
2026-02-21 20:37:34,135 [34mINFO[0m [TIMING] session validated in 10.6ms
|
||||
2026-02-21 20:37:34,136 [34mINFO[0m [STREAM] Saving user message to session 322af5c3-70fc-4a06-9443-8c5df0aa0c9f
|
||||
2026-02-21 20:37:34,138 [34mINFO[0m Loading session 322af5c3-70fc-4a06-9443-8c5df0aa0c9f from cache: message_count=11, roles=['user', 'assistant', 'tool', 'assistant', 'tool', 'assistant', 'tool', 'tool', 'assistant', 'tool', 'tool']
|
||||
2026-02-21 20:37:34,168 [34mINFO[0m Saving 1 new messages to DB for session 322af5c3-70fc-4a06-9443-8c5df0aa0c9f: roles=['user'], start_sequence=11
|
||||
2026-02-21 20:37:34,201 [34mINFO[0m [STREAM] User message saved for session 322af5c3-70fc-4a06-9443-8c5df0aa0c9f
|
||||
2026-02-21 20:37:34,202 [34mINFO[0m [TIMING] create_task STARTED, task=bba63941-8048-4f39-9329-8568e5ebe9cd, session=322af5c3-70fc-4a06-9443-8c5df0aa0c9f, user=68383665-d3d9-41f3-b10c-fca0dc6080ed
|
||||
2026-02-21 20:37:34,202 [34mINFO[0m [TIMING] get_redis_async took 0.0ms
|
||||
2026-02-21 20:37:34,205 [34mINFO[0m [TIMING] redis.hset took 2.9ms
|
||||
2026-02-21 20:37:34,208 [34mINFO[0m [TIMING] create_task COMPLETED in 6.1ms; task=bba63941-8048-4f39-9329-8568e5ebe9cd, session=322af5c3-70fc-4a06-9443-8c5df0aa0c9f
|
||||
2026-02-21 20:37:34,208 [34mINFO[0m [TIMING] create_task completed in 6.8ms
|
||||
2026-02-21 20:37:34,210 [34mINFO[0m [PID-6093|THREAD-77685502|AgentServer|AsyncRabbitMQ-bbe1cabd-35fe-4944-89d1-fddd09c93923] Acquiring async connection started...
|
||||
2026-02-21 20:37:34,296 [34mINFO[0m [PID-6093|THREAD-77685502|AgentServer|AsyncRabbitMQ-bbe1cabd-35fe-4944-89d1-fddd09c93923] Acquiring async connection completed successfully.
|
||||
2026-02-21 20:37:34,305 [34mINFO[0m [TIMING] Task enqueued to RabbitMQ, setup=180.6ms
|
||||
2026-02-21 20:37:34,307 [34mINFO[0m [TIMING] event_generator STARTED, task=bba63941-8048-4f39-9329-8568e5ebe9cd, session=322af5c3-70fc-4a06-9443-8c5df0aa0c9f, user=68383665-d3d9-41f3-b10c-fca0dc6080ed
|
||||
2026-02-21 20:37:34,307 [34mINFO[0m [TIMING] subscribe_to_task STARTED, task=bba63941-8048-4f39-9329-8568e5ebe9cd, user=68383665-d3d9-41f3-b10c-fca0dc6080ed, last_msg=0-0
|
||||
2026-02-21 20:37:34,309 [34mINFO[0m [TIMING] Redis hgetall took 2.1ms
|
||||
2026-02-21 20:37:34,353 [34mINFO[0m [PID-6048|THREAD-77685506|CoPilotExecutor|Redis-943506d1-86e7-48a7-871b-9977fb0ace47] Acquiring connection started...
|
||||
2026-02-21 20:37:34,435 [34mINFO[0m [PID-6048|THREAD-77685506|CoPilotExecutor|Redis-943506d1-86e7-48a7-871b-9977fb0ace47] Acquiring connection completed successfully.
|
||||
2026-02-21 20:37:34,442 [34mINFO[0m [CoPilotExecutor] Acquired cluster lock for bba63941-8048-4f39-9329-8568e5ebe9cd, executor_id=fb7d76b3-8dc3-40a4-947e-a93bfad207da
|
||||
2026-02-21 20:37:34,535 [34mINFO[0m [CoPilotExecutor] [CoPilotExecutor] Worker 13455405056 started
|
||||
2026-02-21 20:37:34,536 [34mINFO[0m [CoPilotExecutor|task_id:bba63941-8048-4f39-9329-8568e5ebe9cd|session_id:322af5c3-70fc-4a06-9443-8c5df0aa0c9f|user_id:68383665-d3d9-41f3-b10c-fca0dc6080ed] Starting execution
|
||||
2026-02-21 20:37:35,596 [34mINFO[0m [CoPilotExecutor|task_id:bba63941-8048-4f39-9329-8568e5ebe9cd|session_id:322af5c3-70fc-4a06-9443-8c5df0aa0c9f|user_id:68383665-d3d9-41f3-b10c-fca0dc6080ed] Using SDK service
|
||||
2026-02-21 20:37:35,596 [34mINFO[0m [PID-6048|THREAD-77697399|CoPilotExecutor|AsyncRedis-2e10c980-0364-4c4b-9b2d-8186f23b1735] Acquiring connection started...
|
||||
2026-02-21 20:37:35,600 [34mINFO[0m [PID-6048|THREAD-77697399|CoPilotExecutor|AsyncRedis-2e10c980-0364-4c4b-9b2d-8186f23b1735] Acquiring connection completed successfully.
|
||||
2026-02-21 20:37:35,601 [34mINFO[0m Loading session 322af5c3-70fc-4a06-9443-8c5df0aa0c9f from cache: message_count=12, roles=['user', 'assistant', 'tool', 'assistant', 'tool', 'assistant', 'tool', 'tool', 'assistant', 'tool', 'tool', 'user']
|
||||
2026-02-21 20:37:35,601 [34mINFO[0m [PID-6048|THREAD-77697399|CoPilotExecutor|AppService client-34797c8f-0201-4f99-bf73-3f3fb4697e6d] Creating service client started...
|
||||
2026-02-21 20:37:35,601 [34mINFO[0m [PID-6048|THREAD-77697399|CoPilotExecutor|AppService client-34797c8f-0201-4f99-bf73-3f3fb4697e6d] Creating service client completed successfully.
|
||||
2026-02-21 20:37:35,657 [33mWARNING[0m [33mService communication: Retry attempt 1 for '_call_method_async': HTTPServerError: HTTP 500: Server error '500 Internal Server Error' for url 'http://localhost:8005/get_chat_session_message_count'
|
||||
For more information check: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/500[0m
|
||||
2026-02-21 20:37:36,713 [33mWARNING[0m [33mService communication: Retry attempt 2 for '_call_method_async': HTTPServerError: HTTP 500: Server error '500 Internal Server Error' for url 'http://localhost:8005/get_chat_session_message_count'
|
||||
For more information check: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/500[0m
|
||||
2026-02-21 20:37:39,646 [33mWARNING[0m [33mService communication: Retry attempt 3 for '_call_method_async': HTTPServerError: HTTP 500: Server error '500 Internal Server Error' for url 'http://localhost:8005/get_chat_session_message_count'
|
||||
For more information check: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/500[0m
|
||||
2026-02-21 20:37:43,415 [34mINFO[0m 127.0.0.1:58782 - "GET /api/store/profile HTTP/1.1" 404
|
||||
2026-02-21 20:37:44,423 [33mWARNING[0m [33mService communication: Retry attempt 4 for '_call_method_async': HTTPServerError: HTTP 500: Server error '500 Internal Server Error' for url 'http://localhost:8005/get_chat_session_message_count'
|
||||
For more information check: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/500[0m
|
||||
2026-02-21 20:37:44,486 [34mINFO[0m 127.0.0.1:58782 - "GET /api/store/profile HTTP/1.1" 404
|
||||
2026-02-21 20:37:45,048 [34mINFO[0m Loading session 322af5c3-70fc-4a06-9443-8c5df0aa0c9f from cache: message_count=12, roles=['user', 'assistant', 'tool', 'assistant', 'tool', 'assistant', 'tool', 'tool', 'assistant', 'tool', 'tool', 'user']
|
||||
2026-02-21 20:37:45,053 [34mINFO[0m [TASK_LOOKUP] Found running task bba63941... for session 322af5c3...
|
||||
2026-02-21 20:37:45,063 [34mINFO[0m [CoPilotExecutor] Received cancel for bba63941-8048-4f39-9329-8568e5ebe9cd
|
||||
2026-02-21 20:37:45,064 [34mINFO[0m [CANCEL] Published cancel for task ...e5ebe9cd session ...f0aa0c9f
|
||||
2026-02-21 20:37:45,113 [34mINFO[0m Loading session 322af5c3-70fc-4a06-9443-8c5df0aa0c9f from cache: message_count=12, roles=['user', 'assistant', 'tool', 'assistant', 'tool', 'assistant', 'tool', 'tool', 'assistant', 'tool', 'tool', 'user']
|
||||
2026-02-21 20:37:45,120 [34mINFO[0m [TASK_LOOKUP] Found running task bba63941... for session 322af5c3...
|
||||
2026-02-21 20:37:45,121 [34mINFO[0m [GET_SESSION] session=322af5c3-70fc-4a06-9443-8c5df0aa0c9f, active_task=True, msg_count=12, last_role=user
|
||||
2026-02-21 20:37:45,123 [34mINFO[0m 127.0.0.1:58802 - "GET /api/chat/sessions/322af5c3-70fc-4a06-9443-8c5df0aa0c9f HTTP/1.1" 200
|
||||
2026-02-21 20:37:45,306 [34mINFO[0m [TASK_LOOKUP] Found running task bba63941... for session 322af5c3...
|
||||
2026-02-21 20:37:45,307 [34mINFO[0m [TIMING] subscribe_to_task STARTED, task=bba63941-8048-4f39-9329-8568e5ebe9cd, user=68383665-d3d9-41f3-b10c-fca0dc6080ed, last_msg=0-0
|
||||
2026-02-21 20:37:45,309 [34mINFO[0m [TIMING] Redis hgetall took 1.5ms
|
||||
2026-02-21 20:37:45,604 [34mINFO[0m [CoPilotExecutor|task_id:bba63941-8048-4f39-9329-8568e5ebe9cd|session_id:322af5c3-70fc-4a06-9443-8c5df0aa0c9f|user_id:68383665-d3d9-41f3-b10c-fca0dc6080ed] Cancellation requested
|
||||
2026-02-21 20:37:45,604 [34mINFO[0m [CoPilotExecutor|task_id:bba63941-8048-4f39-9329-8568e5ebe9cd|session_id:322af5c3-70fc-4a06-9443-8c5df0aa0c9f|user_id:68383665-d3d9-41f3-b10c-fca0dc6080ed] Execution completed in 11.07s
|
||||
2026-02-21 20:37:45,604 [34mINFO[0m [CoPilotExecutor] Run completed for bba63941-8048-4f39-9329-8568e5ebe9cd
|
||||
2026-02-21 20:37:45,604 [34mINFO[0m [CoPilotExecutor|task_id:bba63941-8048-4f39-9329-8568e5ebe9cd|session_id:322af5c3-70fc-4a06-9443-8c5df0aa0c9f|user_id:68383665-d3d9-41f3-b10c-fca0dc6080ed] Task cancelled
|
||||
2026-02-21 20:37:45,605 [34mINFO[0m [CoPilotExecutor] Releasing cluster lock for bba63941-8048-4f39-9329-8568e5ebe9cd
|
||||
2026-02-21 20:37:45,609 [34mINFO[0m [CoPilotExecutor] Cleaned up completed task bba63941-8048-4f39-9329-8568e5ebe9cd
|
||||
2026-02-21 20:37:45,610 [34mINFO[0m [TIMING] Redis xread (replay) took 301.1ms, status=running
|
||||
2026-02-21 20:37:45,610 [34mINFO[0m [TIMING] publish_chunk StreamFinish in 1.8ms (xadd=1.3ms)
|
||||
2026-02-21 20:37:45,612 [34mINFO[0m [TIMING] Replayed 1 messages, last_id=1771681065606-0
|
||||
2026-02-21 20:37:45,612 [34mINFO[0m [TIMING] Task still running, starting _stream_listener
|
||||
2026-02-21 20:37:45,613 [34mINFO[0m [TIMING] subscribe_to_task COMPLETED in 305.8ms; task=bba63941-8048-4f39-9329-8568e5ebe9cd, n_messages_replayed=1
|
||||
2026-02-21 20:37:45,614 [34mINFO[0m [TIMING] _stream_listener STARTED, task=bba63941-8048-4f39-9329-8568e5ebe9cd, last_id=1771681065606-0
|
||||
2026-02-21 20:37:45,614 [34mINFO[0m Resume stream chunk
|
||||
2026-02-21 20:37:45,615 [34mINFO[0m 127.0.0.1:58802 - "GET /api/chat/sessions/322af5c3-70fc-4a06-9443-8c5df0aa0c9f/stream HTTP/1.1" 200
|
||||
2026-02-21 20:37:45,615 [34mINFO[0m [TIMING] Redis xread (replay) took 11305.8ms, status=running
|
||||
2026-02-21 20:37:45,616 [34mINFO[0m [TIMING] Replayed 1 messages, last_id=1771681065606-0
|
||||
2026-02-21 20:37:45,616 [34mINFO[0m [TIMING] Task still running, starting _stream_listener
|
||||
2026-02-21 20:37:45,616 [34mINFO[0m [TIMING] subscribe_to_task COMPLETED in 11308.9ms; task=bba63941-8048-4f39-9329-8568e5ebe9cd, n_messages_replayed=1
|
||||
2026-02-21 20:37:45,616 [34mINFO[0m [TIMING] Starting to read from subscriber_queue
|
||||
2026-02-21 20:37:45,616 [34mINFO[0m [TIMING] FIRST CHUNK from queue at 11.31s, type=StreamFinish
|
||||
2026-02-21 20:37:45,616 [34mINFO[0m 127.0.0.1:58710 - "POST /api/chat/sessions/322af5c3-70fc-4a06-9443-8c5df0aa0c9f/stream HTTP/1.1" 200
|
||||
2026-02-21 20:37:45,617 [34mINFO[0m [TIMING] StreamFinish received in 11.31s; n_chunks=1
|
||||
2026-02-21 20:37:45,617 [34mINFO[0m [TIMING] _stream_listener CANCELLED after 3.5ms, delivered=0
|
||||
2026-02-21 20:37:45,617 [34mINFO[0m [TIMING] _stream_listener FINISHED in 0.0s; task=bba63941-8048-4f39-9329-8568e5ebe9cd, delivered=0, xread_count=1
|
||||
2026-02-21 20:37:45,618 [34mINFO[0m Resume stream completed
|
||||
2026-02-21 20:37:45,618 [34mINFO[0m [TIMING] event_generator FINISHED in 11.31s; task=bba63941-8048-4f39-9329-8568e5ebe9cd, session=322af5c3-70fc-4a06-9443-8c5df0aa0c9f, n_chunks=1
|
||||
2026-02-21 20:37:45,691 [34mINFO[0m Loading session 322af5c3-70fc-4a06-9443-8c5df0aa0c9f from cache: message_count=12, roles=['user', 'assistant', 'tool', 'assistant', 'tool', 'assistant', 'tool', 'tool', 'assistant', 'tool', 'tool', 'user']
|
||||
2026-02-21 20:37:45,694 [34mINFO[0m [GET_SESSION] session=322af5c3-70fc-4a06-9443-8c5df0aa0c9f, active_task=False, msg_count=12, last_role=user
|
||||
2026-02-21 20:37:45,695 [34mINFO[0m 127.0.0.1:58710 - "GET /api/chat/sessions/322af5c3-70fc-4a06-9443-8c5df0aa0c9f HTTP/1.1" 200
|
||||
2026-02-21 20:37:45,710 [34mINFO[0m 127.0.0.1:58802 - "GET /api/chat/sessions/322af5c3-70fc-4a06-9443-8c5df0aa0c9f/stream HTTP/1.1" 204
|
||||
2026-02-21 20:37:45,771 [34mINFO[0m Loading session 322af5c3-70fc-4a06-9443-8c5df0aa0c9f from cache: message_count=12, roles=['user', 'assistant', 'tool', 'assistant', 'tool', 'assistant', 'tool', 'tool', 'assistant', 'tool', 'tool', 'user']
|
||||
2026-02-21 20:37:45,775 [34mINFO[0m [GET_SESSION] session=322af5c3-70fc-4a06-9443-8c5df0aa0c9f, active_task=False, msg_count=12, last_role=user
|
||||
2026-02-21 20:37:45,775 [34mINFO[0m 127.0.0.1:58710 - "GET /api/chat/sessions/322af5c3-70fc-4a06-9443-8c5df0aa0c9f HTTP/1.1" 200
|
||||
2026-02-21 20:37:46,075 [34mINFO[0m [CANCEL] Task ...e5ebe9cd confirmed stopped (status=failed) after 1.0s
|
||||
2026-02-21 20:37:46,076 [34mINFO[0m 127.0.0.1:58782 - "POST /api/chat/sessions/322af5c3-70fc-4a06-9443-8c5df0aa0c9f/cancel HTTP/1.1" 200
|
||||
2026-02-21 20:37:46,573 [34mINFO[0m 127.0.0.1:58710 - "GET /api/store/profile HTTP/1.1" 404
|
||||
2026-02-21 20:37:50,090 [34mINFO[0m 127.0.0.1:58710 - "GET /api/integrations/providers/system HTTP/1.1" 200
|
||||
2026-02-21 20:37:50,103 [34mINFO[0m 127.0.0.1:58842 - "GET /api/integrations/providers HTTP/1.1" 200
|
||||
2026-02-21 20:37:50,681 [34mINFO[0m Loading session 322af5c3-70fc-4a06-9443-8c5df0aa0c9f from cache: message_count=12, roles=['user', 'assistant', 'tool', 'assistant', 'tool', 'assistant', 'tool', 'tool', 'assistant', 'tool', 'tool', 'user']
|
||||
2026-02-21 20:37:50,686 [34mINFO[0m 127.0.0.1:58710 - "GET /api/library/agents?page=1&page_size=100 HTTP/1.1" 200
|
||||
2026-02-21 20:37:50,692 [34mINFO[0m 127.0.0.1:58850 - "GET /api/store/profile HTTP/1.1" 404
|
||||
2026-02-21 20:37:50,702 [34mINFO[0m 127.0.0.1:58842 - "GET /api/integrations/credentials HTTP/1.1" 200
|
||||
2026-02-21 20:37:50,710 [34mINFO[0m [GET_SESSION] session=322af5c3-70fc-4a06-9443-8c5df0aa0c9f, active_task=False, msg_count=12, last_role=user
|
||||
2026-02-21 20:37:50,711 [34mINFO[0m 127.0.0.1:58862 - "GET /api/chat/sessions/322af5c3-70fc-4a06-9443-8c5df0aa0c9f HTTP/1.1" 200
|
||||
2026-02-21 20:37:50,714 [34mINFO[0m 127.0.0.1:58852 - "GET /api/onboarding HTTP/1.1" 200
|
||||
2026-02-21 20:37:50,720 [34mINFO[0m 127.0.0.1:58854 - "GET /api/executions HTTP/1.1" 200
|
||||
2026-02-21 20:37:50,795 [34mINFO[0m 127.0.0.1:58710 - "GET /api/chat/sessions?limit=50 HTTP/1.1" 200
|
||||
2026-02-21 20:37:51,955 [34mINFO[0m 127.0.0.1:58710 - "GET /api/store/profile HTTP/1.1" 404
|
||||
2026-02-21 20:37:54,064 [34mINFO[0m 127.0.0.1:58710 - "GET /api/store/profile HTTP/1.1" 404
|
||||
2026-02-21 20:37:54,157 [34mINFO[0m [TIMING] stream_chat_post STARTED, session=322af5c3-70fc-4a06-9443-8c5df0aa0c9f, user=68383665-d3d9-41f3-b10c-fca0dc6080ed, message_len=5
|
||||
2026-02-21 20:37:54,169 [34mINFO[0m Loading session 322af5c3-70fc-4a06-9443-8c5df0aa0c9f from cache: message_count=12, roles=['user', 'assistant', 'tool', 'assistant', 'tool', 'assistant', 'tool', 'tool', 'assistant', 'tool', 'tool', 'user']
|
||||
2026-02-21 20:37:54,170 [34mINFO[0m [TIMING] session validated in 13.0ms
|
||||
2026-02-21 20:37:54,170 [34mINFO[0m [STREAM] Saving user message to session 322af5c3-70fc-4a06-9443-8c5df0aa0c9f
|
||||
2026-02-21 20:37:54,172 [34mINFO[0m Loading session 322af5c3-70fc-4a06-9443-8c5df0aa0c9f from cache: message_count=12, roles=['user', 'assistant', 'tool', 'assistant', 'tool', 'assistant', 'tool', 'tool', 'assistant', 'tool', 'tool', 'user']
|
||||
2026-02-21 20:37:54,212 [34mINFO[0m Saving 1 new messages to DB for session 322af5c3-70fc-4a06-9443-8c5df0aa0c9f: roles=['user'], start_sequence=12
|
||||
2026-02-21 20:37:54,238 [34mINFO[0m [STREAM] User message saved for session 322af5c3-70fc-4a06-9443-8c5df0aa0c9f
|
||||
2026-02-21 20:37:54,238 [34mINFO[0m [TIMING] create_task STARTED, task=6360d249-c803-47d3-8a08-d77275e4b2d8, session=322af5c3-70fc-4a06-9443-8c5df0aa0c9f, user=68383665-d3d9-41f3-b10c-fca0dc6080ed
|
||||
2026-02-21 20:37:54,238 [34mINFO[0m [TIMING] get_redis_async took 0.0ms
|
||||
2026-02-21 20:37:54,242 [34mINFO[0m [TIMING] redis.hset took 3.1ms
|
||||
2026-02-21 20:37:54,250 [34mINFO[0m [TIMING] create_task COMPLETED in 11.6ms; task=6360d249-c803-47d3-8a08-d77275e4b2d8, session=322af5c3-70fc-4a06-9443-8c5df0aa0c9f
|
||||
2026-02-21 20:37:54,251 [34mINFO[0m [TIMING] create_task completed in 12.9ms
|
||||
2026-02-21 20:37:54,261 [34mINFO[0m [TIMING] Task enqueued to RabbitMQ, setup=103.8ms
|
||||
2026-02-21 20:37:54,262 [34mINFO[0m [TIMING] event_generator STARTED, task=6360d249-c803-47d3-8a08-d77275e4b2d8, session=322af5c3-70fc-4a06-9443-8c5df0aa0c9f, user=68383665-d3d9-41f3-b10c-fca0dc6080ed
|
||||
2026-02-21 20:37:54,263 [34mINFO[0m [TIMING] subscribe_to_task STARTED, task=6360d249-c803-47d3-8a08-d77275e4b2d8, user=68383665-d3d9-41f3-b10c-fca0dc6080ed, last_msg=0-0
|
||||
2026-02-21 20:37:54,264 [34mINFO[0m [TIMING] Redis hgetall took 1.7ms
|
||||
2026-02-21 20:37:54,265 [34mINFO[0m [CoPilotExecutor] Acquired cluster lock for 6360d249-c803-47d3-8a08-d77275e4b2d8, executor_id=fb7d76b3-8dc3-40a4-947e-a93bfad207da
|
||||
2026-02-21 20:37:54,267 [34mINFO[0m [CoPilotExecutor|task_id:6360d249-c803-47d3-8a08-d77275e4b2d8|session_id:322af5c3-70fc-4a06-9443-8c5df0aa0c9f|user_id:68383665-d3d9-41f3-b10c-fca0dc6080ed] Starting execution
|
||||
2026-02-21 20:37:54,286 [34mINFO[0m [CoPilotExecutor|task_id:6360d249-c803-47d3-8a08-d77275e4b2d8|session_id:322af5c3-70fc-4a06-9443-8c5df0aa0c9f|user_id:68383665-d3d9-41f3-b10c-fca0dc6080ed] Using SDK service
|
||||
2026-02-21 20:37:54,290 [34mINFO[0m Loading session 322af5c3-70fc-4a06-9443-8c5df0aa0c9f from cache: message_count=13, roles=['user', 'assistant', 'tool', 'assistant', 'tool', 'assistant', 'tool', 'tool', 'assistant', 'tool', 'tool', 'user', 'user']
|
||||
2026-02-21 20:37:54,357 [33mWARNING[0m [33mService communication: Retry attempt 1 for '_call_method_async': HTTPServerError: HTTP 500: Server error '500 Internal Server Error' for url 'http://localhost:8005/get_chat_session_message_count'
|
||||
For more information check: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/500[0m
|
||||
2026-02-21 20:37:56,312 [33mWARNING[0m [33mService communication: Retry attempt 2 for '_call_method_async': HTTPServerError: HTTP 500: Server error '500 Internal Server Error' for url 'http://localhost:8005/get_chat_session_message_count'
|
||||
For more information check: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/500[0m
|
||||
2026-02-21 20:37:58,224 [34mINFO[0m 127.0.0.1:58917 - "GET /api/store/profile HTTP/1.1" 404
|
||||
2026-02-21 20:37:58,928 [33mWARNING[0m [33mService communication: Retry attempt 3 for '_call_method_async': HTTPServerError: HTTP 500: Server error '500 Internal Server Error' for url 'http://localhost:8005/get_chat_session_message_count'
|
||||
For more information check: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/500[0m
|
||||
2026-02-21 20:38:00,041 [33mWARNING[0m [33mService communication: Retry attempt 8 for '_call_method_sync': HTTPServerError: HTTP 500: Server error '500 Internal Server Error' for url 'http://localhost:8005/get_graph_executions'
|
||||
For more information check: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/500[0m
|
||||
2026-02-21 20:38:03,701 [33mWARNING[0m [33mService communication: Retry attempt 4 for '_call_method_async': HTTPServerError: HTTP 500: Server error '500 Internal Server Error' for url 'http://localhost:8005/get_chat_session_message_count'
|
||||
For more information check: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/500[0m
|
||||
2026-02-21 20:38:06,882 [34mINFO[0m Loading session 322af5c3-70fc-4a06-9443-8c5df0aa0c9f from cache: message_count=13, roles=['user', 'assistant', 'tool', 'assistant', 'tool', 'assistant', 'tool', 'tool', 'assistant', 'tool', 'tool', 'user', 'user']
|
||||
2026-02-21 20:38:06,888 [34mINFO[0m [TASK_LOOKUP] Found running task 6360d249... for session 322af5c3...
|
||||
2026-02-21 20:38:06,898 [34mINFO[0m [CoPilotExecutor] Received cancel for 6360d249-c803-47d3-8a08-d77275e4b2d8
|
||||
2026-02-21 20:38:06,898 [34mINFO[0m [CANCEL] Published cancel for task ...75e4b2d8 session ...f0aa0c9f
|
||||
2026-02-21 20:38:06,919 [34mINFO[0m Loading session 322af5c3-70fc-4a06-9443-8c5df0aa0c9f from cache: message_count=13, roles=['user', 'assistant', 'tool', 'assistant', 'tool', 'assistant', 'tool', 'tool', 'assistant', 'tool', 'tool', 'user', 'user']
|
||||
2026-02-21 20:38:06,925 [34mINFO[0m [TASK_LOOKUP] Found running task 6360d249... for session 322af5c3...
|
||||
2026-02-21 20:38:06,926 [34mINFO[0m [GET_SESSION] session=322af5c3-70fc-4a06-9443-8c5df0aa0c9f, active_task=True, msg_count=13, last_role=user
|
||||
2026-02-21 20:38:06,927 [34mINFO[0m 127.0.0.1:58976 - "GET /api/chat/sessions/322af5c3-70fc-4a06-9443-8c5df0aa0c9f HTTP/1.1" 200
|
||||
2026-02-21 20:38:07,136 [34mINFO[0m [TASK_LOOKUP] Found running task 6360d249... for session 322af5c3...
|
||||
2026-02-21 20:38:07,138 [34mINFO[0m [TIMING] subscribe_to_task STARTED, task=6360d249-c803-47d3-8a08-d77275e4b2d8, user=68383665-d3d9-41f3-b10c-fca0dc6080ed, last_msg=0-0
|
||||
2026-02-21 20:38:07,140 [34mINFO[0m [TIMING] Redis hgetall took 1.3ms
|
||||
2026-02-21 20:38:07,359 [34mINFO[0m [CoPilotExecutor|task_id:6360d249-c803-47d3-8a08-d77275e4b2d8|session_id:322af5c3-70fc-4a06-9443-8c5df0aa0c9f|user_id:68383665-d3d9-41f3-b10c-fca0dc6080ed] Cancellation requested
|
||||
2026-02-21 20:38:07,360 [34mINFO[0m [CoPilotExecutor|task_id:6360d249-c803-47d3-8a08-d77275e4b2d8|session_id:322af5c3-70fc-4a06-9443-8c5df0aa0c9f|user_id:68383665-d3d9-41f3-b10c-fca0dc6080ed] Execution completed in 13.09s
|
||||
2026-02-21 20:38:07,360 [34mINFO[0m [CoPilotExecutor] Run completed for 6360d249-c803-47d3-8a08-d77275e4b2d8
|
||||
2026-02-21 20:38:07,360 [34mINFO[0m [CoPilotExecutor|task_id:6360d249-c803-47d3-8a08-d77275e4b2d8|session_id:322af5c3-70fc-4a06-9443-8c5df0aa0c9f|user_id:68383665-d3d9-41f3-b10c-fca0dc6080ed] Task cancelled
|
||||
2026-02-21 20:38:07,360 [34mINFO[0m [CoPilotExecutor] Releasing cluster lock for 6360d249-c803-47d3-8a08-d77275e4b2d8
|
||||
2026-02-21 20:38:07,362 [34mINFO[0m [CoPilotExecutor] Cleaned up completed task 6360d249-c803-47d3-8a08-d77275e4b2d8
|
||||
2026-02-21 20:38:07,364 [34mINFO[0m [TIMING] Redis xread (replay) took 224.1ms, status=running
|
||||
2026-02-21 20:38:07,364 [34mINFO[0m [TIMING] Replayed 1 messages, last_id=1771681087362-0
|
||||
2026-02-21 20:38:07,365 [34mINFO[0m [TIMING] Task still running, starting _stream_listener
|
||||
2026-02-21 20:38:07,365 [34mINFO[0m [TIMING] publish_chunk StreamFinish in 2.1ms (xadd=1.2ms)
|
||||
2026-02-21 20:38:07,365 [34mINFO[0m [TIMING] subscribe_to_task COMPLETED in 226.8ms; task=6360d249-c803-47d3-8a08-d77275e4b2d8, n_messages_replayed=1
|
||||
2026-02-21 20:38:07,366 [34mINFO[0m [TIMING] _stream_listener STARTED, task=6360d249-c803-47d3-8a08-d77275e4b2d8, last_id=1771681087362-0
|
||||
2026-02-21 20:38:07,366 [34mINFO[0m Resume stream chunk
|
||||
2026-02-21 20:38:07,366 [34mINFO[0m 127.0.0.1:58976 - "GET /api/chat/sessions/322af5c3-70fc-4a06-9443-8c5df0aa0c9f/stream HTTP/1.1" 200
|
||||
2026-02-21 20:38:07,367 [34mINFO[0m [TIMING] Redis xread (replay) took 13101.9ms, status=running
|
||||
2026-02-21 20:38:07,367 [34mINFO[0m [TIMING] Replayed 1 messages, last_id=1771681087362-0
|
||||
2026-02-21 20:38:07,367 [34mINFO[0m [TIMING] Task still running, starting _stream_listener
|
||||
2026-02-21 20:38:07,367 [34mINFO[0m [TIMING] subscribe_to_task COMPLETED in 13104.6ms; task=6360d249-c803-47d3-8a08-d77275e4b2d8, n_messages_replayed=1
|
||||
2026-02-21 20:38:07,367 [34mINFO[0m [TIMING] Starting to read from subscriber_queue
|
||||
2026-02-21 20:38:07,368 [34mINFO[0m [TIMING] FIRST CHUNK from queue at 13.11s, type=StreamFinish
|
||||
2026-02-21 20:38:07,368 [34mINFO[0m 127.0.0.1:58710 - "POST /api/chat/sessions/322af5c3-70fc-4a06-9443-8c5df0aa0c9f/stream HTTP/1.1" 200
|
||||
2026-02-21 20:38:07,368 [34mINFO[0m [TIMING] StreamFinish received in 13.11s; n_chunks=1
|
||||
2026-02-21 20:38:07,368 [34mINFO[0m [TIMING] _stream_listener CANCELLED after 2.7ms, delivered=0
|
||||
2026-02-21 20:38:07,368 [34mINFO[0m [TIMING] _stream_listener FINISHED in 0.0s; task=6360d249-c803-47d3-8a08-d77275e4b2d8, delivered=0, xread_count=1
|
||||
2026-02-21 20:38:07,369 [34mINFO[0m Resume stream completed
|
||||
2026-02-21 20:38:07,369 [34mINFO[0m [TIMING] event_generator FINISHED in 13.11s; task=6360d249-c803-47d3-8a08-d77275e4b2d8, session=322af5c3-70fc-4a06-9443-8c5df0aa0c9f, n_chunks=1
|
||||
2026-02-21 20:38:07,408 [34mINFO[0m [CANCEL] Task ...75e4b2d8 confirmed stopped (status=failed) after 0.5s
|
||||
2026-02-21 20:38:07,409 [34mINFO[0m 127.0.0.1:58974 - "POST /api/chat/sessions/322af5c3-70fc-4a06-9443-8c5df0aa0c9f/cancel HTTP/1.1" 200
|
||||
2026-02-21 20:38:07,447 [34mINFO[0m Loading session 322af5c3-70fc-4a06-9443-8c5df0aa0c9f from cache: message_count=13, roles=['user', 'assistant', 'tool', 'assistant', 'tool', 'assistant', 'tool', 'tool', 'assistant', 'tool', 'tool', 'user', 'user']
|
||||
2026-02-21 20:38:07,451 [34mINFO[0m [GET_SESSION] session=322af5c3-70fc-4a06-9443-8c5df0aa0c9f, active_task=False, msg_count=13, last_role=user
|
||||
2026-02-21 20:38:07,451 [34mINFO[0m 127.0.0.1:58710 - "GET /api/chat/sessions/322af5c3-70fc-4a06-9443-8c5df0aa0c9f HTTP/1.1" 200
|
||||
2026-02-21 20:38:07,468 [34mINFO[0m 127.0.0.1:58710 - "GET /api/chat/sessions/322af5c3-70fc-4a06-9443-8c5df0aa0c9f/stream HTTP/1.1" 204
|
||||
2026-02-21 20:38:07,521 [34mINFO[0m Loading session 322af5c3-70fc-4a06-9443-8c5df0aa0c9f from cache: message_count=13, roles=['user', 'assistant', 'tool', 'assistant', 'tool', 'assistant', 'tool', 'tool', 'assistant', 'tool', 'tool', 'user', 'user']
|
||||
2026-02-21 20:38:07,527 [34mINFO[0m [GET_SESSION] session=322af5c3-70fc-4a06-9443-8c5df0aa0c9f, active_task=False, msg_count=13, last_role=user
|
||||
2026-02-21 20:38:07,528 [34mINFO[0m 127.0.0.1:58710 - "GET /api/chat/sessions/322af5c3-70fc-4a06-9443-8c5df0aa0c9f HTTP/1.1" 200
|
||||
2026-02-21 20:38:18,440 [34mINFO[0m 127.0.0.1:59077 - "GET /api/store/profile HTTP/1.1" 404
|
||||
2026-02-21 20:38:19,553 [34mINFO[0m 127.0.0.1:59077 - "GET /api/store/profile HTTP/1.1" 404
|
||||
2026-02-21 20:38:21,643 [34mINFO[0m 127.0.0.1:59077 - "GET /api/store/profile HTTP/1.1" 404
|
||||
2026-02-21 20:38:30,090 [33mWARNING[0m [33mService communication: Retry attempt 9 for '_call_method_sync': HTTPServerError: HTTP 500: Server error '500 Internal Server Error' for url 'http://localhost:8005/get_graph_executions'
|
||||
For more information check: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/500[0m
|
||||
2026-02-21 20:39:00,123 [33mWARNING[0m [33mService communication: Retry attempt 10 for '_call_method_sync': HTTPServerError: HTTP 500: Server error '500 Internal Server Error' for url 'http://localhost:8005/get_graph_executions'
|
||||
For more information check: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/500[0m
|
||||
2026-02-21 20:39:13,881 [34mINFO[0m 127.0.0.1:59398 - "GET /api/chat/sessions?limit=50 HTTP/1.1" 200
|
||||
2026-02-21 20:39:30,173 [33mWARNING[0m [33mService communication: Retry attempt 11 for '_call_method_sync': HTTPServerError: HTTP 500: Server error '500 Internal Server Error' for url 'http://localhost:8005/get_graph_executions'
|
||||
For more information check: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/500[0m
|
||||
2026-02-21 20:39:35,355 [34mINFO[0m 127.0.0.1:59522 - "GET /api/store/profile HTTP/1.1" 404
|
||||
2026-02-21 20:39:35,685 [34mINFO[0m 127.0.0.1:59526 - "GET /api/executions HTTP/1.1" 200
|
||||
2026-02-21 20:39:38,916 [34mINFO[0m 127.0.0.1:59522 - "GET /api/store/profile HTTP/1.1" 404
|
||||
2026-02-21 20:39:40,019 [34mINFO[0m 127.0.0.1:59522 - "GET /api/store/profile HTTP/1.1" 404
|
||||
@@ -190,5 +190,8 @@ ZEROBOUNCE_API_KEY=
|
||||
POSTHOG_API_KEY=
|
||||
POSTHOG_HOST=https://eu.i.posthog.com
|
||||
|
||||
# Tally Form Integration (pre-populate business understanding on signup)
|
||||
TALLY_API_KEY=
|
||||
|
||||
# Other Services
|
||||
AUTOMOD_API_KEY=
|
||||
|
||||
1
autogpt_platform/backend/.gitignore
vendored
1
autogpt_platform/backend/.gitignore
vendored
@@ -22,4 +22,3 @@ migrations/*/rollback*.sql
|
||||
|
||||
# Workspace files
|
||||
workspaces/
|
||||
sample.logs
|
||||
|
||||
@@ -88,20 +88,23 @@ async def require_auth(
|
||||
)
|
||||
|
||||
|
||||
def require_permission(permission: APIKeyPermission):
|
||||
def require_permission(*permissions: APIKeyPermission):
|
||||
"""
|
||||
Dependency function for checking specific permissions
|
||||
Dependency function for checking required permissions.
|
||||
All listed permissions must be present.
|
||||
(works with API keys and OAuth tokens)
|
||||
"""
|
||||
|
||||
async def check_permission(
|
||||
async def check_permissions(
|
||||
auth: APIAuthorizationInfo = Security(require_auth),
|
||||
) -> APIAuthorizationInfo:
|
||||
if permission not in auth.scopes:
|
||||
missing = [p for p in permissions if p not in auth.scopes]
|
||||
if missing:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_403_FORBIDDEN,
|
||||
detail=f"Missing required permission: {permission.value}",
|
||||
detail=f"Missing required permission(s): "
|
||||
f"{', '.join(p.value for p in missing)}",
|
||||
)
|
||||
return auth
|
||||
|
||||
return check_permission
|
||||
return check_permissions
|
||||
|
||||
@@ -18,6 +18,7 @@ from backend.data import user as user_db
|
||||
from backend.data.auth.base import APIAuthorizationInfo
|
||||
from backend.data.block import BlockInput, CompletedBlockOutput
|
||||
from backend.executor.utils import add_graph_execution
|
||||
from backend.integrations.webhooks.graph_lifecycle_hooks import on_graph_activate
|
||||
from backend.util.settings import Settings
|
||||
|
||||
from .integrations import integrations_router
|
||||
@@ -95,6 +96,43 @@ async def execute_graph_block(
|
||||
return output
|
||||
|
||||
|
||||
@v1_router.post(
|
||||
path="/graphs",
|
||||
tags=["graphs"],
|
||||
status_code=201,
|
||||
dependencies=[
|
||||
Security(
|
||||
require_permission(
|
||||
APIKeyPermission.WRITE_GRAPH, APIKeyPermission.WRITE_LIBRARY
|
||||
)
|
||||
)
|
||||
],
|
||||
)
|
||||
async def create_graph(
|
||||
graph: graph_db.Graph,
|
||||
auth: APIAuthorizationInfo = Security(
|
||||
require_permission(APIKeyPermission.WRITE_GRAPH, APIKeyPermission.WRITE_LIBRARY)
|
||||
),
|
||||
) -> graph_db.GraphModel:
|
||||
"""
|
||||
Create a new agent graph.
|
||||
|
||||
The graph will be validated and assigned a new ID.
|
||||
It is automatically added to the user's library.
|
||||
"""
|
||||
from backend.api.features.library import db as library_db
|
||||
|
||||
graph_model = graph_db.make_graph_model(graph, auth.user_id)
|
||||
graph_model.reassign_ids(user_id=auth.user_id, reassign_graph_id=True)
|
||||
graph_model.validate_graph(for_run=False)
|
||||
|
||||
await graph_db.create_graph(graph_model, user_id=auth.user_id)
|
||||
await library_db.create_library_agent(graph_model, auth.user_id)
|
||||
activated_graph = await on_graph_activate(graph_model, user_id=auth.user_id)
|
||||
|
||||
return activated_graph
|
||||
|
||||
|
||||
@v1_router.post(
|
||||
path="/graphs/{graph_id}/execute/{graph_version}",
|
||||
tags=["graphs"],
|
||||
|
||||
@@ -1,15 +1,17 @@
|
||||
import logging
|
||||
from dataclasses import dataclass
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from difflib import SequenceMatcher
|
||||
from typing import Sequence
|
||||
from typing import Any, Sequence, get_args, get_origin
|
||||
|
||||
import prisma
|
||||
from prisma.enums import ContentType
|
||||
from prisma.models import mv_suggested_blocks
|
||||
|
||||
import backend.api.features.library.db as library_db
|
||||
import backend.api.features.library.model as library_model
|
||||
import backend.api.features.store.db as store_db
|
||||
import backend.api.features.store.model as store_model
|
||||
from backend.api.features.store.hybrid_search import unified_hybrid_search
|
||||
from backend.blocks import load_all_blocks
|
||||
from backend.blocks._base import (
|
||||
AnyBlockSchema,
|
||||
@@ -19,7 +21,6 @@ from backend.blocks._base import (
|
||||
BlockType,
|
||||
)
|
||||
from backend.blocks.llm import LlmModel
|
||||
from backend.data.db import query_raw_with_schema
|
||||
from backend.integrations.providers import ProviderName
|
||||
from backend.util.cache import cached
|
||||
from backend.util.models import Pagination
|
||||
@@ -42,6 +43,16 @@ MAX_LIBRARY_AGENT_RESULTS = 100
|
||||
MAX_MARKETPLACE_AGENT_RESULTS = 100
|
||||
MIN_SCORE_FOR_FILTERED_RESULTS = 10.0
|
||||
|
||||
# Boost blocks over marketplace agents in search results
|
||||
BLOCK_SCORE_BOOST = 50.0
|
||||
|
||||
# Block IDs to exclude from search results
|
||||
EXCLUDED_BLOCK_IDS = frozenset(
|
||||
{
|
||||
"e189baac-8c20-45a1-94a7-55177ea42565", # AgentExecutorBlock
|
||||
}
|
||||
)
|
||||
|
||||
SearchResultItem = BlockInfo | library_model.LibraryAgent | store_model.StoreAgent
|
||||
|
||||
|
||||
@@ -64,8 +75,8 @@ def get_block_categories(category_blocks: int = 3) -> list[BlockCategoryResponse
|
||||
|
||||
for block_type in load_all_blocks().values():
|
||||
block: AnyBlockSchema = block_type()
|
||||
# Skip disabled blocks
|
||||
if block.disabled:
|
||||
# Skip disabled and excluded blocks
|
||||
if block.disabled or block.id in EXCLUDED_BLOCK_IDS:
|
||||
continue
|
||||
# Skip blocks that don't have categories (all should have at least one)
|
||||
if not block.categories:
|
||||
@@ -116,6 +127,9 @@ def get_blocks(
|
||||
# Skip disabled blocks
|
||||
if block.disabled:
|
||||
continue
|
||||
# Skip excluded blocks
|
||||
if block.id in EXCLUDED_BLOCK_IDS:
|
||||
continue
|
||||
# Skip blocks that don't match the category
|
||||
if category and category not in {c.name.lower() for c in block.categories}:
|
||||
continue
|
||||
@@ -255,14 +269,25 @@ async def _build_cached_search_results(
|
||||
"my_agents": 0,
|
||||
}
|
||||
|
||||
block_results, block_total, integration_total = _collect_block_results(
|
||||
normalized_query=normalized_query,
|
||||
include_blocks=include_blocks,
|
||||
include_integrations=include_integrations,
|
||||
)
|
||||
scored_items.extend(block_results)
|
||||
total_items["blocks"] = block_total
|
||||
total_items["integrations"] = integration_total
|
||||
# Use hybrid search when query is present, otherwise list all blocks
|
||||
if (include_blocks or include_integrations) and normalized_query:
|
||||
block_results, block_total, integration_total = await _hybrid_search_blocks(
|
||||
query=search_query,
|
||||
include_blocks=include_blocks,
|
||||
include_integrations=include_integrations,
|
||||
)
|
||||
scored_items.extend(block_results)
|
||||
total_items["blocks"] = block_total
|
||||
total_items["integrations"] = integration_total
|
||||
elif include_blocks or include_integrations:
|
||||
# No query - list all blocks using in-memory approach
|
||||
block_results, block_total, integration_total = _collect_block_results(
|
||||
include_blocks=include_blocks,
|
||||
include_integrations=include_integrations,
|
||||
)
|
||||
scored_items.extend(block_results)
|
||||
total_items["blocks"] = block_total
|
||||
total_items["integrations"] = integration_total
|
||||
|
||||
if include_library_agents:
|
||||
library_response = await library_db.list_library_agents(
|
||||
@@ -307,10 +332,14 @@ async def _build_cached_search_results(
|
||||
|
||||
def _collect_block_results(
|
||||
*,
|
||||
normalized_query: str,
|
||||
include_blocks: bool,
|
||||
include_integrations: bool,
|
||||
) -> tuple[list[_ScoredItem], int, int]:
|
||||
"""
|
||||
Collect all blocks for listing (no search query).
|
||||
|
||||
All blocks get BLOCK_SCORE_BOOST to prioritize them over marketplace agents.
|
||||
"""
|
||||
results: list[_ScoredItem] = []
|
||||
block_count = 0
|
||||
integration_count = 0
|
||||
@@ -323,6 +352,10 @@ def _collect_block_results(
|
||||
if block.disabled:
|
||||
continue
|
||||
|
||||
# Skip excluded blocks
|
||||
if block.id in EXCLUDED_BLOCK_IDS:
|
||||
continue
|
||||
|
||||
block_info = block.get_info()
|
||||
credentials = list(block.input_schema.get_credentials_fields().values())
|
||||
is_integration = len(credentials) > 0
|
||||
@@ -332,10 +365,6 @@ def _collect_block_results(
|
||||
if not is_integration and not include_blocks:
|
||||
continue
|
||||
|
||||
score = _score_block(block, block_info, normalized_query)
|
||||
if not _should_include_item(score, normalized_query):
|
||||
continue
|
||||
|
||||
filter_type: FilterType = "integrations" if is_integration else "blocks"
|
||||
if is_integration:
|
||||
integration_count += 1
|
||||
@@ -346,8 +375,122 @@ def _collect_block_results(
|
||||
_ScoredItem(
|
||||
item=block_info,
|
||||
filter_type=filter_type,
|
||||
score=score,
|
||||
sort_key=_get_item_name(block_info),
|
||||
score=BLOCK_SCORE_BOOST,
|
||||
sort_key=block_info.name.lower(),
|
||||
)
|
||||
)
|
||||
|
||||
return results, block_count, integration_count
|
||||
|
||||
|
||||
async def _hybrid_search_blocks(
|
||||
*,
|
||||
query: str,
|
||||
include_blocks: bool,
|
||||
include_integrations: bool,
|
||||
) -> tuple[list[_ScoredItem], int, int]:
|
||||
"""
|
||||
Search blocks using hybrid search with builder-specific filtering.
|
||||
|
||||
Uses unified_hybrid_search for semantic + lexical search, then applies
|
||||
post-filtering for block/integration types and scoring adjustments.
|
||||
|
||||
Scoring:
|
||||
- Base: hybrid relevance score (0-1) scaled to 0-100, plus BLOCK_SCORE_BOOST
|
||||
to prioritize blocks over marketplace agents in combined results
|
||||
- +30 for exact name match, +15 for prefix name match
|
||||
- +20 if the block has an LlmModel field and the query matches an LLM model name
|
||||
|
||||
Args:
|
||||
query: The search query string
|
||||
include_blocks: Whether to include regular blocks
|
||||
include_integrations: Whether to include integration blocks
|
||||
|
||||
Returns:
|
||||
Tuple of (scored_items, block_count, integration_count)
|
||||
"""
|
||||
results: list[_ScoredItem] = []
|
||||
block_count = 0
|
||||
integration_count = 0
|
||||
|
||||
if not include_blocks and not include_integrations:
|
||||
return results, block_count, integration_count
|
||||
|
||||
normalized_query = query.strip().lower()
|
||||
|
||||
# Fetch more results to account for post-filtering
|
||||
search_results, _ = await unified_hybrid_search(
|
||||
query=query,
|
||||
content_types=[ContentType.BLOCK],
|
||||
page=1,
|
||||
page_size=150,
|
||||
min_score=0.10,
|
||||
)
|
||||
|
||||
# Load all blocks for getting BlockInfo
|
||||
all_blocks = load_all_blocks()
|
||||
|
||||
for result in search_results:
|
||||
block_id = result["content_id"]
|
||||
|
||||
# Skip excluded blocks
|
||||
if block_id in EXCLUDED_BLOCK_IDS:
|
||||
continue
|
||||
|
||||
metadata = result.get("metadata", {})
|
||||
hybrid_score = result.get("relevance", 0.0)
|
||||
|
||||
# Get the actual block class
|
||||
if block_id not in all_blocks:
|
||||
continue
|
||||
|
||||
block_cls = all_blocks[block_id]
|
||||
block: AnyBlockSchema = block_cls()
|
||||
|
||||
if block.disabled:
|
||||
continue
|
||||
|
||||
# Check block/integration filter using metadata
|
||||
is_integration = metadata.get("is_integration", False)
|
||||
|
||||
if is_integration and not include_integrations:
|
||||
continue
|
||||
if not is_integration and not include_blocks:
|
||||
continue
|
||||
|
||||
# Get block info
|
||||
block_info = block.get_info()
|
||||
|
||||
# Calculate final score: scale hybrid score and add builder-specific bonuses
|
||||
# Hybrid scores are 0-1, builder scores were 0-200+
|
||||
# Add BLOCK_SCORE_BOOST to prioritize blocks over marketplace agents
|
||||
final_score = hybrid_score * 100 + BLOCK_SCORE_BOOST
|
||||
|
||||
# Add LLM model match bonus
|
||||
has_llm_field = metadata.get("has_llm_model_field", False)
|
||||
if has_llm_field and _matches_llm_model(block.input_schema, normalized_query):
|
||||
final_score += 20
|
||||
|
||||
# Add exact/prefix match bonus for deterministic tie-breaking
|
||||
name = block_info.name.lower()
|
||||
if name == normalized_query:
|
||||
final_score += 30
|
||||
elif name.startswith(normalized_query):
|
||||
final_score += 15
|
||||
|
||||
# Track counts
|
||||
filter_type: FilterType = "integrations" if is_integration else "blocks"
|
||||
if is_integration:
|
||||
integration_count += 1
|
||||
else:
|
||||
block_count += 1
|
||||
|
||||
results.append(
|
||||
_ScoredItem(
|
||||
item=block_info,
|
||||
filter_type=filter_type,
|
||||
score=final_score,
|
||||
sort_key=name,
|
||||
)
|
||||
)
|
||||
|
||||
@@ -472,6 +615,8 @@ async def _get_static_counts():
|
||||
block: AnyBlockSchema = block_type()
|
||||
if block.disabled:
|
||||
continue
|
||||
if block.id in EXCLUDED_BLOCK_IDS:
|
||||
continue
|
||||
|
||||
all_blocks += 1
|
||||
|
||||
@@ -498,47 +643,25 @@ async def _get_static_counts():
|
||||
}
|
||||
|
||||
|
||||
def _contains_type(annotation: Any, target: type) -> bool:
|
||||
"""Check if an annotation is or contains the target type (handles Optional/Union/Annotated)."""
|
||||
if annotation is target:
|
||||
return True
|
||||
origin = get_origin(annotation)
|
||||
if origin is None:
|
||||
return False
|
||||
return any(_contains_type(arg, target) for arg in get_args(annotation))
|
||||
|
||||
|
||||
def _matches_llm_model(schema_cls: type[BlockSchema], query: str) -> bool:
|
||||
for field in schema_cls.model_fields.values():
|
||||
if field.annotation == LlmModel:
|
||||
if _contains_type(field.annotation, LlmModel):
|
||||
# Check if query matches any value in llm_models
|
||||
if any(query in name for name in llm_models):
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def _score_block(
|
||||
block: AnyBlockSchema,
|
||||
block_info: BlockInfo,
|
||||
normalized_query: str,
|
||||
) -> float:
|
||||
if not normalized_query:
|
||||
return 0.0
|
||||
|
||||
name = block_info.name.lower()
|
||||
description = block_info.description.lower()
|
||||
score = _score_primary_fields(name, description, normalized_query)
|
||||
|
||||
category_text = " ".join(
|
||||
category.get("category", "").lower() for category in block_info.categories
|
||||
)
|
||||
score += _score_additional_field(category_text, normalized_query, 12, 6)
|
||||
|
||||
credentials_info = block.input_schema.get_credentials_fields_info().values()
|
||||
provider_names = [
|
||||
provider.value.lower()
|
||||
for info in credentials_info
|
||||
for provider in info.provider
|
||||
]
|
||||
provider_text = " ".join(provider_names)
|
||||
score += _score_additional_field(provider_text, normalized_query, 15, 6)
|
||||
|
||||
if _matches_llm_model(block.input_schema, normalized_query):
|
||||
score += 20
|
||||
|
||||
return score
|
||||
|
||||
|
||||
def _score_library_agent(
|
||||
agent: library_model.LibraryAgent,
|
||||
normalized_query: str,
|
||||
@@ -645,31 +768,20 @@ def _get_all_providers() -> dict[ProviderName, Provider]:
|
||||
return providers
|
||||
|
||||
|
||||
@cached(ttl_seconds=3600)
|
||||
@cached(ttl_seconds=3600, shared_cache=True)
|
||||
async def get_suggested_blocks(count: int = 5) -> list[BlockInfo]:
|
||||
suggested_blocks = []
|
||||
# Sum the number of executions for each block type
|
||||
# Prisma cannot group by nested relations, so we do a raw query
|
||||
# Calculate the cutoff timestamp
|
||||
timestamp_threshold = datetime.now(timezone.utc) - timedelta(days=30)
|
||||
"""Return the most-executed blocks from the last 14 days.
|
||||
|
||||
results = await query_raw_with_schema(
|
||||
"""
|
||||
SELECT
|
||||
agent_node."agentBlockId" AS block_id,
|
||||
COUNT(execution.id) AS execution_count
|
||||
FROM {schema_prefix}"AgentNodeExecution" execution
|
||||
JOIN {schema_prefix}"AgentNode" agent_node ON execution."agentNodeId" = agent_node.id
|
||||
WHERE execution."endedTime" >= $1::timestamp
|
||||
GROUP BY agent_node."agentBlockId"
|
||||
ORDER BY execution_count DESC;
|
||||
""",
|
||||
timestamp_threshold,
|
||||
)
|
||||
Queries the mv_suggested_blocks materialized view (refreshed hourly via pg_cron)
|
||||
and returns the top `count` blocks sorted by execution count, excluding
|
||||
Input/Output/Agent block types and blocks in EXCLUDED_BLOCK_IDS.
|
||||
"""
|
||||
results = await mv_suggested_blocks.prisma().find_many()
|
||||
|
||||
# Get the top blocks based on execution count
|
||||
# But ignore Input and Output blocks
|
||||
# But ignore Input, Output, Agent, and excluded blocks
|
||||
blocks: list[tuple[BlockInfo, int]] = []
|
||||
execution_counts = {row.block_id: row.execution_count for row in results}
|
||||
|
||||
for block_type in load_all_blocks().values():
|
||||
block: AnyBlockSchema = block_type()
|
||||
@@ -679,11 +791,9 @@ async def get_suggested_blocks(count: int = 5) -> list[BlockInfo]:
|
||||
BlockType.AGENT,
|
||||
):
|
||||
continue
|
||||
# Find the execution count for this block
|
||||
execution_count = next(
|
||||
(row["execution_count"] for row in results if row["block_id"] == block.id),
|
||||
0,
|
||||
)
|
||||
if block.id in EXCLUDED_BLOCK_IDS:
|
||||
continue
|
||||
execution_count = execution_counts.get(block.id, 0)
|
||||
blocks.append((block.get_info(), execution_count))
|
||||
# Sort blocks by execution count
|
||||
blocks.sort(key=lambda x: x[1], reverse=True)
|
||||
|
||||
@@ -27,7 +27,6 @@ class SearchEntry(BaseModel):
|
||||
|
||||
# Suggestions
|
||||
class SuggestionsResponse(BaseModel):
|
||||
otto_suggestions: list[str]
|
||||
recent_searches: list[SearchEntry]
|
||||
providers: list[ProviderName]
|
||||
top_blocks: list[BlockInfo]
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import logging
|
||||
from typing import Annotated, Sequence
|
||||
from typing import Annotated, Sequence, cast, get_args
|
||||
|
||||
import fastapi
|
||||
from autogpt_libs.auth.dependencies import get_user_id, requires_user
|
||||
@@ -10,6 +10,8 @@ from backend.util.models import Pagination
|
||||
from . import db as builder_db
|
||||
from . import model as builder_model
|
||||
|
||||
VALID_FILTER_VALUES = get_args(builder_model.FilterType)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
router = fastapi.APIRouter(
|
||||
@@ -49,11 +51,6 @@ async def get_suggestions(
|
||||
Get all suggestions for the Blocks Menu.
|
||||
"""
|
||||
return builder_model.SuggestionsResponse(
|
||||
otto_suggestions=[
|
||||
"What blocks do I need to get started?",
|
||||
"Help me create a list",
|
||||
"Help me feed my data to Google Maps",
|
||||
],
|
||||
recent_searches=await builder_db.get_recent_searches(user_id),
|
||||
providers=[
|
||||
ProviderName.TWITTER,
|
||||
@@ -151,7 +148,7 @@ async def get_providers(
|
||||
async def search(
|
||||
user_id: Annotated[str, fastapi.Security(get_user_id)],
|
||||
search_query: Annotated[str | None, fastapi.Query()] = None,
|
||||
filter: Annotated[list[builder_model.FilterType] | None, fastapi.Query()] = None,
|
||||
filter: Annotated[str | None, fastapi.Query()] = None,
|
||||
search_id: Annotated[str | None, fastapi.Query()] = None,
|
||||
by_creator: Annotated[list[str] | None, fastapi.Query()] = None,
|
||||
page: Annotated[int, fastapi.Query()] = 1,
|
||||
@@ -160,9 +157,20 @@ async def search(
|
||||
"""
|
||||
Search for blocks (including integrations), marketplace agents, and user library agents.
|
||||
"""
|
||||
# If no filters are provided, then we will return all types
|
||||
if not filter:
|
||||
filter = [
|
||||
# Parse and validate filter parameter
|
||||
filters: list[builder_model.FilterType]
|
||||
if filter:
|
||||
filter_values = [f.strip() for f in filter.split(",")]
|
||||
invalid_filters = [f for f in filter_values if f not in VALID_FILTER_VALUES]
|
||||
if invalid_filters:
|
||||
raise fastapi.HTTPException(
|
||||
status_code=400,
|
||||
detail=f"Invalid filter value(s): {', '.join(invalid_filters)}. "
|
||||
f"Valid values are: {', '.join(VALID_FILTER_VALUES)}",
|
||||
)
|
||||
filters = cast(list[builder_model.FilterType], filter_values)
|
||||
else:
|
||||
filters = [
|
||||
"blocks",
|
||||
"integrations",
|
||||
"marketplace_agents",
|
||||
@@ -174,7 +182,7 @@ async def search(
|
||||
cached_results = await builder_db.get_sorted_search_results(
|
||||
user_id=user_id,
|
||||
search_query=search_query,
|
||||
filters=filter,
|
||||
filters=filters,
|
||||
by_creator=by_creator,
|
||||
)
|
||||
|
||||
@@ -196,7 +204,7 @@ async def search(
|
||||
user_id,
|
||||
builder_model.SearchEntry(
|
||||
search_query=search_query,
|
||||
filter=filter,
|
||||
filter=filters,
|
||||
by_creator=by_creator,
|
||||
search_id=search_id,
|
||||
),
|
||||
|
||||
@@ -2,23 +2,21 @@
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
import uuid as uuid_module
|
||||
import re
|
||||
from collections.abc import AsyncGenerator
|
||||
from typing import Annotated
|
||||
from uuid import uuid4
|
||||
|
||||
from autogpt_libs import auth
|
||||
from fastapi import APIRouter, Depends, Header, HTTPException, Query, Response, Security
|
||||
from fastapi import APIRouter, Depends, HTTPException, Query, Response, Security
|
||||
from fastapi.responses import StreamingResponse
|
||||
from pydantic import BaseModel
|
||||
from prisma.models import UserWorkspaceFile
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from backend.copilot import service as chat_service
|
||||
from backend.copilot import stream_registry
|
||||
from backend.copilot.completion_handler import (
|
||||
process_operation_failure,
|
||||
process_operation_success,
|
||||
)
|
||||
from backend.copilot.config import ChatConfig
|
||||
from backend.copilot.executor.utils import enqueue_cancel_task, enqueue_copilot_task
|
||||
from backend.copilot.executor.utils import enqueue_cancel_task, enqueue_copilot_turn
|
||||
from backend.copilot.model import (
|
||||
ChatMessage,
|
||||
ChatSession,
|
||||
@@ -46,18 +44,19 @@ from backend.copilot.tools.models import (
|
||||
InputValidationErrorResponse,
|
||||
NeedLoginResponse,
|
||||
NoResultsResponse,
|
||||
OperationInProgressResponse,
|
||||
OperationPendingResponse,
|
||||
OperationStartedResponse,
|
||||
SetupRequirementsResponse,
|
||||
SuggestedGoalResponse,
|
||||
UnderstandingUpdatedResponse,
|
||||
)
|
||||
from backend.copilot.tracking import track_user_message
|
||||
from backend.data.workspace import get_or_create_workspace
|
||||
from backend.util.exceptions import NotFoundError
|
||||
|
||||
config = ChatConfig()
|
||||
|
||||
_UUID_RE = re.compile(
|
||||
r"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$", re.I
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -86,6 +85,9 @@ class StreamChatRequest(BaseModel):
|
||||
message: str
|
||||
is_user_message: bool = True
|
||||
context: dict[str, str] | None = None # {url: str, content: str}
|
||||
file_ids: list[str] | None = Field(
|
||||
default=None, max_length=20
|
||||
) # Workspace file IDs attached to this message
|
||||
|
||||
|
||||
class CreateSessionResponse(BaseModel):
|
||||
@@ -99,10 +101,8 @@ class CreateSessionResponse(BaseModel):
|
||||
class ActiveStreamInfo(BaseModel):
|
||||
"""Information about an active stream for reconnection."""
|
||||
|
||||
task_id: str
|
||||
turn_id: str
|
||||
last_message_id: str # Redis Stream message ID for resumption
|
||||
operation_id: str # Operation ID for completion tracking
|
||||
tool_name: str # Name of the tool being executed
|
||||
|
||||
|
||||
class SessionDetailResponse(BaseModel):
|
||||
@@ -132,22 +132,13 @@ class ListSessionsResponse(BaseModel):
|
||||
total: int
|
||||
|
||||
|
||||
class CancelTaskResponse(BaseModel):
|
||||
"""Response model for the cancel task endpoint."""
|
||||
class CancelSessionResponse(BaseModel):
|
||||
"""Response model for the cancel session endpoint."""
|
||||
|
||||
cancelled: bool
|
||||
task_id: str | None = None
|
||||
reason: str | None = None
|
||||
|
||||
|
||||
class OperationCompleteRequest(BaseModel):
|
||||
"""Request model for external completion webhook."""
|
||||
|
||||
success: bool
|
||||
result: dict | str | None = None
|
||||
error: str | None = None
|
||||
|
||||
|
||||
# ========== Routes ==========
|
||||
|
||||
|
||||
@@ -270,7 +261,7 @@ async def get_session(
|
||||
Retrieve the details of a specific chat session.
|
||||
|
||||
Looks up a chat session by ID for the given user (if authenticated) and returns all session data including messages.
|
||||
If there's an active stream for this session, returns the task_id for reconnection.
|
||||
If there's an active stream for this session, returns active_stream info for reconnection.
|
||||
|
||||
Args:
|
||||
session_id: The unique identifier for the desired chat session.
|
||||
@@ -288,28 +279,21 @@ async def get_session(
|
||||
|
||||
# Check if there's an active stream for this session
|
||||
active_stream_info = None
|
||||
active_task, last_message_id = await stream_registry.get_active_task_for_session(
|
||||
active_session, last_message_id = await stream_registry.get_active_session(
|
||||
session_id, user_id
|
||||
)
|
||||
logger.info(
|
||||
f"[GET_SESSION] session={session_id}, active_task={active_task is not None}, "
|
||||
f"[GET_SESSION] session={session_id}, active_session={active_session is not None}, "
|
||||
f"msg_count={len(messages)}, last_role={messages[-1].get('role') if messages else 'none'}"
|
||||
)
|
||||
if active_task:
|
||||
# Filter out the in-progress assistant message from the session response.
|
||||
# The client will receive the complete assistant response through the SSE
|
||||
# stream replay instead, preventing duplicate content.
|
||||
if messages and messages[-1].get("role") == "assistant":
|
||||
messages = messages[:-1]
|
||||
|
||||
# Use "0-0" as last_message_id to replay the stream from the beginning.
|
||||
# Since we filtered out the cached assistant message, the client needs
|
||||
# the full stream to reconstruct the response.
|
||||
if active_session:
|
||||
# Keep the assistant message (including tool_calls) so the frontend can
|
||||
# render the correct tool UI (e.g. CreateAgent with mini game).
|
||||
# convertChatSessionToUiMessages handles isComplete=false by setting
|
||||
# tool parts without output to state "input-available".
|
||||
active_stream_info = ActiveStreamInfo(
|
||||
task_id=active_task.task_id,
|
||||
last_message_id="0-0",
|
||||
operation_id=active_task.operation_id,
|
||||
tool_name=active_task.tool_name,
|
||||
turn_id=active_session.turn_id,
|
||||
last_message_id=last_message_id,
|
||||
)
|
||||
|
||||
return SessionDetailResponse(
|
||||
@@ -329,7 +313,7 @@ async def get_session(
|
||||
async def cancel_session_task(
|
||||
session_id: str,
|
||||
user_id: Annotated[str | None, Depends(auth.get_user_id)],
|
||||
) -> CancelTaskResponse:
|
||||
) -> CancelSessionResponse:
|
||||
"""Cancel the active streaming task for a session.
|
||||
|
||||
Publishes a cancel event to the executor via RabbitMQ FANOUT, then
|
||||
@@ -338,39 +322,33 @@ async def cancel_session_task(
|
||||
"""
|
||||
await _validate_and_get_session(session_id, user_id)
|
||||
|
||||
active_task, _ = await stream_registry.get_active_task_for_session(
|
||||
session_id, user_id
|
||||
)
|
||||
if not active_task:
|
||||
return CancelTaskResponse(cancelled=False, reason="no_active_task")
|
||||
active_session, _ = await stream_registry.get_active_session(session_id, user_id)
|
||||
if not active_session:
|
||||
return CancelSessionResponse(cancelled=True, reason="no_active_session")
|
||||
|
||||
task_id = active_task.task_id
|
||||
await enqueue_cancel_task(task_id)
|
||||
logger.info(
|
||||
f"[CANCEL] Published cancel for task ...{task_id[-8:]} "
|
||||
f"session ...{session_id[-8:]}"
|
||||
)
|
||||
await enqueue_cancel_task(session_id)
|
||||
logger.info(f"[CANCEL] Published cancel for session ...{session_id[-8:]}")
|
||||
|
||||
# Poll until the executor confirms the task is no longer running.
|
||||
# Keep max_wait below typical reverse-proxy read timeouts.
|
||||
poll_interval = 0.5
|
||||
max_wait = 5.0
|
||||
waited = 0.0
|
||||
while waited < max_wait:
|
||||
await asyncio.sleep(poll_interval)
|
||||
waited += poll_interval
|
||||
task = await stream_registry.get_task(task_id)
|
||||
if task is None or task.status != "running":
|
||||
session_state = await stream_registry.get_session(session_id)
|
||||
if session_state is None or session_state.status != "running":
|
||||
logger.info(
|
||||
f"[CANCEL] Task ...{task_id[-8:]} confirmed stopped "
|
||||
f"(status={task.status if task else 'gone'}) after {waited:.1f}s"
|
||||
f"[CANCEL] Session ...{session_id[-8:]} confirmed stopped "
|
||||
f"(status={session_state.status if session_state else 'gone'}) after {waited:.1f}s"
|
||||
)
|
||||
return CancelTaskResponse(cancelled=True, task_id=task_id)
|
||||
return CancelSessionResponse(cancelled=True)
|
||||
|
||||
logger.warning(f"[CANCEL] Task ...{task_id[-8:]} not confirmed after {max_wait}s")
|
||||
return CancelTaskResponse(
|
||||
cancelled=True, task_id=task_id, reason="cancel_published_not_confirmed"
|
||||
logger.warning(
|
||||
f"[CANCEL] Session ...{session_id[-8:]} not confirmed after {max_wait}s, force-completing"
|
||||
)
|
||||
await stream_registry.mark_session_completed(session_id, error_message="Cancelled")
|
||||
return CancelSessionResponse(cancelled=True)
|
||||
|
||||
|
||||
@router.post(
|
||||
@@ -390,16 +368,15 @@ async def stream_chat_post(
|
||||
- Tool execution results
|
||||
|
||||
The AI generation runs in a background task that continues even if the client disconnects.
|
||||
All chunks are written to Redis for reconnection support. If the client disconnects,
|
||||
they can reconnect using GET /tasks/{task_id}/stream to resume from where they left off.
|
||||
All chunks are written to a per-turn Redis stream for reconnection support. If the client
|
||||
disconnects, they can reconnect using GET /sessions/{session_id}/stream to resume.
|
||||
|
||||
Args:
|
||||
session_id: The chat session identifier to associate with the streamed messages.
|
||||
request: Request body containing message, is_user_message, and optional context.
|
||||
user_id: Optional authenticated user ID.
|
||||
Returns:
|
||||
StreamingResponse: SSE-formatted response chunks. First chunk is a "start" event
|
||||
containing the task_id for reconnection.
|
||||
StreamingResponse: SSE-formatted response chunks.
|
||||
|
||||
"""
|
||||
import asyncio
|
||||
@@ -426,6 +403,38 @@ async def stream_chat_post(
|
||||
},
|
||||
)
|
||||
|
||||
# Enrich message with file metadata if file_ids are provided.
|
||||
# Also sanitise file_ids so only validated, workspace-scoped IDs are
|
||||
# forwarded downstream (e.g. to the executor via enqueue_copilot_turn).
|
||||
sanitized_file_ids: list[str] | None = None
|
||||
if request.file_ids and user_id:
|
||||
# Filter to valid UUIDs only to prevent DB abuse
|
||||
valid_ids = [fid for fid in request.file_ids if _UUID_RE.match(fid)]
|
||||
|
||||
if valid_ids:
|
||||
workspace = await get_or_create_workspace(user_id)
|
||||
# Batch query instead of N+1
|
||||
files = await UserWorkspaceFile.prisma().find_many(
|
||||
where={
|
||||
"id": {"in": valid_ids},
|
||||
"workspaceId": workspace.id,
|
||||
"isDeleted": False,
|
||||
}
|
||||
)
|
||||
# Only keep IDs that actually exist in the user's workspace
|
||||
sanitized_file_ids = [wf.id for wf in files] or None
|
||||
file_lines: list[str] = [
|
||||
f"- {wf.name} ({wf.mimeType}, {round(wf.sizeBytes / 1024, 1)} KB), file_id={wf.id}"
|
||||
for wf in files
|
||||
]
|
||||
if file_lines:
|
||||
files_block = (
|
||||
"\n\n[Attached files]\n"
|
||||
+ "\n".join(file_lines)
|
||||
+ "\nUse read_workspace_file with the file_id to access file contents."
|
||||
)
|
||||
request.message += files_block
|
||||
|
||||
# Atomically append user message to session BEFORE creating task to avoid
|
||||
# race condition where GET_SESSION sees task as "running" but message isn't
|
||||
# saved yet. append_and_save_message re-fetches inside a lock to prevent
|
||||
@@ -446,37 +455,38 @@ async def stream_chat_post(
|
||||
logger.info(f"[STREAM] User message saved for session {session_id}")
|
||||
|
||||
# Create a task in the stream registry for reconnection support
|
||||
task_id = str(uuid_module.uuid4())
|
||||
operation_id = str(uuid_module.uuid4())
|
||||
log_meta["task_id"] = task_id
|
||||
turn_id = str(uuid4())
|
||||
log_meta["turn_id"] = turn_id
|
||||
|
||||
task_create_start = time.perf_counter()
|
||||
await stream_registry.create_task(
|
||||
task_id=task_id,
|
||||
session_create_start = time.perf_counter()
|
||||
await stream_registry.create_session(
|
||||
session_id=session_id,
|
||||
user_id=user_id,
|
||||
tool_call_id="chat_stream", # Not a tool call, but needed for the model
|
||||
tool_call_id="chat_stream",
|
||||
tool_name="chat",
|
||||
operation_id=operation_id,
|
||||
turn_id=turn_id,
|
||||
)
|
||||
logger.info(
|
||||
f"[TIMING] create_task completed in {(time.perf_counter() - task_create_start) * 1000:.1f}ms",
|
||||
f"[TIMING] create_session completed in {(time.perf_counter() - session_create_start) * 1000:.1f}ms",
|
||||
extra={
|
||||
"json_fields": {
|
||||
**log_meta,
|
||||
"duration_ms": (time.perf_counter() - task_create_start) * 1000,
|
||||
"duration_ms": (time.perf_counter() - session_create_start) * 1000,
|
||||
}
|
||||
},
|
||||
)
|
||||
|
||||
await enqueue_copilot_task(
|
||||
task_id=task_id,
|
||||
# Per-turn stream is always fresh (unique turn_id), subscribe from beginning
|
||||
subscribe_from_id = "0-0"
|
||||
|
||||
await enqueue_copilot_turn(
|
||||
session_id=session_id,
|
||||
user_id=user_id,
|
||||
operation_id=operation_id,
|
||||
message=request.message,
|
||||
turn_id=turn_id,
|
||||
is_user_message=request.is_user_message,
|
||||
context=request.context,
|
||||
file_ids=sanitized_file_ids,
|
||||
)
|
||||
|
||||
setup_time = (time.perf_counter() - stream_start_time) * 1000
|
||||
@@ -491,7 +501,7 @@ async def stream_chat_post(
|
||||
|
||||
event_gen_start = time_module.perf_counter()
|
||||
logger.info(
|
||||
f"[TIMING] event_generator STARTED, task={task_id}, session={session_id}, "
|
||||
f"[TIMING] event_generator STARTED, turn={turn_id}, session={session_id}, "
|
||||
f"user={user_id}",
|
||||
extra={"json_fields": log_meta},
|
||||
)
|
||||
@@ -499,11 +509,12 @@ async def stream_chat_post(
|
||||
first_chunk_yielded = False
|
||||
chunks_yielded = 0
|
||||
try:
|
||||
# Subscribe to the task stream (this replays existing messages + live updates)
|
||||
subscriber_queue = await stream_registry.subscribe_to_task(
|
||||
task_id=task_id,
|
||||
# Subscribe from the position we captured before enqueuing
|
||||
# This avoids replaying old messages while catching all new ones
|
||||
subscriber_queue = await stream_registry.subscribe_to_session(
|
||||
session_id=session_id,
|
||||
user_id=user_id,
|
||||
last_message_id="0-0", # Get all messages from the beginning
|
||||
last_message_id=subscribe_from_id,
|
||||
)
|
||||
|
||||
if subscriber_queue is None:
|
||||
@@ -518,7 +529,7 @@ async def stream_chat_post(
|
||||
)
|
||||
while True:
|
||||
try:
|
||||
chunk = await asyncio.wait_for(subscriber_queue.get(), timeout=30.0)
|
||||
chunk = await asyncio.wait_for(subscriber_queue.get(), timeout=10.0)
|
||||
chunks_yielded += 1
|
||||
|
||||
if not first_chunk_yielded:
|
||||
@@ -586,19 +597,19 @@ async def stream_chat_post(
|
||||
# Unsubscribe when client disconnects or stream ends
|
||||
if subscriber_queue is not None:
|
||||
try:
|
||||
await stream_registry.unsubscribe_from_task(
|
||||
task_id, subscriber_queue
|
||||
await stream_registry.unsubscribe_from_session(
|
||||
session_id, subscriber_queue
|
||||
)
|
||||
except Exception as unsub_err:
|
||||
logger.error(
|
||||
f"Error unsubscribing from task {task_id}: {unsub_err}",
|
||||
f"Error unsubscribing from session {session_id}: {unsub_err}",
|
||||
exc_info=True,
|
||||
)
|
||||
# AI SDK protocol termination - always yield even if unsubscribe fails
|
||||
total_time = time_module.perf_counter() - event_gen_start
|
||||
logger.info(
|
||||
f"[TIMING] event_generator FINISHED in {total_time:.2f}s; "
|
||||
f"task={task_id}, session={session_id}, n_chunks={chunks_yielded}",
|
||||
f"turn={turn_id}, session={session_id}, n_chunks={chunks_yielded}",
|
||||
extra={
|
||||
"json_fields": {
|
||||
**log_meta,
|
||||
@@ -645,17 +656,21 @@ async def resume_session_stream(
|
||||
"""
|
||||
import asyncio
|
||||
|
||||
active_task, _last_id = await stream_registry.get_active_task_for_session(
|
||||
active_session, last_message_id = await stream_registry.get_active_session(
|
||||
session_id, user_id
|
||||
)
|
||||
|
||||
if not active_task:
|
||||
if not active_session:
|
||||
return Response(status_code=204)
|
||||
|
||||
subscriber_queue = await stream_registry.subscribe_to_task(
|
||||
task_id=active_task.task_id,
|
||||
# Always replay from the beginning ("0-0") on resume.
|
||||
# We can't use last_message_id because it's the latest ID in the backend
|
||||
# stream, not the latest the frontend received — the gap causes lost
|
||||
# messages. The frontend deduplicates replayed content.
|
||||
subscriber_queue = await stream_registry.subscribe_to_session(
|
||||
session_id=session_id,
|
||||
user_id=user_id,
|
||||
last_message_id="0-0", # Full replay so useChat rebuilds the message
|
||||
last_message_id="0-0",
|
||||
)
|
||||
|
||||
if subscriber_queue is None:
|
||||
@@ -667,7 +682,7 @@ async def resume_session_stream(
|
||||
try:
|
||||
while True:
|
||||
try:
|
||||
chunk = await asyncio.wait_for(subscriber_queue.get(), timeout=30.0)
|
||||
chunk = await asyncio.wait_for(subscriber_queue.get(), timeout=10.0)
|
||||
if chunk_count < 3:
|
||||
logger.info(
|
||||
"Resume stream chunk",
|
||||
@@ -691,12 +706,12 @@ async def resume_session_stream(
|
||||
logger.error(f"Error in resume stream for session {session_id}: {e}")
|
||||
finally:
|
||||
try:
|
||||
await stream_registry.unsubscribe_from_task(
|
||||
active_task.task_id, subscriber_queue
|
||||
await stream_registry.unsubscribe_from_session(
|
||||
session_id, subscriber_queue
|
||||
)
|
||||
except Exception as unsub_err:
|
||||
logger.error(
|
||||
f"Error unsubscribing from task {active_task.task_id}: {unsub_err}",
|
||||
f"Error unsubscribing from session {active_session.session_id}: {unsub_err}",
|
||||
exc_info=True,
|
||||
)
|
||||
logger.info(
|
||||
@@ -747,229 +762,6 @@ async def session_assign_user(
|
||||
return {"status": "ok"}
|
||||
|
||||
|
||||
# ========== Task Streaming (SSE Reconnection) ==========
|
||||
|
||||
|
||||
@router.get(
|
||||
"/tasks/{task_id}/stream",
|
||||
)
|
||||
async def stream_task(
|
||||
task_id: str,
|
||||
user_id: str | None = Depends(auth.get_user_id),
|
||||
last_message_id: str = Query(
|
||||
default="0-0",
|
||||
description="Last Redis Stream message ID received (e.g., '1706540123456-0'). Use '0-0' for full replay.",
|
||||
),
|
||||
):
|
||||
"""
|
||||
Reconnect to a long-running task's SSE stream.
|
||||
|
||||
When a long-running operation (like agent generation) starts, the client
|
||||
receives a task_id. If the connection drops, the client can reconnect
|
||||
using this endpoint to resume receiving updates.
|
||||
|
||||
Args:
|
||||
task_id: The task ID from the operation_started response.
|
||||
user_id: Authenticated user ID for ownership validation.
|
||||
last_message_id: Last Redis Stream message ID received ("0-0" for full replay).
|
||||
|
||||
Returns:
|
||||
StreamingResponse: SSE-formatted response chunks starting after last_message_id.
|
||||
|
||||
Raises:
|
||||
HTTPException: 404 if task not found, 410 if task expired, 403 if access denied.
|
||||
"""
|
||||
# Check task existence and expiry before subscribing
|
||||
task, error_code = await stream_registry.get_task_with_expiry_info(task_id)
|
||||
|
||||
if error_code == "TASK_EXPIRED":
|
||||
raise HTTPException(
|
||||
status_code=410,
|
||||
detail={
|
||||
"code": "TASK_EXPIRED",
|
||||
"message": "This operation has expired. Please try again.",
|
||||
},
|
||||
)
|
||||
|
||||
if error_code == "TASK_NOT_FOUND":
|
||||
raise HTTPException(
|
||||
status_code=404,
|
||||
detail={
|
||||
"code": "TASK_NOT_FOUND",
|
||||
"message": f"Task {task_id} not found.",
|
||||
},
|
||||
)
|
||||
|
||||
# Validate ownership if task has an owner
|
||||
if task and task.user_id and user_id != task.user_id:
|
||||
raise HTTPException(
|
||||
status_code=403,
|
||||
detail={
|
||||
"code": "ACCESS_DENIED",
|
||||
"message": "You do not have access to this task.",
|
||||
},
|
||||
)
|
||||
|
||||
# Get subscriber queue from stream registry
|
||||
subscriber_queue = await stream_registry.subscribe_to_task(
|
||||
task_id=task_id,
|
||||
user_id=user_id,
|
||||
last_message_id=last_message_id,
|
||||
)
|
||||
|
||||
if subscriber_queue is None:
|
||||
raise HTTPException(
|
||||
status_code=404,
|
||||
detail={
|
||||
"code": "TASK_NOT_FOUND",
|
||||
"message": f"Task {task_id} not found or access denied.",
|
||||
},
|
||||
)
|
||||
|
||||
async def event_generator() -> AsyncGenerator[str, None]:
|
||||
heartbeat_interval = 15.0 # Send heartbeat every 15 seconds
|
||||
try:
|
||||
while True:
|
||||
try:
|
||||
# Wait for next chunk with timeout for heartbeats
|
||||
chunk = await asyncio.wait_for(
|
||||
subscriber_queue.get(), timeout=heartbeat_interval
|
||||
)
|
||||
yield chunk.to_sse()
|
||||
|
||||
# Check for finish signal
|
||||
if isinstance(chunk, StreamFinish):
|
||||
break
|
||||
except asyncio.TimeoutError:
|
||||
# Send heartbeat to keep connection alive
|
||||
yield StreamHeartbeat().to_sse()
|
||||
except Exception as e:
|
||||
logger.error(f"Error in task stream {task_id}: {e}", exc_info=True)
|
||||
finally:
|
||||
# Unsubscribe when client disconnects or stream ends
|
||||
try:
|
||||
await stream_registry.unsubscribe_from_task(task_id, subscriber_queue)
|
||||
except Exception as unsub_err:
|
||||
logger.error(
|
||||
f"Error unsubscribing from task {task_id}: {unsub_err}",
|
||||
exc_info=True,
|
||||
)
|
||||
# AI SDK protocol termination - always yield even if unsubscribe fails
|
||||
yield "data: [DONE]\n\n"
|
||||
|
||||
return StreamingResponse(
|
||||
event_generator(),
|
||||
media_type="text/event-stream",
|
||||
headers={
|
||||
"Cache-Control": "no-cache",
|
||||
"Connection": "keep-alive",
|
||||
"X-Accel-Buffering": "no",
|
||||
"x-vercel-ai-ui-message-stream": "v1",
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
@router.get(
|
||||
"/tasks/{task_id}",
|
||||
)
|
||||
async def get_task_status(
|
||||
task_id: str,
|
||||
user_id: str | None = Depends(auth.get_user_id),
|
||||
) -> dict:
|
||||
"""
|
||||
Get the status of a long-running task.
|
||||
|
||||
Args:
|
||||
task_id: The task ID to check.
|
||||
user_id: Authenticated user ID for ownership validation.
|
||||
|
||||
Returns:
|
||||
dict: Task status including task_id, status, tool_name, and operation_id.
|
||||
|
||||
Raises:
|
||||
NotFoundError: If task_id is not found or user doesn't have access.
|
||||
"""
|
||||
task = await stream_registry.get_task(task_id)
|
||||
|
||||
if task is None:
|
||||
raise NotFoundError(f"Task {task_id} not found.")
|
||||
|
||||
# Validate ownership - if task has an owner, requester must match
|
||||
if task.user_id and user_id != task.user_id:
|
||||
raise NotFoundError(f"Task {task_id} not found.")
|
||||
|
||||
return {
|
||||
"task_id": task.task_id,
|
||||
"session_id": task.session_id,
|
||||
"status": task.status,
|
||||
"tool_name": task.tool_name,
|
||||
"operation_id": task.operation_id,
|
||||
"created_at": task.created_at.isoformat(),
|
||||
}
|
||||
|
||||
|
||||
# ========== External Completion Webhook ==========
|
||||
|
||||
|
||||
@router.post(
|
||||
"/operations/{operation_id}/complete",
|
||||
status_code=200,
|
||||
)
|
||||
async def complete_operation(
|
||||
operation_id: str,
|
||||
request: OperationCompleteRequest,
|
||||
x_api_key: str | None = Header(default=None),
|
||||
) -> dict:
|
||||
"""
|
||||
External completion webhook for long-running operations.
|
||||
|
||||
Called by Agent Generator (or other services) when an operation completes.
|
||||
This triggers the stream registry to publish completion and continue LLM generation.
|
||||
|
||||
Args:
|
||||
operation_id: The operation ID to complete.
|
||||
request: Completion payload with success status and result/error.
|
||||
x_api_key: Internal API key for authentication.
|
||||
|
||||
Returns:
|
||||
dict: Status of the completion.
|
||||
|
||||
Raises:
|
||||
HTTPException: If API key is invalid or operation not found.
|
||||
"""
|
||||
# Validate internal API key - reject if not configured or invalid
|
||||
if not config.internal_api_key:
|
||||
logger.error(
|
||||
"Operation complete webhook rejected: CHAT_INTERNAL_API_KEY not configured"
|
||||
)
|
||||
raise HTTPException(
|
||||
status_code=503,
|
||||
detail="Webhook not available: internal API key not configured",
|
||||
)
|
||||
if x_api_key != config.internal_api_key:
|
||||
raise HTTPException(status_code=401, detail="Invalid API key")
|
||||
|
||||
# Find task by operation_id
|
||||
task = await stream_registry.find_task_by_operation_id(operation_id)
|
||||
if task is None:
|
||||
raise HTTPException(
|
||||
status_code=404,
|
||||
detail=f"Operation {operation_id} not found",
|
||||
)
|
||||
|
||||
logger.info(
|
||||
f"Received completion webhook for operation {operation_id} "
|
||||
f"(task_id={task.task_id}, success={request.success})"
|
||||
)
|
||||
|
||||
if request.success:
|
||||
await process_operation_success(task, request.result)
|
||||
else:
|
||||
await process_operation_failure(task, request.error)
|
||||
|
||||
return {"status": "ok", "task_id": task.task_id}
|
||||
|
||||
|
||||
# ========== Configuration ==========
|
||||
|
||||
|
||||
@@ -1050,9 +842,6 @@ ToolResponseUnion = (
|
||||
| BlockOutputResponse
|
||||
| DocSearchResultsResponse
|
||||
| DocPageResponse
|
||||
| OperationStartedResponse
|
||||
| OperationPendingResponse
|
||||
| OperationInProgressResponse
|
||||
)
|
||||
|
||||
|
||||
|
||||
@@ -0,0 +1,160 @@
|
||||
"""Tests for chat route file_ids validation and enrichment."""
|
||||
|
||||
import fastapi
|
||||
import fastapi.testclient
|
||||
import pytest
|
||||
import pytest_mock
|
||||
|
||||
from backend.api.features.chat import routes as chat_routes
|
||||
|
||||
app = fastapi.FastAPI()
|
||||
app.include_router(chat_routes.router)
|
||||
|
||||
client = fastapi.testclient.TestClient(app)
|
||||
|
||||
TEST_USER_ID = "3e53486c-cf57-477e-ba2a-cb02dc828e1a"
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def setup_app_auth(mock_jwt_user):
|
||||
from autogpt_libs.auth.jwt_utils import get_jwt_payload
|
||||
|
||||
app.dependency_overrides[get_jwt_payload] = mock_jwt_user["get_jwt_payload"]
|
||||
yield
|
||||
app.dependency_overrides.clear()
|
||||
|
||||
|
||||
# ---- file_ids Pydantic validation (B1) ----
|
||||
|
||||
|
||||
def test_stream_chat_rejects_too_many_file_ids():
|
||||
"""More than 20 file_ids should be rejected by Pydantic validation (422)."""
|
||||
response = client.post(
|
||||
"/sessions/sess-1/stream",
|
||||
json={
|
||||
"message": "hello",
|
||||
"file_ids": [f"00000000-0000-0000-0000-{i:012d}" for i in range(21)],
|
||||
},
|
||||
)
|
||||
assert response.status_code == 422
|
||||
|
||||
|
||||
def _mock_stream_internals(mocker: pytest_mock.MockFixture):
|
||||
"""Mock the async internals of stream_chat_post so tests can exercise
|
||||
validation and enrichment logic without needing Redis/RabbitMQ."""
|
||||
mocker.patch(
|
||||
"backend.api.features.chat.routes._validate_and_get_session",
|
||||
return_value=None,
|
||||
)
|
||||
mocker.patch(
|
||||
"backend.api.features.chat.routes.append_and_save_message",
|
||||
return_value=None,
|
||||
)
|
||||
mock_registry = mocker.MagicMock()
|
||||
mock_registry.create_session = mocker.AsyncMock(return_value=None)
|
||||
mocker.patch(
|
||||
"backend.api.features.chat.routes.stream_registry",
|
||||
mock_registry,
|
||||
)
|
||||
mocker.patch(
|
||||
"backend.api.features.chat.routes.enqueue_copilot_turn",
|
||||
return_value=None,
|
||||
)
|
||||
mocker.patch(
|
||||
"backend.api.features.chat.routes.track_user_message",
|
||||
return_value=None,
|
||||
)
|
||||
|
||||
|
||||
def test_stream_chat_accepts_20_file_ids(mocker: pytest_mock.MockFixture):
|
||||
"""Exactly 20 file_ids should be accepted (not rejected by validation)."""
|
||||
_mock_stream_internals(mocker)
|
||||
# Patch workspace lookup as imported by the routes module
|
||||
mocker.patch(
|
||||
"backend.api.features.chat.routes.get_or_create_workspace",
|
||||
return_value=type("W", (), {"id": "ws-1"})(),
|
||||
)
|
||||
mock_prisma = mocker.MagicMock()
|
||||
mock_prisma.find_many = mocker.AsyncMock(return_value=[])
|
||||
mocker.patch(
|
||||
"prisma.models.UserWorkspaceFile.prisma",
|
||||
return_value=mock_prisma,
|
||||
)
|
||||
|
||||
response = client.post(
|
||||
"/sessions/sess-1/stream",
|
||||
json={
|
||||
"message": "hello",
|
||||
"file_ids": [f"00000000-0000-0000-0000-{i:012d}" for i in range(20)],
|
||||
},
|
||||
)
|
||||
# Should get past validation — 200 streaming response expected
|
||||
assert response.status_code == 200
|
||||
|
||||
|
||||
# ---- UUID format filtering ----
|
||||
|
||||
|
||||
def test_file_ids_filters_invalid_uuids(mocker: pytest_mock.MockFixture):
|
||||
"""Non-UUID strings in file_ids should be silently filtered out
|
||||
and NOT passed to the database query."""
|
||||
_mock_stream_internals(mocker)
|
||||
mocker.patch(
|
||||
"backend.api.features.chat.routes.get_or_create_workspace",
|
||||
return_value=type("W", (), {"id": "ws-1"})(),
|
||||
)
|
||||
|
||||
mock_prisma = mocker.MagicMock()
|
||||
mock_prisma.find_many = mocker.AsyncMock(return_value=[])
|
||||
mocker.patch(
|
||||
"prisma.models.UserWorkspaceFile.prisma",
|
||||
return_value=mock_prisma,
|
||||
)
|
||||
|
||||
valid_id = "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"
|
||||
client.post(
|
||||
"/sessions/sess-1/stream",
|
||||
json={
|
||||
"message": "hello",
|
||||
"file_ids": [
|
||||
valid_id,
|
||||
"not-a-uuid",
|
||||
"../../../etc/passwd",
|
||||
"",
|
||||
],
|
||||
},
|
||||
)
|
||||
|
||||
# The find_many call should only receive the one valid UUID
|
||||
mock_prisma.find_many.assert_called_once()
|
||||
call_kwargs = mock_prisma.find_many.call_args[1]
|
||||
assert call_kwargs["where"]["id"]["in"] == [valid_id]
|
||||
|
||||
|
||||
# ---- Cross-workspace file_ids ----
|
||||
|
||||
|
||||
def test_file_ids_scoped_to_workspace(mocker: pytest_mock.MockFixture):
|
||||
"""The batch query should scope to the user's workspace."""
|
||||
_mock_stream_internals(mocker)
|
||||
mocker.patch(
|
||||
"backend.api.features.chat.routes.get_or_create_workspace",
|
||||
return_value=type("W", (), {"id": "my-workspace-id"})(),
|
||||
)
|
||||
|
||||
mock_prisma = mocker.MagicMock()
|
||||
mock_prisma.find_many = mocker.AsyncMock(return_value=[])
|
||||
mocker.patch(
|
||||
"prisma.models.UserWorkspaceFile.prisma",
|
||||
return_value=mock_prisma,
|
||||
)
|
||||
|
||||
fid = "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"
|
||||
client.post(
|
||||
"/sessions/sess-1/stream",
|
||||
json={"message": "hi", "file_ids": [fid]},
|
||||
)
|
||||
|
||||
call_kwargs = mock_prisma.find_many.call_args[1]
|
||||
assert call_kwargs["where"]["workspaceId"] == "my-workspace-id"
|
||||
assert call_kwargs["where"]["isDeleted"] is False
|
||||
File diff suppressed because it is too large
Load Diff
@@ -144,6 +144,7 @@ async def test_add_agent_to_library(mocker):
|
||||
)
|
||||
|
||||
mock_library_agent = mocker.patch("prisma.models.LibraryAgent.prisma")
|
||||
mock_library_agent.return_value.find_first = mocker.AsyncMock(return_value=None)
|
||||
mock_library_agent.return_value.find_unique = mocker.AsyncMock(return_value=None)
|
||||
mock_library_agent.return_value.create = mocker.AsyncMock(
|
||||
return_value=mock_library_agent_data
|
||||
@@ -178,7 +179,6 @@ async def test_add_agent_to_library(mocker):
|
||||
"agentGraphVersion": 1,
|
||||
}
|
||||
},
|
||||
include={"AgentGraph": True},
|
||||
)
|
||||
# Check that create was called with the expected data including settings
|
||||
create_call_args = mock_library_agent.return_value.create.call_args
|
||||
|
||||
@@ -0,0 +1,10 @@
|
||||
class FolderValidationError(Exception):
|
||||
"""Raised when folder operations fail validation."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class FolderAlreadyExistsError(FolderValidationError):
|
||||
"""Raised when a folder with the same name already exists in the location."""
|
||||
|
||||
pass
|
||||
@@ -26,6 +26,95 @@ class LibraryAgentStatus(str, Enum):
|
||||
ERROR = "ERROR"
|
||||
|
||||
|
||||
# === Folder Models ===
|
||||
|
||||
|
||||
class LibraryFolder(pydantic.BaseModel):
|
||||
"""Represents a folder for organizing library agents."""
|
||||
|
||||
id: str
|
||||
user_id: str
|
||||
name: str
|
||||
icon: str | None = None
|
||||
color: str | None = None
|
||||
parent_id: str | None = None
|
||||
created_at: datetime.datetime
|
||||
updated_at: datetime.datetime
|
||||
agent_count: int = 0 # Direct agents in folder
|
||||
subfolder_count: int = 0 # Direct child folders
|
||||
|
||||
@staticmethod
|
||||
def from_db(
|
||||
folder: prisma.models.LibraryFolder,
|
||||
agent_count: int = 0,
|
||||
subfolder_count: int = 0,
|
||||
) -> "LibraryFolder":
|
||||
"""Factory method that constructs a LibraryFolder from a Prisma model."""
|
||||
return LibraryFolder(
|
||||
id=folder.id,
|
||||
user_id=folder.userId,
|
||||
name=folder.name,
|
||||
icon=folder.icon,
|
||||
color=folder.color,
|
||||
parent_id=folder.parentId,
|
||||
created_at=folder.createdAt,
|
||||
updated_at=folder.updatedAt,
|
||||
agent_count=agent_count,
|
||||
subfolder_count=subfolder_count,
|
||||
)
|
||||
|
||||
|
||||
class LibraryFolderTree(LibraryFolder):
|
||||
"""Folder with nested children for tree view."""
|
||||
|
||||
children: list["LibraryFolderTree"] = []
|
||||
|
||||
|
||||
class FolderCreateRequest(pydantic.BaseModel):
|
||||
"""Request model for creating a folder."""
|
||||
|
||||
name: str = pydantic.Field(..., min_length=1, max_length=100)
|
||||
icon: str | None = None
|
||||
color: str | None = pydantic.Field(
|
||||
None, pattern=r"^#[0-9A-Fa-f]{6}$", description="Hex color code (#RRGGBB)"
|
||||
)
|
||||
parent_id: str | None = None
|
||||
|
||||
|
||||
class FolderUpdateRequest(pydantic.BaseModel):
|
||||
"""Request model for updating a folder."""
|
||||
|
||||
name: str | None = pydantic.Field(None, min_length=1, max_length=100)
|
||||
icon: str | None = None
|
||||
color: str | None = None
|
||||
|
||||
|
||||
class FolderMoveRequest(pydantic.BaseModel):
|
||||
"""Request model for moving a folder to a new parent."""
|
||||
|
||||
target_parent_id: str | None = None # None = move to root
|
||||
|
||||
|
||||
class BulkMoveAgentsRequest(pydantic.BaseModel):
|
||||
"""Request model for moving multiple agents to a folder."""
|
||||
|
||||
agent_ids: list[str]
|
||||
folder_id: str | None = None # None = move to root
|
||||
|
||||
|
||||
class FolderListResponse(pydantic.BaseModel):
|
||||
"""Response schema for a list of folders."""
|
||||
|
||||
folders: list[LibraryFolder]
|
||||
pagination: Pagination
|
||||
|
||||
|
||||
class FolderTreeResponse(pydantic.BaseModel):
|
||||
"""Response schema for folder tree structure."""
|
||||
|
||||
tree: list[LibraryFolderTree]
|
||||
|
||||
|
||||
class MarketplaceListingCreator(pydantic.BaseModel):
|
||||
"""Creator information for a marketplace listing."""
|
||||
|
||||
@@ -120,6 +209,9 @@ class LibraryAgent(pydantic.BaseModel):
|
||||
can_access_graph: bool
|
||||
is_latest_version: bool
|
||||
is_favorite: bool
|
||||
folder_id: str | None = None
|
||||
folder_name: str | None = None # Denormalized for display
|
||||
|
||||
recommended_schedule_cron: str | None = None
|
||||
settings: GraphSettings = pydantic.Field(default_factory=GraphSettings)
|
||||
marketplace_listing: Optional["MarketplaceListing"] = None
|
||||
@@ -259,6 +351,8 @@ class LibraryAgent(pydantic.BaseModel):
|
||||
can_access_graph=can_access_graph,
|
||||
is_latest_version=is_latest_version,
|
||||
is_favorite=agent.isFavorite,
|
||||
folder_id=agent.folderId,
|
||||
folder_name=agent.Folder.name if agent.Folder else None,
|
||||
recommended_schedule_cron=agent.AgentGraph.recommendedScheduleCron,
|
||||
settings=_parse_settings(agent.settings),
|
||||
marketplace_listing=marketplace_listing_data,
|
||||
@@ -470,3 +564,7 @@ class LibraryAgentUpdateRequest(pydantic.BaseModel):
|
||||
settings: Optional[GraphSettings] = pydantic.Field(
|
||||
default=None, description="User-specific settings for this library agent"
|
||||
)
|
||||
folder_id: Optional[str] = pydantic.Field(
|
||||
default=None,
|
||||
description="Folder ID to move agent to (None to move to root)",
|
||||
)
|
||||
|
||||
@@ -1,9 +1,11 @@
|
||||
import fastapi
|
||||
|
||||
from .agents import router as agents_router
|
||||
from .folders import router as folders_router
|
||||
from .presets import router as presets_router
|
||||
|
||||
router = fastapi.APIRouter()
|
||||
|
||||
router.include_router(presets_router)
|
||||
router.include_router(folders_router)
|
||||
router.include_router(agents_router)
|
||||
|
||||
@@ -41,6 +41,14 @@ async def list_library_agents(
|
||||
ge=1,
|
||||
description="Number of agents per page (must be >= 1)",
|
||||
),
|
||||
folder_id: Optional[str] = Query(
|
||||
None,
|
||||
description="Filter by folder ID",
|
||||
),
|
||||
include_root_only: bool = Query(
|
||||
False,
|
||||
description="Only return agents without a folder (root-level agents)",
|
||||
),
|
||||
) -> library_model.LibraryAgentResponse:
|
||||
"""
|
||||
Get all agents in the user's library (both created and saved).
|
||||
@@ -51,6 +59,8 @@ async def list_library_agents(
|
||||
sort_by=sort_by,
|
||||
page=page,
|
||||
page_size=page_size,
|
||||
folder_id=folder_id,
|
||||
include_root_only=include_root_only,
|
||||
)
|
||||
|
||||
|
||||
@@ -168,6 +178,7 @@ async def update_library_agent(
|
||||
is_favorite=payload.is_favorite,
|
||||
is_archived=payload.is_archived,
|
||||
settings=payload.settings,
|
||||
folder_id=payload.folder_id,
|
||||
)
|
||||
|
||||
|
||||
|
||||
@@ -0,0 +1,287 @@
|
||||
from typing import Optional
|
||||
|
||||
import autogpt_libs.auth as autogpt_auth_lib
|
||||
from fastapi import APIRouter, Query, Security, status
|
||||
from fastapi.responses import Response
|
||||
|
||||
from .. import db as library_db
|
||||
from .. import model as library_model
|
||||
|
||||
router = APIRouter(
|
||||
prefix="/folders",
|
||||
tags=["library", "folders", "private"],
|
||||
dependencies=[Security(autogpt_auth_lib.requires_user)],
|
||||
)
|
||||
|
||||
|
||||
@router.get(
|
||||
"",
|
||||
summary="List Library Folders",
|
||||
response_model=library_model.FolderListResponse,
|
||||
responses={
|
||||
200: {"description": "List of folders"},
|
||||
500: {"description": "Server error"},
|
||||
},
|
||||
)
|
||||
async def list_folders(
|
||||
user_id: str = Security(autogpt_auth_lib.get_user_id),
|
||||
parent_id: Optional[str] = Query(
|
||||
None,
|
||||
description="Filter by parent folder ID. If not provided, returns root-level folders.",
|
||||
),
|
||||
include_relations: bool = Query(
|
||||
True,
|
||||
description="Include agent and subfolder relations (for counts)",
|
||||
),
|
||||
) -> library_model.FolderListResponse:
|
||||
"""
|
||||
List folders for the authenticated user.
|
||||
|
||||
Args:
|
||||
user_id: ID of the authenticated user.
|
||||
parent_id: Optional parent folder ID to filter by.
|
||||
include_relations: Whether to include agent and subfolder relations for counts.
|
||||
|
||||
Returns:
|
||||
A FolderListResponse containing folders.
|
||||
"""
|
||||
folders = await library_db.list_folders(
|
||||
user_id=user_id,
|
||||
parent_id=parent_id,
|
||||
include_relations=include_relations,
|
||||
)
|
||||
return library_model.FolderListResponse(
|
||||
folders=folders,
|
||||
pagination=library_model.Pagination(
|
||||
total_items=len(folders),
|
||||
total_pages=1,
|
||||
current_page=1,
|
||||
page_size=len(folders),
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
@router.get(
|
||||
"/tree",
|
||||
summary="Get Folder Tree",
|
||||
response_model=library_model.FolderTreeResponse,
|
||||
responses={
|
||||
200: {"description": "Folder tree structure"},
|
||||
500: {"description": "Server error"},
|
||||
},
|
||||
)
|
||||
async def get_folder_tree(
|
||||
user_id: str = Security(autogpt_auth_lib.get_user_id),
|
||||
) -> library_model.FolderTreeResponse:
|
||||
"""
|
||||
Get the full folder tree for the authenticated user.
|
||||
|
||||
Args:
|
||||
user_id: ID of the authenticated user.
|
||||
|
||||
Returns:
|
||||
A FolderTreeResponse containing the nested folder structure.
|
||||
"""
|
||||
tree = await library_db.get_folder_tree(user_id=user_id)
|
||||
return library_model.FolderTreeResponse(tree=tree)
|
||||
|
||||
|
||||
@router.get(
|
||||
"/{folder_id}",
|
||||
summary="Get Folder",
|
||||
response_model=library_model.LibraryFolder,
|
||||
responses={
|
||||
200: {"description": "Folder details"},
|
||||
404: {"description": "Folder not found"},
|
||||
500: {"description": "Server error"},
|
||||
},
|
||||
)
|
||||
async def get_folder(
|
||||
folder_id: str,
|
||||
user_id: str = Security(autogpt_auth_lib.get_user_id),
|
||||
) -> library_model.LibraryFolder:
|
||||
"""
|
||||
Get a specific folder.
|
||||
|
||||
Args:
|
||||
folder_id: ID of the folder to retrieve.
|
||||
user_id: ID of the authenticated user.
|
||||
|
||||
Returns:
|
||||
The requested LibraryFolder.
|
||||
"""
|
||||
return await library_db.get_folder(folder_id=folder_id, user_id=user_id)
|
||||
|
||||
|
||||
@router.post(
|
||||
"",
|
||||
summary="Create Folder",
|
||||
status_code=status.HTTP_201_CREATED,
|
||||
response_model=library_model.LibraryFolder,
|
||||
responses={
|
||||
201: {"description": "Folder created successfully"},
|
||||
400: {"description": "Validation error"},
|
||||
404: {"description": "Parent folder not found"},
|
||||
409: {"description": "Folder name conflict"},
|
||||
500: {"description": "Server error"},
|
||||
},
|
||||
)
|
||||
async def create_folder(
|
||||
payload: library_model.FolderCreateRequest,
|
||||
user_id: str = Security(autogpt_auth_lib.get_user_id),
|
||||
) -> library_model.LibraryFolder:
|
||||
"""
|
||||
Create a new folder.
|
||||
|
||||
Args:
|
||||
payload: The folder creation request.
|
||||
user_id: ID of the authenticated user.
|
||||
|
||||
Returns:
|
||||
The created LibraryFolder.
|
||||
"""
|
||||
return await library_db.create_folder(
|
||||
user_id=user_id,
|
||||
name=payload.name,
|
||||
parent_id=payload.parent_id,
|
||||
icon=payload.icon,
|
||||
color=payload.color,
|
||||
)
|
||||
|
||||
|
||||
@router.patch(
|
||||
"/{folder_id}",
|
||||
summary="Update Folder",
|
||||
response_model=library_model.LibraryFolder,
|
||||
responses={
|
||||
200: {"description": "Folder updated successfully"},
|
||||
400: {"description": "Validation error"},
|
||||
404: {"description": "Folder not found"},
|
||||
409: {"description": "Folder name conflict"},
|
||||
500: {"description": "Server error"},
|
||||
},
|
||||
)
|
||||
async def update_folder(
|
||||
folder_id: str,
|
||||
payload: library_model.FolderUpdateRequest,
|
||||
user_id: str = Security(autogpt_auth_lib.get_user_id),
|
||||
) -> library_model.LibraryFolder:
|
||||
"""
|
||||
Update a folder's properties.
|
||||
|
||||
Args:
|
||||
folder_id: ID of the folder to update.
|
||||
payload: The folder update request.
|
||||
user_id: ID of the authenticated user.
|
||||
|
||||
Returns:
|
||||
The updated LibraryFolder.
|
||||
"""
|
||||
return await library_db.update_folder(
|
||||
folder_id=folder_id,
|
||||
user_id=user_id,
|
||||
name=payload.name,
|
||||
icon=payload.icon,
|
||||
color=payload.color,
|
||||
)
|
||||
|
||||
|
||||
@router.post(
|
||||
"/{folder_id}/move",
|
||||
summary="Move Folder",
|
||||
response_model=library_model.LibraryFolder,
|
||||
responses={
|
||||
200: {"description": "Folder moved successfully"},
|
||||
400: {"description": "Validation error (circular reference)"},
|
||||
404: {"description": "Folder or target parent not found"},
|
||||
409: {"description": "Folder name conflict in target location"},
|
||||
500: {"description": "Server error"},
|
||||
},
|
||||
)
|
||||
async def move_folder(
|
||||
folder_id: str,
|
||||
payload: library_model.FolderMoveRequest,
|
||||
user_id: str = Security(autogpt_auth_lib.get_user_id),
|
||||
) -> library_model.LibraryFolder:
|
||||
"""
|
||||
Move a folder to a new parent.
|
||||
|
||||
Args:
|
||||
folder_id: ID of the folder to move.
|
||||
payload: The move request with target parent.
|
||||
user_id: ID of the authenticated user.
|
||||
|
||||
Returns:
|
||||
The moved LibraryFolder.
|
||||
"""
|
||||
return await library_db.move_folder(
|
||||
folder_id=folder_id,
|
||||
user_id=user_id,
|
||||
target_parent_id=payload.target_parent_id,
|
||||
)
|
||||
|
||||
|
||||
@router.delete(
|
||||
"/{folder_id}",
|
||||
summary="Delete Folder",
|
||||
status_code=status.HTTP_204_NO_CONTENT,
|
||||
responses={
|
||||
204: {"description": "Folder deleted successfully"},
|
||||
404: {"description": "Folder not found"},
|
||||
500: {"description": "Server error"},
|
||||
},
|
||||
)
|
||||
async def delete_folder(
|
||||
folder_id: str,
|
||||
user_id: str = Security(autogpt_auth_lib.get_user_id),
|
||||
) -> Response:
|
||||
"""
|
||||
Soft-delete a folder and all its contents.
|
||||
|
||||
Args:
|
||||
folder_id: ID of the folder to delete.
|
||||
user_id: ID of the authenticated user.
|
||||
|
||||
Returns:
|
||||
204 No Content if successful.
|
||||
"""
|
||||
await library_db.delete_folder(
|
||||
folder_id=folder_id,
|
||||
user_id=user_id,
|
||||
soft_delete=True,
|
||||
)
|
||||
return Response(status_code=status.HTTP_204_NO_CONTENT)
|
||||
|
||||
|
||||
# === Bulk Agent Operations ===
|
||||
|
||||
|
||||
@router.post(
|
||||
"/agents/bulk-move",
|
||||
summary="Bulk Move Agents",
|
||||
response_model=list[library_model.LibraryAgent],
|
||||
responses={
|
||||
200: {"description": "Agents moved successfully"},
|
||||
404: {"description": "Folder not found"},
|
||||
500: {"description": "Server error"},
|
||||
},
|
||||
)
|
||||
async def bulk_move_agents(
|
||||
payload: library_model.BulkMoveAgentsRequest,
|
||||
user_id: str = Security(autogpt_auth_lib.get_user_id),
|
||||
) -> list[library_model.LibraryAgent]:
|
||||
"""
|
||||
Move multiple agents to a folder.
|
||||
|
||||
Args:
|
||||
payload: The bulk move request with agent IDs and target folder.
|
||||
user_id: ID of the authenticated user.
|
||||
|
||||
Returns:
|
||||
The updated LibraryAgents.
|
||||
"""
|
||||
return await library_db.bulk_move_agents_to_folder(
|
||||
agent_ids=payload.agent_ids,
|
||||
folder_id=payload.folder_id,
|
||||
user_id=user_id,
|
||||
)
|
||||
@@ -115,6 +115,8 @@ async def test_get_library_agents_success(
|
||||
sort_by=library_model.LibraryAgentSort.UPDATED_AT,
|
||||
page=1,
|
||||
page_size=15,
|
||||
folder_id=None,
|
||||
include_root_only=False,
|
||||
)
|
||||
|
||||
|
||||
|
||||
@@ -9,15 +9,26 @@ import logging
|
||||
from abc import ABC, abstractmethod
|
||||
from dataclasses import dataclass
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
from typing import Any, get_args, get_origin
|
||||
|
||||
from prisma.enums import ContentType
|
||||
|
||||
from backend.blocks.llm import LlmModel
|
||||
from backend.data.db import query_raw_with_schema
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _contains_type(annotation: Any, target: type) -> bool:
|
||||
"""Check if an annotation is or contains the target type (handles Optional/Union/Annotated)."""
|
||||
if annotation is target:
|
||||
return True
|
||||
origin = get_origin(annotation)
|
||||
if origin is None:
|
||||
return False
|
||||
return any(_contains_type(arg, target) for arg in get_args(annotation))
|
||||
|
||||
|
||||
@dataclass
|
||||
class ContentItem:
|
||||
"""Represents a piece of content to be embedded."""
|
||||
@@ -188,45 +199,51 @@ class BlockHandler(ContentHandler):
|
||||
try:
|
||||
block_instance = block_cls()
|
||||
|
||||
# Skip disabled blocks - they shouldn't be indexed
|
||||
if block_instance.disabled:
|
||||
continue
|
||||
|
||||
# Build searchable text from block metadata
|
||||
parts = []
|
||||
if hasattr(block_instance, "name") and block_instance.name:
|
||||
if block_instance.name:
|
||||
parts.append(block_instance.name)
|
||||
if (
|
||||
hasattr(block_instance, "description")
|
||||
and block_instance.description
|
||||
):
|
||||
if block_instance.description:
|
||||
parts.append(block_instance.description)
|
||||
if hasattr(block_instance, "categories") and block_instance.categories:
|
||||
# Convert BlockCategory enum to strings
|
||||
if block_instance.categories:
|
||||
parts.append(
|
||||
" ".join(str(cat.value) for cat in block_instance.categories)
|
||||
)
|
||||
|
||||
# Add input/output schema info
|
||||
if hasattr(block_instance, "input_schema"):
|
||||
schema = block_instance.input_schema
|
||||
if hasattr(schema, "model_json_schema"):
|
||||
schema_dict = schema.model_json_schema()
|
||||
if "properties" in schema_dict:
|
||||
for prop_name, prop_info in schema_dict[
|
||||
"properties"
|
||||
].items():
|
||||
if "description" in prop_info:
|
||||
parts.append(
|
||||
f"{prop_name}: {prop_info['description']}"
|
||||
)
|
||||
# Add input schema field descriptions
|
||||
block_input_fields = block_instance.input_schema.model_fields
|
||||
parts += [
|
||||
f"{field_name}: {field_info.description}"
|
||||
for field_name, field_info in block_input_fields.items()
|
||||
if field_info.description
|
||||
]
|
||||
|
||||
searchable_text = " ".join(parts)
|
||||
|
||||
# Convert categories set of enums to list of strings for JSON serialization
|
||||
categories = getattr(block_instance, "categories", set())
|
||||
categories_list = (
|
||||
[cat.value for cat in categories] if categories else []
|
||||
[cat.value for cat in block_instance.categories]
|
||||
if block_instance.categories
|
||||
else []
|
||||
)
|
||||
|
||||
# Extract provider names from credentials fields
|
||||
credentials_info = (
|
||||
block_instance.input_schema.get_credentials_fields_info()
|
||||
)
|
||||
is_integration = len(credentials_info) > 0
|
||||
provider_names = [
|
||||
provider.value.lower()
|
||||
for info in credentials_info.values()
|
||||
for provider in info.provider
|
||||
]
|
||||
|
||||
# Check if block has LlmModel field in input schema
|
||||
has_llm_model_field = any(
|
||||
_contains_type(field.annotation, LlmModel)
|
||||
for field in block_instance.input_schema.model_fields.values()
|
||||
)
|
||||
|
||||
items.append(
|
||||
@@ -235,8 +252,11 @@ class BlockHandler(ContentHandler):
|
||||
content_type=ContentType.BLOCK,
|
||||
searchable_text=searchable_text,
|
||||
metadata={
|
||||
"name": getattr(block_instance, "name", ""),
|
||||
"name": block_instance.name,
|
||||
"categories": categories_list,
|
||||
"providers": provider_names,
|
||||
"has_llm_model_field": has_llm_model_field,
|
||||
"is_integration": is_integration,
|
||||
},
|
||||
user_id=None, # Blocks are public
|
||||
)
|
||||
|
||||
@@ -82,9 +82,10 @@ async def test_block_handler_get_missing_items(mocker):
|
||||
mock_block_instance.description = "Performs calculations"
|
||||
mock_block_instance.categories = [MagicMock(value="MATH")]
|
||||
mock_block_instance.disabled = False
|
||||
mock_block_instance.input_schema.model_json_schema.return_value = {
|
||||
"properties": {"expression": {"description": "Math expression to evaluate"}}
|
||||
}
|
||||
mock_field = MagicMock()
|
||||
mock_field.description = "Math expression to evaluate"
|
||||
mock_block_instance.input_schema.model_fields = {"expression": mock_field}
|
||||
mock_block_instance.input_schema.get_credentials_fields_info.return_value = {}
|
||||
mock_block_class.return_value = mock_block_instance
|
||||
|
||||
mock_blocks = {"block-uuid-1": mock_block_class}
|
||||
@@ -309,19 +310,19 @@ async def test_content_handlers_registry():
|
||||
|
||||
|
||||
@pytest.mark.asyncio(loop_scope="session")
|
||||
async def test_block_handler_handles_missing_attributes():
|
||||
"""Test BlockHandler gracefully handles blocks with missing attributes."""
|
||||
async def test_block_handler_handles_empty_attributes():
|
||||
"""Test BlockHandler handles blocks with empty/falsy attribute values."""
|
||||
handler = BlockHandler()
|
||||
|
||||
# Mock block with minimal attributes
|
||||
# Mock block with empty values (all attributes exist but are falsy)
|
||||
mock_block_class = MagicMock()
|
||||
mock_block_instance = MagicMock()
|
||||
mock_block_instance.name = "Minimal Block"
|
||||
mock_block_instance.disabled = False
|
||||
# No description, categories, or schema
|
||||
del mock_block_instance.description
|
||||
del mock_block_instance.categories
|
||||
del mock_block_instance.input_schema
|
||||
mock_block_instance.description = ""
|
||||
mock_block_instance.categories = set()
|
||||
mock_block_instance.input_schema.model_fields = {}
|
||||
mock_block_instance.input_schema.get_credentials_fields_info.return_value = {}
|
||||
mock_block_class.return_value = mock_block_instance
|
||||
|
||||
mock_blocks = {"block-minimal": mock_block_class}
|
||||
@@ -352,6 +353,8 @@ async def test_block_handler_skips_failed_blocks():
|
||||
good_instance.description = "Works fine"
|
||||
good_instance.categories = []
|
||||
good_instance.disabled = False
|
||||
good_instance.input_schema.model_fields = {}
|
||||
good_instance.input_schema.get_credentials_fields_info.return_value = {}
|
||||
good_block.return_value = good_instance
|
||||
|
||||
bad_block = MagicMock()
|
||||
|
||||
@@ -126,6 +126,9 @@ v1_router = APIRouter()
|
||||
########################################################
|
||||
|
||||
|
||||
_tally_background_tasks: set[asyncio.Task] = set()
|
||||
|
||||
|
||||
@v1_router.post(
|
||||
"/auth/user",
|
||||
summary="Get or create user",
|
||||
@@ -134,6 +137,24 @@ v1_router = APIRouter()
|
||||
)
|
||||
async def get_or_create_user_route(user_data: dict = Security(get_jwt_payload)):
|
||||
user = await get_or_create_user(user_data)
|
||||
|
||||
# Fire-and-forget: populate business understanding from Tally form.
|
||||
# We use created_at proximity instead of an is_new flag because
|
||||
# get_or_create_user is cached — a separate is_new return value would be
|
||||
# unreliable on repeated calls within the cache TTL.
|
||||
age_seconds = (datetime.now(timezone.utc) - user.created_at).total_seconds()
|
||||
if age_seconds < 30:
|
||||
try:
|
||||
from backend.data.tally import populate_understanding_from_tally
|
||||
|
||||
task = asyncio.create_task(
|
||||
populate_understanding_from_tally(user.id, user.email)
|
||||
)
|
||||
_tally_background_tasks.add(task)
|
||||
task.add_done_callback(_tally_background_tasks.discard)
|
||||
except Exception:
|
||||
logger.debug("Failed to start Tally population task", exc_info=True)
|
||||
|
||||
return user.model_dump()
|
||||
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import json
|
||||
from datetime import datetime
|
||||
from datetime import datetime, timezone
|
||||
from io import BytesIO
|
||||
from unittest.mock import AsyncMock, Mock, patch
|
||||
|
||||
@@ -43,6 +43,7 @@ def test_get_or_create_user_route(
|
||||
) -> None:
|
||||
"""Test get or create user endpoint"""
|
||||
mock_user = Mock()
|
||||
mock_user.created_at = datetime.now(timezone.utc)
|
||||
mock_user.model_dump.return_value = {
|
||||
"id": test_user_id,
|
||||
"email": "test@example.com",
|
||||
|
||||
@@ -3,15 +3,29 @@ Workspace API routes for managing user file storage.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
from typing import Annotated
|
||||
from urllib.parse import quote
|
||||
|
||||
import fastapi
|
||||
from autogpt_libs.auth.dependencies import get_user_id, requires_user
|
||||
from fastapi import Query, UploadFile
|
||||
from fastapi.responses import Response
|
||||
from pydantic import BaseModel
|
||||
|
||||
from backend.data.workspace import WorkspaceFile, get_workspace, get_workspace_file
|
||||
from backend.data.workspace import (
|
||||
WorkspaceFile,
|
||||
count_workspace_files,
|
||||
get_or_create_workspace,
|
||||
get_workspace,
|
||||
get_workspace_file,
|
||||
get_workspace_total_size,
|
||||
soft_delete_workspace_file,
|
||||
)
|
||||
from backend.util.settings import Config
|
||||
from backend.util.virus_scanner import scan_content_safe
|
||||
from backend.util.workspace import WorkspaceManager
|
||||
from backend.util.workspace_storage import get_workspace_storage
|
||||
|
||||
|
||||
@@ -98,6 +112,21 @@ async def _create_file_download_response(file: WorkspaceFile) -> Response:
|
||||
raise
|
||||
|
||||
|
||||
class UploadFileResponse(BaseModel):
|
||||
file_id: str
|
||||
name: str
|
||||
path: str
|
||||
mime_type: str
|
||||
size_bytes: int
|
||||
|
||||
|
||||
class StorageUsageResponse(BaseModel):
|
||||
used_bytes: int
|
||||
limit_bytes: int
|
||||
used_percent: float
|
||||
file_count: int
|
||||
|
||||
|
||||
@router.get(
|
||||
"/files/{file_id}/download",
|
||||
summary="Download file by ID",
|
||||
@@ -120,3 +149,120 @@ async def download_file(
|
||||
raise fastapi.HTTPException(status_code=404, detail="File not found")
|
||||
|
||||
return await _create_file_download_response(file)
|
||||
|
||||
|
||||
@router.post(
|
||||
"/files/upload",
|
||||
summary="Upload file to workspace",
|
||||
)
|
||||
async def upload_file(
|
||||
user_id: Annotated[str, fastapi.Security(get_user_id)],
|
||||
file: UploadFile,
|
||||
session_id: str | None = Query(default=None),
|
||||
) -> UploadFileResponse:
|
||||
"""
|
||||
Upload a file to the user's workspace.
|
||||
|
||||
Files are stored in session-scoped paths when session_id is provided,
|
||||
so the agent's session-scoped tools can discover them automatically.
|
||||
"""
|
||||
config = Config()
|
||||
|
||||
# Sanitize filename — strip any directory components
|
||||
filename = os.path.basename(file.filename or "upload") or "upload"
|
||||
|
||||
# Read file content with early abort on size limit
|
||||
max_file_bytes = config.max_file_size_mb * 1024 * 1024
|
||||
chunks: list[bytes] = []
|
||||
total_size = 0
|
||||
while chunk := await file.read(64 * 1024): # 64KB chunks
|
||||
total_size += len(chunk)
|
||||
if total_size > max_file_bytes:
|
||||
raise fastapi.HTTPException(
|
||||
status_code=413,
|
||||
detail=f"File exceeds maximum size of {config.max_file_size_mb} MB",
|
||||
)
|
||||
chunks.append(chunk)
|
||||
content = b"".join(chunks)
|
||||
|
||||
# Get or create workspace
|
||||
workspace = await get_or_create_workspace(user_id)
|
||||
|
||||
# Pre-write storage cap check (soft check — final enforcement is post-write)
|
||||
storage_limit_bytes = config.max_workspace_storage_mb * 1024 * 1024
|
||||
current_usage = await get_workspace_total_size(workspace.id)
|
||||
if storage_limit_bytes and current_usage + len(content) > storage_limit_bytes:
|
||||
used_percent = (current_usage / storage_limit_bytes) * 100
|
||||
raise fastapi.HTTPException(
|
||||
status_code=413,
|
||||
detail={
|
||||
"message": "Storage limit exceeded",
|
||||
"used_bytes": current_usage,
|
||||
"limit_bytes": storage_limit_bytes,
|
||||
"used_percent": round(used_percent, 1),
|
||||
},
|
||||
)
|
||||
|
||||
# Warn at 80% usage
|
||||
if (
|
||||
storage_limit_bytes
|
||||
and (usage_ratio := (current_usage + len(content)) / storage_limit_bytes) >= 0.8
|
||||
):
|
||||
logger.warning(
|
||||
f"User {user_id} workspace storage at {usage_ratio * 100:.1f}% "
|
||||
f"({current_usage + len(content)} / {storage_limit_bytes} bytes)"
|
||||
)
|
||||
|
||||
# Virus scan
|
||||
await scan_content_safe(content, filename=filename)
|
||||
|
||||
# Write file via WorkspaceManager
|
||||
manager = WorkspaceManager(user_id, workspace.id, session_id)
|
||||
workspace_file = await manager.write_file(content, filename)
|
||||
|
||||
# Post-write storage check — eliminates TOCTOU race on the quota.
|
||||
# If a concurrent upload pushed us over the limit, undo this write.
|
||||
new_total = await get_workspace_total_size(workspace.id)
|
||||
if storage_limit_bytes and new_total > storage_limit_bytes:
|
||||
await soft_delete_workspace_file(workspace_file.id, workspace.id)
|
||||
raise fastapi.HTTPException(
|
||||
status_code=413,
|
||||
detail={
|
||||
"message": "Storage limit exceeded (concurrent upload)",
|
||||
"used_bytes": new_total,
|
||||
"limit_bytes": storage_limit_bytes,
|
||||
},
|
||||
)
|
||||
|
||||
return UploadFileResponse(
|
||||
file_id=workspace_file.id,
|
||||
name=workspace_file.name,
|
||||
path=workspace_file.path,
|
||||
mime_type=workspace_file.mime_type,
|
||||
size_bytes=workspace_file.size_bytes,
|
||||
)
|
||||
|
||||
|
||||
@router.get(
|
||||
"/storage/usage",
|
||||
summary="Get workspace storage usage",
|
||||
)
|
||||
async def get_storage_usage(
|
||||
user_id: Annotated[str, fastapi.Security(get_user_id)],
|
||||
) -> StorageUsageResponse:
|
||||
"""
|
||||
Get storage usage information for the user's workspace.
|
||||
"""
|
||||
config = Config()
|
||||
workspace = await get_or_create_workspace(user_id)
|
||||
|
||||
used_bytes = await get_workspace_total_size(workspace.id)
|
||||
file_count = await count_workspace_files(workspace.id)
|
||||
limit_bytes = config.max_workspace_storage_mb * 1024 * 1024
|
||||
|
||||
return StorageUsageResponse(
|
||||
used_bytes=used_bytes,
|
||||
limit_bytes=limit_bytes,
|
||||
used_percent=round((used_bytes / limit_bytes) * 100, 1) if limit_bytes else 0,
|
||||
file_count=file_count,
|
||||
)
|
||||
|
||||
@@ -0,0 +1,307 @@
|
||||
"""Tests for workspace file upload and download routes."""
|
||||
|
||||
import io
|
||||
from datetime import datetime, timezone
|
||||
|
||||
import fastapi
|
||||
import fastapi.testclient
|
||||
import pytest
|
||||
import pytest_mock
|
||||
|
||||
from backend.api.features.workspace import routes as workspace_routes
|
||||
from backend.data.workspace import WorkspaceFile
|
||||
|
||||
app = fastapi.FastAPI()
|
||||
app.include_router(workspace_routes.router)
|
||||
|
||||
|
||||
@app.exception_handler(ValueError)
|
||||
async def _value_error_handler(
|
||||
request: fastapi.Request, exc: ValueError
|
||||
) -> fastapi.responses.JSONResponse:
|
||||
"""Mirror the production ValueError → 400 mapping from rest_api.py."""
|
||||
return fastapi.responses.JSONResponse(status_code=400, content={"detail": str(exc)})
|
||||
|
||||
|
||||
client = fastapi.testclient.TestClient(app)
|
||||
|
||||
TEST_USER_ID = "3e53486c-cf57-477e-ba2a-cb02dc828e1a"
|
||||
|
||||
MOCK_WORKSPACE = type("W", (), {"id": "ws-1"})()
|
||||
|
||||
_NOW = datetime(2023, 1, 1, tzinfo=timezone.utc)
|
||||
|
||||
MOCK_FILE = WorkspaceFile(
|
||||
id="file-aaa-bbb",
|
||||
workspace_id="ws-1",
|
||||
created_at=_NOW,
|
||||
updated_at=_NOW,
|
||||
name="hello.txt",
|
||||
path="/session/hello.txt",
|
||||
mime_type="text/plain",
|
||||
size_bytes=13,
|
||||
storage_path="local://hello.txt",
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def setup_app_auth(mock_jwt_user):
|
||||
from autogpt_libs.auth.jwt_utils import get_jwt_payload
|
||||
|
||||
app.dependency_overrides[get_jwt_payload] = mock_jwt_user["get_jwt_payload"]
|
||||
yield
|
||||
app.dependency_overrides.clear()
|
||||
|
||||
|
||||
def _upload(
|
||||
filename: str = "hello.txt",
|
||||
content: bytes = b"Hello, world!",
|
||||
content_type: str = "text/plain",
|
||||
):
|
||||
"""Helper to POST a file upload."""
|
||||
return client.post(
|
||||
"/files/upload?session_id=sess-1",
|
||||
files={"file": (filename, io.BytesIO(content), content_type)},
|
||||
)
|
||||
|
||||
|
||||
# ---- Happy path ----
|
||||
|
||||
|
||||
def test_upload_happy_path(mocker: pytest_mock.MockFixture):
|
||||
mocker.patch(
|
||||
"backend.api.features.workspace.routes.get_or_create_workspace",
|
||||
return_value=MOCK_WORKSPACE,
|
||||
)
|
||||
mocker.patch(
|
||||
"backend.api.features.workspace.routes.get_workspace_total_size",
|
||||
return_value=0,
|
||||
)
|
||||
mocker.patch(
|
||||
"backend.api.features.workspace.routes.scan_content_safe",
|
||||
return_value=None,
|
||||
)
|
||||
mock_manager = mocker.MagicMock()
|
||||
mock_manager.write_file = mocker.AsyncMock(return_value=MOCK_FILE)
|
||||
mocker.patch(
|
||||
"backend.api.features.workspace.routes.WorkspaceManager",
|
||||
return_value=mock_manager,
|
||||
)
|
||||
|
||||
response = _upload()
|
||||
assert response.status_code == 200
|
||||
data = response.json()
|
||||
assert data["file_id"] == "file-aaa-bbb"
|
||||
assert data["name"] == "hello.txt"
|
||||
assert data["size_bytes"] == 13
|
||||
|
||||
|
||||
# ---- Per-file size limit ----
|
||||
|
||||
|
||||
def test_upload_exceeds_max_file_size(mocker: pytest_mock.MockFixture):
|
||||
"""Files larger than max_file_size_mb should be rejected with 413."""
|
||||
cfg = mocker.patch("backend.api.features.workspace.routes.Config")
|
||||
cfg.return_value.max_file_size_mb = 0 # 0 MB → any content is too big
|
||||
cfg.return_value.max_workspace_storage_mb = 500
|
||||
|
||||
response = _upload(content=b"x" * 1024)
|
||||
assert response.status_code == 413
|
||||
|
||||
|
||||
# ---- Storage quota exceeded ----
|
||||
|
||||
|
||||
def test_upload_storage_quota_exceeded(mocker: pytest_mock.MockFixture):
|
||||
mocker.patch(
|
||||
"backend.api.features.workspace.routes.get_or_create_workspace",
|
||||
return_value=MOCK_WORKSPACE,
|
||||
)
|
||||
# Current usage already at limit
|
||||
mocker.patch(
|
||||
"backend.api.features.workspace.routes.get_workspace_total_size",
|
||||
return_value=500 * 1024 * 1024,
|
||||
)
|
||||
|
||||
response = _upload()
|
||||
assert response.status_code == 413
|
||||
assert "Storage limit exceeded" in response.text
|
||||
|
||||
|
||||
# ---- Post-write quota race (B2) ----
|
||||
|
||||
|
||||
def test_upload_post_write_quota_race(mocker: pytest_mock.MockFixture):
|
||||
"""If a concurrent upload tips the total over the limit after write,
|
||||
the file should be soft-deleted and 413 returned."""
|
||||
mocker.patch(
|
||||
"backend.api.features.workspace.routes.get_or_create_workspace",
|
||||
return_value=MOCK_WORKSPACE,
|
||||
)
|
||||
# Pre-write check passes (under limit), but post-write check fails
|
||||
mocker.patch(
|
||||
"backend.api.features.workspace.routes.get_workspace_total_size",
|
||||
side_effect=[0, 600 * 1024 * 1024], # first call OK, second over limit
|
||||
)
|
||||
mocker.patch(
|
||||
"backend.api.features.workspace.routes.scan_content_safe",
|
||||
return_value=None,
|
||||
)
|
||||
mock_manager = mocker.MagicMock()
|
||||
mock_manager.write_file = mocker.AsyncMock(return_value=MOCK_FILE)
|
||||
mocker.patch(
|
||||
"backend.api.features.workspace.routes.WorkspaceManager",
|
||||
return_value=mock_manager,
|
||||
)
|
||||
mock_delete = mocker.patch(
|
||||
"backend.api.features.workspace.routes.soft_delete_workspace_file",
|
||||
return_value=None,
|
||||
)
|
||||
|
||||
response = _upload()
|
||||
assert response.status_code == 413
|
||||
mock_delete.assert_called_once_with("file-aaa-bbb", "ws-1")
|
||||
|
||||
|
||||
# ---- Any extension accepted (no allowlist) ----
|
||||
|
||||
|
||||
def test_upload_any_extension(mocker: pytest_mock.MockFixture):
|
||||
"""Any file extension should be accepted — ClamAV is the security layer."""
|
||||
mocker.patch(
|
||||
"backend.api.features.workspace.routes.get_or_create_workspace",
|
||||
return_value=MOCK_WORKSPACE,
|
||||
)
|
||||
mocker.patch(
|
||||
"backend.api.features.workspace.routes.get_workspace_total_size",
|
||||
return_value=0,
|
||||
)
|
||||
mocker.patch(
|
||||
"backend.api.features.workspace.routes.scan_content_safe",
|
||||
return_value=None,
|
||||
)
|
||||
mock_manager = mocker.MagicMock()
|
||||
mock_manager.write_file = mocker.AsyncMock(return_value=MOCK_FILE)
|
||||
mocker.patch(
|
||||
"backend.api.features.workspace.routes.WorkspaceManager",
|
||||
return_value=mock_manager,
|
||||
)
|
||||
|
||||
response = _upload(filename="data.xyz", content=b"arbitrary")
|
||||
assert response.status_code == 200
|
||||
|
||||
|
||||
# ---- Virus scan rejection ----
|
||||
|
||||
|
||||
def test_upload_blocked_by_virus_scan(mocker: pytest_mock.MockFixture):
|
||||
"""Files flagged by ClamAV should be rejected and never written to storage."""
|
||||
from backend.api.features.store.exceptions import VirusDetectedError
|
||||
|
||||
mocker.patch(
|
||||
"backend.api.features.workspace.routes.get_or_create_workspace",
|
||||
return_value=MOCK_WORKSPACE,
|
||||
)
|
||||
mocker.patch(
|
||||
"backend.api.features.workspace.routes.get_workspace_total_size",
|
||||
return_value=0,
|
||||
)
|
||||
mocker.patch(
|
||||
"backend.api.features.workspace.routes.scan_content_safe",
|
||||
side_effect=VirusDetectedError("Eicar-Test-Signature"),
|
||||
)
|
||||
mock_manager = mocker.MagicMock()
|
||||
mock_manager.write_file = mocker.AsyncMock(return_value=MOCK_FILE)
|
||||
mocker.patch(
|
||||
"backend.api.features.workspace.routes.WorkspaceManager",
|
||||
return_value=mock_manager,
|
||||
)
|
||||
|
||||
response = _upload(filename="evil.exe", content=b"X5O!P%@AP...")
|
||||
assert response.status_code == 400
|
||||
assert "Virus detected" in response.text
|
||||
mock_manager.write_file.assert_not_called()
|
||||
|
||||
|
||||
# ---- No file extension ----
|
||||
|
||||
|
||||
def test_upload_file_without_extension(mocker: pytest_mock.MockFixture):
|
||||
"""Files without an extension should be accepted and stored as-is."""
|
||||
mocker.patch(
|
||||
"backend.api.features.workspace.routes.get_or_create_workspace",
|
||||
return_value=MOCK_WORKSPACE,
|
||||
)
|
||||
mocker.patch(
|
||||
"backend.api.features.workspace.routes.get_workspace_total_size",
|
||||
return_value=0,
|
||||
)
|
||||
mocker.patch(
|
||||
"backend.api.features.workspace.routes.scan_content_safe",
|
||||
return_value=None,
|
||||
)
|
||||
mock_manager = mocker.MagicMock()
|
||||
mock_manager.write_file = mocker.AsyncMock(return_value=MOCK_FILE)
|
||||
mocker.patch(
|
||||
"backend.api.features.workspace.routes.WorkspaceManager",
|
||||
return_value=mock_manager,
|
||||
)
|
||||
|
||||
response = _upload(
|
||||
filename="Makefile",
|
||||
content=b"all:\n\techo hello",
|
||||
content_type="application/octet-stream",
|
||||
)
|
||||
assert response.status_code == 200
|
||||
mock_manager.write_file.assert_called_once()
|
||||
assert mock_manager.write_file.call_args[0][1] == "Makefile"
|
||||
|
||||
|
||||
# ---- Filename sanitization (SF5) ----
|
||||
|
||||
|
||||
def test_upload_strips_path_components(mocker: pytest_mock.MockFixture):
|
||||
"""Path-traversal filenames should be reduced to their basename."""
|
||||
mocker.patch(
|
||||
"backend.api.features.workspace.routes.get_or_create_workspace",
|
||||
return_value=MOCK_WORKSPACE,
|
||||
)
|
||||
mocker.patch(
|
||||
"backend.api.features.workspace.routes.get_workspace_total_size",
|
||||
return_value=0,
|
||||
)
|
||||
mocker.patch(
|
||||
"backend.api.features.workspace.routes.scan_content_safe",
|
||||
return_value=None,
|
||||
)
|
||||
mock_manager = mocker.MagicMock()
|
||||
mock_manager.write_file = mocker.AsyncMock(return_value=MOCK_FILE)
|
||||
mocker.patch(
|
||||
"backend.api.features.workspace.routes.WorkspaceManager",
|
||||
return_value=mock_manager,
|
||||
)
|
||||
|
||||
# Filename with traversal
|
||||
_upload(filename="../../etc/passwd.txt")
|
||||
|
||||
# write_file should have been called with just the basename
|
||||
mock_manager.write_file.assert_called_once()
|
||||
call_args = mock_manager.write_file.call_args
|
||||
assert call_args[0][1] == "passwd.txt"
|
||||
|
||||
|
||||
# ---- Download ----
|
||||
|
||||
|
||||
def test_download_file_not_found(mocker: pytest_mock.MockFixture):
|
||||
mocker.patch(
|
||||
"backend.api.features.workspace.routes.get_workspace",
|
||||
return_value=MOCK_WORKSPACE,
|
||||
)
|
||||
mocker.patch(
|
||||
"backend.api.features.workspace.routes.get_workspace_file",
|
||||
return_value=None,
|
||||
)
|
||||
|
||||
response = client.get("/files/some-file-id/download")
|
||||
assert response.status_code == 404
|
||||
@@ -41,11 +41,11 @@ import backend.data.user
|
||||
import backend.integrations.webhooks.utils
|
||||
import backend.util.service
|
||||
import backend.util.settings
|
||||
from backend.blocks.llm import DEFAULT_LLM_MODEL
|
||||
from backend.copilot.completion_consumer import (
|
||||
start_completion_consumer,
|
||||
stop_completion_consumer,
|
||||
from backend.api.features.library.exceptions import (
|
||||
FolderAlreadyExistsError,
|
||||
FolderValidationError,
|
||||
)
|
||||
from backend.blocks.llm import DEFAULT_LLM_MODEL
|
||||
from backend.data.model import Credentials
|
||||
from backend.integrations.providers import ProviderName
|
||||
from backend.monitoring.instrumentation import instrument_fastapi
|
||||
@@ -123,21 +123,9 @@ async def lifespan_context(app: fastapi.FastAPI):
|
||||
await backend.data.graph.migrate_llm_models(DEFAULT_LLM_MODEL)
|
||||
await backend.integrations.webhooks.utils.migrate_legacy_triggered_graphs()
|
||||
|
||||
# Start chat completion consumer for Redis Streams notifications
|
||||
try:
|
||||
await start_completion_consumer()
|
||||
except Exception as e:
|
||||
logger.warning(f"Could not start chat completion consumer: {e}")
|
||||
|
||||
with launch_darkly_context():
|
||||
yield
|
||||
|
||||
# Stop chat completion consumer
|
||||
try:
|
||||
await stop_completion_consumer()
|
||||
except Exception as e:
|
||||
logger.warning(f"Error stopping chat completion consumer: {e}")
|
||||
|
||||
try:
|
||||
await shutdown_cloud_storage_handler()
|
||||
except Exception as e:
|
||||
@@ -277,6 +265,10 @@ async def validation_error_handler(
|
||||
|
||||
|
||||
app.add_exception_handler(PrismaError, handle_internal_http_error(500))
|
||||
app.add_exception_handler(
|
||||
FolderAlreadyExistsError, handle_internal_http_error(409, False)
|
||||
)
|
||||
app.add_exception_handler(FolderValidationError, handle_internal_http_error(400, False))
|
||||
app.add_exception_handler(NotFoundError, handle_internal_http_error(404, False))
|
||||
app.add_exception_handler(NotAuthorizedError, handle_internal_http_error(403, False))
|
||||
app.add_exception_handler(RequestValidationError, validation_error_handler)
|
||||
|
||||
@@ -24,7 +24,7 @@ def run_processes(*processes: "AppProcess", **kwargs):
|
||||
# Run the last process in the foreground.
|
||||
processes[-1].start(background=False, **kwargs)
|
||||
finally:
|
||||
for process in processes:
|
||||
for process in reversed(processes):
|
||||
try:
|
||||
process.stop()
|
||||
except Exception as e:
|
||||
|
||||
182
autogpt_platform/backend/backend/blocks/telegram/_api.py
Normal file
182
autogpt_platform/backend/backend/blocks/telegram/_api.py
Normal file
@@ -0,0 +1,182 @@
|
||||
"""
|
||||
Telegram Bot API helper functions.
|
||||
|
||||
Provides utilities for making authenticated requests to the Telegram Bot API.
|
||||
"""
|
||||
|
||||
import logging
|
||||
from io import BytesIO
|
||||
from typing import Any, Optional
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
from backend.data.model import APIKeyCredentials
|
||||
from backend.util.request import Requests
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
TELEGRAM_API_BASE = "https://api.telegram.org"
|
||||
|
||||
|
||||
class TelegramMessageResult(BaseModel, extra="allow"):
|
||||
"""Result from Telegram send/edit message API calls."""
|
||||
|
||||
message_id: int = 0
|
||||
chat: dict[str, Any] = {}
|
||||
date: int = 0
|
||||
text: str = ""
|
||||
|
||||
|
||||
class TelegramFileResult(BaseModel, extra="allow"):
|
||||
"""Result from Telegram getFile API call."""
|
||||
|
||||
file_id: str = ""
|
||||
file_unique_id: str = ""
|
||||
file_size: int = 0
|
||||
file_path: str = ""
|
||||
|
||||
|
||||
class TelegramAPIException(ValueError):
|
||||
"""Exception raised for Telegram API errors."""
|
||||
|
||||
def __init__(self, message: str, error_code: int = 0):
|
||||
super().__init__(message)
|
||||
self.error_code = error_code
|
||||
|
||||
|
||||
def get_bot_api_url(bot_token: str, method: str) -> str:
|
||||
"""Construct Telegram Bot API URL for a method."""
|
||||
return f"{TELEGRAM_API_BASE}/bot{bot_token}/{method}"
|
||||
|
||||
|
||||
def get_file_url(bot_token: str, file_path: str) -> str:
|
||||
"""Construct Telegram file download URL."""
|
||||
return f"{TELEGRAM_API_BASE}/file/bot{bot_token}/{file_path}"
|
||||
|
||||
|
||||
async def call_telegram_api(
|
||||
credentials: APIKeyCredentials,
|
||||
method: str,
|
||||
data: Optional[dict[str, Any]] = None,
|
||||
) -> TelegramMessageResult:
|
||||
"""
|
||||
Make a request to the Telegram Bot API.
|
||||
|
||||
Args:
|
||||
credentials: Bot token credentials
|
||||
method: API method name (e.g., "sendMessage", "getFile")
|
||||
data: Request parameters
|
||||
|
||||
Returns:
|
||||
API response result
|
||||
|
||||
Raises:
|
||||
TelegramAPIException: If the API returns an error
|
||||
"""
|
||||
token = credentials.api_key.get_secret_value()
|
||||
url = get_bot_api_url(token, method)
|
||||
|
||||
response = await Requests().post(url, json=data or {})
|
||||
result = response.json()
|
||||
|
||||
if not result.get("ok"):
|
||||
error_code = result.get("error_code", 0)
|
||||
description = result.get("description", "Unknown error")
|
||||
raise TelegramAPIException(description, error_code)
|
||||
|
||||
return TelegramMessageResult(**result.get("result", {}))
|
||||
|
||||
|
||||
async def call_telegram_api_with_file(
|
||||
credentials: APIKeyCredentials,
|
||||
method: str,
|
||||
file_field: str,
|
||||
file_data: bytes,
|
||||
filename: str,
|
||||
content_type: str,
|
||||
data: Optional[dict[str, Any]] = None,
|
||||
) -> TelegramMessageResult:
|
||||
"""
|
||||
Make a multipart/form-data request to the Telegram Bot API with a file upload.
|
||||
|
||||
Args:
|
||||
credentials: Bot token credentials
|
||||
method: API method name (e.g., "sendPhoto", "sendVoice")
|
||||
file_field: Form field name for the file (e.g., "photo", "voice")
|
||||
file_data: Raw file bytes
|
||||
filename: Filename for the upload
|
||||
content_type: MIME type of the file
|
||||
data: Additional form parameters
|
||||
|
||||
Returns:
|
||||
API response result
|
||||
|
||||
Raises:
|
||||
TelegramAPIException: If the API returns an error
|
||||
"""
|
||||
token = credentials.api_key.get_secret_value()
|
||||
url = get_bot_api_url(token, method)
|
||||
|
||||
files = [(file_field, (filename, BytesIO(file_data), content_type))]
|
||||
|
||||
response = await Requests().post(url, files=files, data=data or {})
|
||||
result = response.json()
|
||||
|
||||
if not result.get("ok"):
|
||||
error_code = result.get("error_code", 0)
|
||||
description = result.get("description", "Unknown error")
|
||||
raise TelegramAPIException(description, error_code)
|
||||
|
||||
return TelegramMessageResult(**result.get("result", {}))
|
||||
|
||||
|
||||
async def get_file_info(
|
||||
credentials: APIKeyCredentials, file_id: str
|
||||
) -> TelegramFileResult:
|
||||
"""
|
||||
Get file information from Telegram.
|
||||
|
||||
Args:
|
||||
credentials: Bot token credentials
|
||||
file_id: Telegram file_id from message
|
||||
|
||||
Returns:
|
||||
File info dict containing file_id, file_unique_id, file_size, file_path
|
||||
"""
|
||||
result = await call_telegram_api(credentials, "getFile", {"file_id": file_id})
|
||||
return TelegramFileResult(**result.model_dump())
|
||||
|
||||
|
||||
async def get_file_download_url(credentials: APIKeyCredentials, file_id: str) -> str:
|
||||
"""
|
||||
Get the download URL for a Telegram file.
|
||||
|
||||
Args:
|
||||
credentials: Bot token credentials
|
||||
file_id: Telegram file_id from message
|
||||
|
||||
Returns:
|
||||
Full download URL
|
||||
"""
|
||||
token = credentials.api_key.get_secret_value()
|
||||
result = await get_file_info(credentials, file_id)
|
||||
file_path = result.file_path
|
||||
if not file_path:
|
||||
raise TelegramAPIException("No file_path returned from getFile")
|
||||
return get_file_url(token, file_path)
|
||||
|
||||
|
||||
async def download_telegram_file(credentials: APIKeyCredentials, file_id: str) -> bytes:
|
||||
"""
|
||||
Download a file from Telegram servers.
|
||||
|
||||
Args:
|
||||
credentials: Bot token credentials
|
||||
file_id: Telegram file_id
|
||||
|
||||
Returns:
|
||||
File content as bytes
|
||||
"""
|
||||
url = await get_file_download_url(credentials, file_id)
|
||||
response = await Requests().get(url)
|
||||
return response.content
|
||||
43
autogpt_platform/backend/backend/blocks/telegram/_auth.py
Normal file
43
autogpt_platform/backend/backend/blocks/telegram/_auth.py
Normal file
@@ -0,0 +1,43 @@
|
||||
"""
|
||||
Telegram Bot credentials handling.
|
||||
|
||||
Telegram bots use an API key (bot token) obtained from @BotFather.
|
||||
"""
|
||||
|
||||
from typing import Literal
|
||||
|
||||
from pydantic import SecretStr
|
||||
|
||||
from backend.data.model import APIKeyCredentials, CredentialsField, CredentialsMetaInput
|
||||
from backend.integrations.providers import ProviderName
|
||||
|
||||
# Bot token credentials (API key style)
|
||||
TelegramCredentials = APIKeyCredentials
|
||||
TelegramCredentialsInput = CredentialsMetaInput[
|
||||
Literal[ProviderName.TELEGRAM], Literal["api_key"]
|
||||
]
|
||||
|
||||
|
||||
def TelegramCredentialsField() -> TelegramCredentialsInput:
|
||||
"""Creates a Telegram bot token credentials field."""
|
||||
return CredentialsField(
|
||||
description="Telegram Bot API token from @BotFather. "
|
||||
"Create a bot at https://t.me/BotFather to get your token."
|
||||
)
|
||||
|
||||
|
||||
# Test credentials for unit tests
|
||||
TEST_CREDENTIALS = APIKeyCredentials(
|
||||
id="01234567-89ab-cdef-0123-456789abcdef",
|
||||
provider="telegram",
|
||||
api_key=SecretStr("test_telegram_bot_token"),
|
||||
title="Mock Telegram Bot Token",
|
||||
expires_at=None,
|
||||
)
|
||||
|
||||
TEST_CREDENTIALS_INPUT = {
|
||||
"provider": TEST_CREDENTIALS.provider,
|
||||
"id": TEST_CREDENTIALS.id,
|
||||
"type": TEST_CREDENTIALS.type,
|
||||
"title": TEST_CREDENTIALS.title,
|
||||
}
|
||||
1254
autogpt_platform/backend/backend/blocks/telegram/blocks.py
Normal file
1254
autogpt_platform/backend/backend/blocks/telegram/blocks.py
Normal file
File diff suppressed because it is too large
Load Diff
377
autogpt_platform/backend/backend/blocks/telegram/triggers.py
Normal file
377
autogpt_platform/backend/backend/blocks/telegram/triggers.py
Normal file
@@ -0,0 +1,377 @@
|
||||
"""
|
||||
Telegram trigger blocks for receiving messages via webhooks.
|
||||
"""
|
||||
|
||||
import logging
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
from backend.blocks._base import (
|
||||
Block,
|
||||
BlockCategory,
|
||||
BlockOutput,
|
||||
BlockSchemaInput,
|
||||
BlockSchemaOutput,
|
||||
BlockWebhookConfig,
|
||||
)
|
||||
from backend.data.model import SchemaField
|
||||
from backend.integrations.providers import ProviderName
|
||||
from backend.integrations.webhooks.telegram import TelegramWebhookType
|
||||
|
||||
from ._auth import (
|
||||
TEST_CREDENTIALS,
|
||||
TEST_CREDENTIALS_INPUT,
|
||||
TelegramCredentialsField,
|
||||
TelegramCredentialsInput,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# Example payload for testing
|
||||
EXAMPLE_MESSAGE_PAYLOAD = {
|
||||
"update_id": 123456789,
|
||||
"message": {
|
||||
"message_id": 1,
|
||||
"from": {
|
||||
"id": 12345678,
|
||||
"is_bot": False,
|
||||
"first_name": "John",
|
||||
"last_name": "Doe",
|
||||
"username": "johndoe",
|
||||
"language_code": "en",
|
||||
},
|
||||
"chat": {
|
||||
"id": 12345678,
|
||||
"first_name": "John",
|
||||
"last_name": "Doe",
|
||||
"username": "johndoe",
|
||||
"type": "private",
|
||||
},
|
||||
"date": 1234567890,
|
||||
"text": "Hello, bot!",
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
class TelegramTriggerBase:
|
||||
"""Base class for Telegram trigger blocks."""
|
||||
|
||||
class Input(BlockSchemaInput):
|
||||
credentials: TelegramCredentialsInput = TelegramCredentialsField()
|
||||
payload: dict = SchemaField(hidden=True, default_factory=dict)
|
||||
|
||||
|
||||
class TelegramMessageTriggerBlock(TelegramTriggerBase, Block):
|
||||
"""
|
||||
Triggers when a message is received or edited in your Telegram bot.
|
||||
|
||||
Supports text, photos, voice messages, audio files, documents, and videos.
|
||||
Connect the outputs to other blocks to process messages and send responses.
|
||||
"""
|
||||
|
||||
class Input(TelegramTriggerBase.Input):
|
||||
class EventsFilter(BaseModel):
|
||||
"""Filter for message types to receive."""
|
||||
|
||||
text: bool = True
|
||||
photo: bool = False
|
||||
voice: bool = False
|
||||
audio: bool = False
|
||||
document: bool = False
|
||||
video: bool = False
|
||||
edited_message: bool = False
|
||||
|
||||
events: EventsFilter = SchemaField(
|
||||
title="Message Types", description="Types of messages to receive"
|
||||
)
|
||||
|
||||
class Output(BlockSchemaOutput):
|
||||
payload: dict = SchemaField(
|
||||
description="The complete webhook payload from Telegram"
|
||||
)
|
||||
chat_id: int = SchemaField(
|
||||
description="The chat ID where the message was received. "
|
||||
"Use this to send replies."
|
||||
)
|
||||
message_id: int = SchemaField(description="The unique message ID")
|
||||
user_id: int = SchemaField(description="The user ID who sent the message")
|
||||
username: str = SchemaField(description="Username of the sender (may be empty)")
|
||||
first_name: str = SchemaField(description="First name of the sender")
|
||||
event: str = SchemaField(
|
||||
description="The message type (text, photo, voice, audio, etc.)"
|
||||
)
|
||||
text: str = SchemaField(
|
||||
description="Text content of the message (for text messages)"
|
||||
)
|
||||
photo_file_id: str = SchemaField(
|
||||
description="File ID of the photo (for photo messages). "
|
||||
"Use GetTelegramFileBlock to download."
|
||||
)
|
||||
voice_file_id: str = SchemaField(
|
||||
description="File ID of the voice message (for voice messages). "
|
||||
"Use GetTelegramFileBlock to download."
|
||||
)
|
||||
audio_file_id: str = SchemaField(
|
||||
description="File ID of the audio file (for audio messages). "
|
||||
"Use GetTelegramFileBlock to download."
|
||||
)
|
||||
file_id: str = SchemaField(
|
||||
description="File ID for document/video messages. "
|
||||
"Use GetTelegramFileBlock to download."
|
||||
)
|
||||
file_name: str = SchemaField(
|
||||
description="Original filename (for document/audio messages)"
|
||||
)
|
||||
caption: str = SchemaField(description="Caption for media messages")
|
||||
is_edited: bool = SchemaField(
|
||||
description="Whether this is an edit of a previously sent message"
|
||||
)
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="4435e4e0-df6e-4301-8f35-ad70b12fc9ec",
|
||||
description="Triggers when a message is received or edited in your Telegram bot. "
|
||||
"Supports text, photos, voice messages, audio files, documents, and videos.",
|
||||
categories={BlockCategory.SOCIAL},
|
||||
input_schema=TelegramMessageTriggerBlock.Input,
|
||||
output_schema=TelegramMessageTriggerBlock.Output,
|
||||
webhook_config=BlockWebhookConfig(
|
||||
provider=ProviderName.TELEGRAM,
|
||||
webhook_type=TelegramWebhookType.BOT,
|
||||
resource_format="bot",
|
||||
event_filter_input="events",
|
||||
event_format="message.{event}",
|
||||
),
|
||||
test_input={
|
||||
"events": {"text": True, "photo": True},
|
||||
"credentials": TEST_CREDENTIALS_INPUT,
|
||||
"payload": EXAMPLE_MESSAGE_PAYLOAD,
|
||||
},
|
||||
test_credentials=TEST_CREDENTIALS,
|
||||
test_output=[
|
||||
("payload", EXAMPLE_MESSAGE_PAYLOAD),
|
||||
("chat_id", 12345678),
|
||||
("message_id", 1),
|
||||
("user_id", 12345678),
|
||||
("username", "johndoe"),
|
||||
("first_name", "John"),
|
||||
("is_edited", False),
|
||||
("event", "text"),
|
||||
("text", "Hello, bot!"),
|
||||
("photo_file_id", ""),
|
||||
("voice_file_id", ""),
|
||||
("audio_file_id", ""),
|
||||
("file_id", ""),
|
||||
("file_name", ""),
|
||||
("caption", ""),
|
||||
],
|
||||
)
|
||||
|
||||
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
|
||||
payload = input_data.payload
|
||||
is_edited = "edited_message" in payload
|
||||
message = payload.get("message") or payload.get("edited_message", {})
|
||||
|
||||
# Extract common fields
|
||||
chat = message.get("chat", {})
|
||||
sender = message.get("from", {})
|
||||
|
||||
yield "payload", payload
|
||||
yield "chat_id", chat.get("id", 0)
|
||||
yield "message_id", message.get("message_id", 0)
|
||||
yield "user_id", sender.get("id", 0)
|
||||
yield "username", sender.get("username", "")
|
||||
yield "first_name", sender.get("first_name", "")
|
||||
yield "is_edited", is_edited
|
||||
|
||||
# For edited messages, yield event as "edited_message" and extract
|
||||
# all content fields from the edited message body
|
||||
if is_edited:
|
||||
yield "event", "edited_message"
|
||||
yield "text", message.get("text", "")
|
||||
photos = message.get("photo", [])
|
||||
yield "photo_file_id", photos[-1].get("file_id", "") if photos else ""
|
||||
voice = message.get("voice", {})
|
||||
yield "voice_file_id", voice.get("file_id", "")
|
||||
audio = message.get("audio", {})
|
||||
yield "audio_file_id", audio.get("file_id", "")
|
||||
document = message.get("document", {})
|
||||
video = message.get("video", {})
|
||||
yield "file_id", (document.get("file_id", "") or video.get("file_id", ""))
|
||||
yield "file_name", (
|
||||
document.get("file_name", "") or audio.get("file_name", "")
|
||||
)
|
||||
yield "caption", message.get("caption", "")
|
||||
# Determine message type and extract content
|
||||
elif "text" in message:
|
||||
yield "event", "text"
|
||||
yield "text", message.get("text", "")
|
||||
yield "photo_file_id", ""
|
||||
yield "voice_file_id", ""
|
||||
yield "audio_file_id", ""
|
||||
yield "file_id", ""
|
||||
yield "file_name", ""
|
||||
yield "caption", ""
|
||||
elif "photo" in message:
|
||||
# Get the largest photo (last in array)
|
||||
photos = message.get("photo", [])
|
||||
photo_fid = photos[-1].get("file_id", "") if photos else ""
|
||||
yield "event", "photo"
|
||||
yield "text", ""
|
||||
yield "photo_file_id", photo_fid
|
||||
yield "voice_file_id", ""
|
||||
yield "audio_file_id", ""
|
||||
yield "file_id", ""
|
||||
yield "file_name", ""
|
||||
yield "caption", message.get("caption", "")
|
||||
elif "voice" in message:
|
||||
voice = message.get("voice", {})
|
||||
yield "event", "voice"
|
||||
yield "text", ""
|
||||
yield "photo_file_id", ""
|
||||
yield "voice_file_id", voice.get("file_id", "")
|
||||
yield "audio_file_id", ""
|
||||
yield "file_id", ""
|
||||
yield "file_name", ""
|
||||
yield "caption", message.get("caption", "")
|
||||
elif "audio" in message:
|
||||
audio = message.get("audio", {})
|
||||
yield "event", "audio"
|
||||
yield "text", ""
|
||||
yield "photo_file_id", ""
|
||||
yield "voice_file_id", ""
|
||||
yield "audio_file_id", audio.get("file_id", "")
|
||||
yield "file_id", ""
|
||||
yield "file_name", audio.get("file_name", "")
|
||||
yield "caption", message.get("caption", "")
|
||||
elif "document" in message:
|
||||
document = message.get("document", {})
|
||||
yield "event", "document"
|
||||
yield "text", ""
|
||||
yield "photo_file_id", ""
|
||||
yield "voice_file_id", ""
|
||||
yield "audio_file_id", ""
|
||||
yield "file_id", document.get("file_id", "")
|
||||
yield "file_name", document.get("file_name", "")
|
||||
yield "caption", message.get("caption", "")
|
||||
elif "video" in message:
|
||||
video = message.get("video", {})
|
||||
yield "event", "video"
|
||||
yield "text", ""
|
||||
yield "photo_file_id", ""
|
||||
yield "voice_file_id", ""
|
||||
yield "audio_file_id", ""
|
||||
yield "file_id", video.get("file_id", "")
|
||||
yield "file_name", video.get("file_name", "")
|
||||
yield "caption", message.get("caption", "")
|
||||
else:
|
||||
yield "event", "other"
|
||||
yield "text", ""
|
||||
yield "photo_file_id", ""
|
||||
yield "voice_file_id", ""
|
||||
yield "audio_file_id", ""
|
||||
yield "file_id", ""
|
||||
yield "file_name", ""
|
||||
yield "caption", ""
|
||||
|
||||
|
||||
# Example payload for reaction trigger testing
|
||||
EXAMPLE_REACTION_PAYLOAD = {
|
||||
"update_id": 123456790,
|
||||
"message_reaction": {
|
||||
"chat": {
|
||||
"id": 12345678,
|
||||
"first_name": "John",
|
||||
"last_name": "Doe",
|
||||
"username": "johndoe",
|
||||
"type": "private",
|
||||
},
|
||||
"message_id": 42,
|
||||
"user": {
|
||||
"id": 12345678,
|
||||
"is_bot": False,
|
||||
"first_name": "John",
|
||||
"username": "johndoe",
|
||||
},
|
||||
"date": 1234567890,
|
||||
"new_reaction": [{"type": "emoji", "emoji": "👍"}],
|
||||
"old_reaction": [],
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
class TelegramMessageReactionTriggerBlock(TelegramTriggerBase, Block):
|
||||
"""
|
||||
Triggers when a reaction to a message is changed.
|
||||
|
||||
Works automatically in private chats. In group chats, the bot must be
|
||||
an administrator to receive reaction updates.
|
||||
"""
|
||||
|
||||
class Input(TelegramTriggerBase.Input):
|
||||
pass
|
||||
|
||||
class Output(BlockSchemaOutput):
|
||||
payload: dict = SchemaField(
|
||||
description="The complete webhook payload from Telegram"
|
||||
)
|
||||
chat_id: int = SchemaField(
|
||||
description="The chat ID where the reaction occurred"
|
||||
)
|
||||
message_id: int = SchemaField(description="The message ID that was reacted to")
|
||||
user_id: int = SchemaField(description="The user ID who changed the reaction")
|
||||
username: str = SchemaField(description="Username of the user (may be empty)")
|
||||
new_reactions: list = SchemaField(
|
||||
description="List of new reactions on the message"
|
||||
)
|
||||
old_reactions: list = SchemaField(
|
||||
description="List of previous reactions on the message"
|
||||
)
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="82525328-9368-4966-8f0c-cd78e80181fd",
|
||||
description="Triggers when a reaction to a message is changed. "
|
||||
"Works in private chats automatically. "
|
||||
"In groups, the bot must be an administrator.",
|
||||
categories={BlockCategory.SOCIAL},
|
||||
input_schema=TelegramMessageReactionTriggerBlock.Input,
|
||||
output_schema=TelegramMessageReactionTriggerBlock.Output,
|
||||
webhook_config=BlockWebhookConfig(
|
||||
provider=ProviderName.TELEGRAM,
|
||||
webhook_type=TelegramWebhookType.BOT,
|
||||
resource_format="bot",
|
||||
event_filter_input="",
|
||||
event_format="message_reaction",
|
||||
),
|
||||
test_input={
|
||||
"credentials": TEST_CREDENTIALS_INPUT,
|
||||
"payload": EXAMPLE_REACTION_PAYLOAD,
|
||||
},
|
||||
test_credentials=TEST_CREDENTIALS,
|
||||
test_output=[
|
||||
("payload", EXAMPLE_REACTION_PAYLOAD),
|
||||
("chat_id", 12345678),
|
||||
("message_id", 42),
|
||||
("user_id", 12345678),
|
||||
("username", "johndoe"),
|
||||
("new_reactions", [{"type": "emoji", "emoji": "👍"}]),
|
||||
("old_reactions", []),
|
||||
],
|
||||
)
|
||||
|
||||
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
|
||||
payload = input_data.payload
|
||||
reaction = payload.get("message_reaction", {})
|
||||
|
||||
chat = reaction.get("chat", {})
|
||||
user = reaction.get("user", {})
|
||||
|
||||
yield "payload", payload
|
||||
yield "chat_id", chat.get("id", 0)
|
||||
yield "message_id", reaction.get("message_id", 0)
|
||||
yield "user_id", user.get("id", 0)
|
||||
yield "username", user.get("username", "")
|
||||
yield "new_reactions", reaction.get("new_reaction", [])
|
||||
yield "old_reactions", reaction.get("old_reaction", [])
|
||||
@@ -34,10 +34,12 @@ def main(output: Path, pretty: bool):
|
||||
"""Generate and output the OpenAPI JSON specification."""
|
||||
openapi_schema = get_openapi_schema()
|
||||
|
||||
json_output = json.dumps(openapi_schema, indent=2 if pretty else None)
|
||||
json_output = json.dumps(
|
||||
openapi_schema, indent=2 if pretty else None, ensure_ascii=False
|
||||
)
|
||||
|
||||
if output:
|
||||
output.write_text(json_output)
|
||||
output.write_text(json_output, encoding="utf-8")
|
||||
click.echo(f"✅ OpenAPI specification written to {output}\n\nPreview:")
|
||||
click.echo(f"\n{json_output[:500]} ...")
|
||||
else:
|
||||
|
||||
@@ -1,349 +0,0 @@
|
||||
"""Redis Streams consumer for operation completion messages.
|
||||
|
||||
This module provides a consumer (ChatCompletionConsumer) that listens for
|
||||
completion notifications (OperationCompleteMessage) from external services
|
||||
(like Agent Generator) and triggers the appropriate stream registry and
|
||||
chat service updates via process_operation_success/process_operation_failure.
|
||||
|
||||
Why Redis Streams instead of RabbitMQ?
|
||||
--------------------------------------
|
||||
While the project typically uses RabbitMQ for async task queues (e.g., execution
|
||||
queue), Redis Streams was chosen for chat completion notifications because:
|
||||
|
||||
1. **Unified Infrastructure**: The SSE reconnection feature already uses Redis
|
||||
Streams (via stream_registry) for message persistence and replay. Using Redis
|
||||
Streams for completion notifications keeps all chat streaming infrastructure
|
||||
in one system, simplifying operations and reducing cross-system coordination.
|
||||
|
||||
2. **Message Replay**: Redis Streams support XREAD with arbitrary message IDs,
|
||||
allowing consumers to replay missed messages after reconnection. This aligns
|
||||
with the SSE reconnection pattern where clients can resume from last_message_id.
|
||||
|
||||
3. **Consumer Groups with XAUTOCLAIM**: Redis consumer groups provide automatic
|
||||
load balancing across pods with explicit message claiming (XAUTOCLAIM) for
|
||||
recovering from dead consumers - ideal for the completion callback pattern.
|
||||
|
||||
4. **Lower Latency**: For real-time SSE updates, Redis (already in-memory for
|
||||
stream_registry) provides lower latency than an additional RabbitMQ hop.
|
||||
|
||||
5. **Atomicity with Task State**: Completion processing often needs to update
|
||||
task metadata stored in Redis. Keeping both in Redis enables simpler
|
||||
transactional semantics without distributed coordination.
|
||||
|
||||
The consumer uses Redis Streams with consumer groups for reliable message
|
||||
processing across multiple platform pods, with XAUTOCLAIM for reclaiming
|
||||
stale pending messages from dead consumers.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
import uuid
|
||||
from typing import Any
|
||||
|
||||
import orjson
|
||||
from pydantic import BaseModel
|
||||
from redis.exceptions import ResponseError
|
||||
|
||||
from backend.data.redis_client import get_redis_async
|
||||
|
||||
from . import stream_registry
|
||||
from .completion_handler import process_operation_failure, process_operation_success
|
||||
from .config import ChatConfig
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
config = ChatConfig()
|
||||
|
||||
|
||||
class OperationCompleteMessage(BaseModel):
|
||||
"""Message format for operation completion notifications."""
|
||||
|
||||
operation_id: str
|
||||
task_id: str
|
||||
success: bool
|
||||
result: dict | str | None = None
|
||||
error: str | None = None
|
||||
|
||||
|
||||
class ChatCompletionConsumer:
|
||||
"""Consumer for chat operation completion messages from Redis Streams.
|
||||
|
||||
Database operations are handled through the chat_db() accessor, which
|
||||
routes through DatabaseManager RPC when Prisma is not directly connected.
|
||||
|
||||
Uses Redis consumer groups to allow multiple platform pods to consume
|
||||
messages reliably with automatic redelivery on failure.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self._consumer_task: asyncio.Task | None = None
|
||||
self._running = False
|
||||
self._consumer_name = f"consumer-{uuid.uuid4().hex[:8]}"
|
||||
|
||||
async def start(self) -> None:
|
||||
"""Start the completion consumer."""
|
||||
if self._running:
|
||||
logger.warning("Completion consumer already running")
|
||||
return
|
||||
|
||||
# Create consumer group if it doesn't exist
|
||||
try:
|
||||
redis = await get_redis_async()
|
||||
await redis.xgroup_create(
|
||||
config.stream_completion_name,
|
||||
config.stream_consumer_group,
|
||||
id="0",
|
||||
mkstream=True,
|
||||
)
|
||||
logger.info(
|
||||
f"Created consumer group '{config.stream_consumer_group}' "
|
||||
f"on stream '{config.stream_completion_name}'"
|
||||
)
|
||||
except ResponseError as e:
|
||||
if "BUSYGROUP" in str(e):
|
||||
logger.debug(
|
||||
f"Consumer group '{config.stream_consumer_group}' already exists"
|
||||
)
|
||||
else:
|
||||
raise
|
||||
|
||||
self._running = True
|
||||
self._consumer_task = asyncio.create_task(self._consume_messages())
|
||||
logger.info(
|
||||
f"Chat completion consumer started (consumer: {self._consumer_name})"
|
||||
)
|
||||
|
||||
async def stop(self) -> None:
|
||||
"""Stop the completion consumer."""
|
||||
self._running = False
|
||||
|
||||
if self._consumer_task:
|
||||
self._consumer_task.cancel()
|
||||
try:
|
||||
await self._consumer_task
|
||||
except asyncio.CancelledError:
|
||||
pass
|
||||
self._consumer_task = None
|
||||
|
||||
logger.info("Chat completion consumer stopped")
|
||||
|
||||
async def _consume_messages(self) -> None:
|
||||
"""Main message consumption loop with retry logic."""
|
||||
max_retries = 10
|
||||
retry_delay = 5 # seconds
|
||||
retry_count = 0
|
||||
block_timeout = 5000 # milliseconds
|
||||
|
||||
while self._running and retry_count < max_retries:
|
||||
try:
|
||||
redis = await get_redis_async()
|
||||
|
||||
# Reset retry count on successful connection
|
||||
retry_count = 0
|
||||
|
||||
while self._running:
|
||||
# First, claim any stale pending messages from dead consumers
|
||||
# Redis does NOT auto-redeliver pending messages; we must explicitly
|
||||
# claim them using XAUTOCLAIM
|
||||
try:
|
||||
claimed_result = await redis.xautoclaim(
|
||||
name=config.stream_completion_name,
|
||||
groupname=config.stream_consumer_group,
|
||||
consumername=self._consumer_name,
|
||||
min_idle_time=config.stream_claim_min_idle_ms,
|
||||
start_id="0-0",
|
||||
count=10,
|
||||
)
|
||||
# xautoclaim returns: (next_start_id, [(id, data), ...], [deleted_ids])
|
||||
if claimed_result and len(claimed_result) >= 2:
|
||||
claimed_entries = claimed_result[1]
|
||||
if claimed_entries:
|
||||
logger.info(
|
||||
f"Claimed {len(claimed_entries)} stale pending messages"
|
||||
)
|
||||
for entry_id, data in claimed_entries:
|
||||
if not self._running:
|
||||
return
|
||||
await self._process_entry(redis, entry_id, data)
|
||||
except Exception as e:
|
||||
logger.warning(f"XAUTOCLAIM failed (non-fatal): {e}")
|
||||
|
||||
# Read new messages from the stream
|
||||
messages = await redis.xreadgroup(
|
||||
groupname=config.stream_consumer_group,
|
||||
consumername=self._consumer_name,
|
||||
streams={config.stream_completion_name: ">"},
|
||||
block=block_timeout,
|
||||
count=10,
|
||||
)
|
||||
|
||||
if not messages:
|
||||
continue
|
||||
|
||||
for stream_name, entries in messages:
|
||||
for entry_id, data in entries:
|
||||
if not self._running:
|
||||
return
|
||||
await self._process_entry(redis, entry_id, data)
|
||||
|
||||
except asyncio.CancelledError:
|
||||
logger.info("Consumer cancelled")
|
||||
return
|
||||
except Exception as e:
|
||||
retry_count += 1
|
||||
logger.error(
|
||||
f"Consumer error (retry {retry_count}/{max_retries}): {e}",
|
||||
exc_info=True,
|
||||
)
|
||||
if self._running and retry_count < max_retries:
|
||||
await asyncio.sleep(retry_delay)
|
||||
else:
|
||||
logger.error("Max retries reached, stopping consumer")
|
||||
return
|
||||
|
||||
async def _process_entry(
|
||||
self, redis: Any, entry_id: str, data: dict[str, Any]
|
||||
) -> None:
|
||||
"""Process a single stream entry and acknowledge it on success.
|
||||
|
||||
Args:
|
||||
redis: Redis client connection
|
||||
entry_id: The stream entry ID
|
||||
data: The entry data dict
|
||||
"""
|
||||
try:
|
||||
# Handle the message
|
||||
message_data = data.get("data")
|
||||
if message_data:
|
||||
await self._handle_message(
|
||||
message_data.encode()
|
||||
if isinstance(message_data, str)
|
||||
else message_data
|
||||
)
|
||||
|
||||
# Acknowledge the message after successful processing
|
||||
await redis.xack(
|
||||
config.stream_completion_name,
|
||||
config.stream_consumer_group,
|
||||
entry_id,
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"Error processing completion message {entry_id}: {e}",
|
||||
exc_info=True,
|
||||
)
|
||||
# Message remains in pending state and will be claimed by
|
||||
# XAUTOCLAIM after min_idle_time expires
|
||||
|
||||
async def _handle_message(self, body: bytes) -> None:
|
||||
"""Handle a completion message."""
|
||||
try:
|
||||
data = orjson.loads(body)
|
||||
message = OperationCompleteMessage(**data)
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to parse completion message: {e}")
|
||||
return
|
||||
|
||||
logger.info(
|
||||
f"[COMPLETION] Received completion for operation {message.operation_id} "
|
||||
f"(task_id={message.task_id}, success={message.success})"
|
||||
)
|
||||
|
||||
# Find task in registry
|
||||
task = await stream_registry.find_task_by_operation_id(message.operation_id)
|
||||
if task is None:
|
||||
task = await stream_registry.get_task(message.task_id)
|
||||
|
||||
if task is None:
|
||||
logger.warning(
|
||||
f"[COMPLETION] Task not found for operation {message.operation_id} "
|
||||
f"(task_id={message.task_id})"
|
||||
)
|
||||
return
|
||||
|
||||
logger.info(
|
||||
f"[COMPLETION] Found task: task_id={task.task_id}, "
|
||||
f"session_id={task.session_id}, tool_call_id={task.tool_call_id}"
|
||||
)
|
||||
|
||||
# Guard against empty task fields
|
||||
if not task.task_id or not task.session_id or not task.tool_call_id:
|
||||
logger.error(
|
||||
f"[COMPLETION] Task has empty critical fields! "
|
||||
f"task_id={task.task_id!r}, session_id={task.session_id!r}, "
|
||||
f"tool_call_id={task.tool_call_id!r}"
|
||||
)
|
||||
return
|
||||
|
||||
if message.success:
|
||||
await self._handle_success(task, message)
|
||||
else:
|
||||
await self._handle_failure(task, message)
|
||||
|
||||
async def _handle_success(
|
||||
self,
|
||||
task: stream_registry.ActiveTask,
|
||||
message: OperationCompleteMessage,
|
||||
) -> None:
|
||||
"""Handle successful operation completion."""
|
||||
await process_operation_success(task, message.result)
|
||||
|
||||
async def _handle_failure(
|
||||
self,
|
||||
task: stream_registry.ActiveTask,
|
||||
message: OperationCompleteMessage,
|
||||
) -> None:
|
||||
"""Handle failed operation completion."""
|
||||
await process_operation_failure(task, message.error)
|
||||
|
||||
|
||||
# Module-level consumer instance
|
||||
_consumer: ChatCompletionConsumer | None = None
|
||||
|
||||
|
||||
async def start_completion_consumer() -> None:
|
||||
"""Start the global completion consumer."""
|
||||
global _consumer
|
||||
if _consumer is None:
|
||||
_consumer = ChatCompletionConsumer()
|
||||
await _consumer.start()
|
||||
|
||||
|
||||
async def stop_completion_consumer() -> None:
|
||||
"""Stop the global completion consumer."""
|
||||
global _consumer
|
||||
if _consumer:
|
||||
await _consumer.stop()
|
||||
_consumer = None
|
||||
|
||||
|
||||
async def publish_operation_complete(
|
||||
operation_id: str,
|
||||
task_id: str,
|
||||
success: bool,
|
||||
result: dict | str | None = None,
|
||||
error: str | None = None,
|
||||
) -> None:
|
||||
"""Publish an operation completion message to Redis Streams.
|
||||
|
||||
Args:
|
||||
operation_id: The operation ID that completed.
|
||||
task_id: The task ID associated with the operation.
|
||||
success: Whether the operation succeeded.
|
||||
result: The result data (for success).
|
||||
error: The error message (for failure).
|
||||
"""
|
||||
message = OperationCompleteMessage(
|
||||
operation_id=operation_id,
|
||||
task_id=task_id,
|
||||
success=success,
|
||||
result=result,
|
||||
error=error,
|
||||
)
|
||||
|
||||
redis = await get_redis_async()
|
||||
await redis.xadd(
|
||||
config.stream_completion_name,
|
||||
{"data": message.model_dump_json()},
|
||||
maxlen=config.stream_max_length,
|
||||
)
|
||||
logger.info(f"Published completion for operation {operation_id}")
|
||||
@@ -1,329 +0,0 @@
|
||||
"""Shared completion handling for operation success and failure.
|
||||
|
||||
This module provides common logic for handling operation completion from both:
|
||||
- The Redis Streams consumer (completion_consumer.py)
|
||||
- The HTTP webhook endpoint (routes.py)
|
||||
"""
|
||||
|
||||
import logging
|
||||
from typing import Any
|
||||
|
||||
import orjson
|
||||
|
||||
from backend.data.db_accessors import chat_db
|
||||
|
||||
from . import service as chat_service
|
||||
from . import stream_registry
|
||||
from .response_model import StreamError, StreamToolOutputAvailable
|
||||
from .tools.models import ErrorResponse
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Tools that produce agent_json that needs to be saved to library
|
||||
AGENT_GENERATION_TOOLS = {"create_agent", "edit_agent"}
|
||||
|
||||
# Keys that should be stripped from agent_json when returning in error responses
|
||||
SENSITIVE_KEYS = frozenset(
|
||||
{
|
||||
"api_key",
|
||||
"apikey",
|
||||
"api_secret",
|
||||
"password",
|
||||
"secret",
|
||||
"credentials",
|
||||
"credential",
|
||||
"token",
|
||||
"access_token",
|
||||
"refresh_token",
|
||||
"private_key",
|
||||
"privatekey",
|
||||
"auth",
|
||||
"authorization",
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
def _sanitize_agent_json(obj: Any) -> Any:
|
||||
"""Recursively sanitize agent_json by removing sensitive keys.
|
||||
|
||||
Args:
|
||||
obj: The object to sanitize (dict, list, or primitive)
|
||||
|
||||
Returns:
|
||||
Sanitized copy with sensitive keys removed/redacted
|
||||
"""
|
||||
if isinstance(obj, dict):
|
||||
return {
|
||||
k: "[REDACTED]" if k.lower() in SENSITIVE_KEYS else _sanitize_agent_json(v)
|
||||
for k, v in obj.items()
|
||||
}
|
||||
elif isinstance(obj, list):
|
||||
return [_sanitize_agent_json(item) for item in obj]
|
||||
else:
|
||||
return obj
|
||||
|
||||
|
||||
class ToolMessageUpdateError(Exception):
|
||||
"""Raised when updating a tool message in the database fails."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
async def _update_tool_message(
|
||||
session_id: str,
|
||||
tool_call_id: str,
|
||||
content: str,
|
||||
) -> None:
|
||||
"""Update tool message in database using the chat_db accessor.
|
||||
|
||||
Routes through DatabaseManager RPC when Prisma is not directly
|
||||
connected (e.g. in the CoPilot Executor microservice).
|
||||
|
||||
Args:
|
||||
session_id: The session ID
|
||||
tool_call_id: The tool call ID to update
|
||||
content: The new content for the message
|
||||
|
||||
Raises:
|
||||
ToolMessageUpdateError: If the database update fails.
|
||||
"""
|
||||
try:
|
||||
updated = await chat_db().update_tool_message_content(
|
||||
session_id=session_id,
|
||||
tool_call_id=tool_call_id,
|
||||
new_content=content,
|
||||
)
|
||||
if not updated:
|
||||
raise ToolMessageUpdateError(
|
||||
f"No message found with tool_call_id="
|
||||
f"{tool_call_id} in session {session_id}"
|
||||
)
|
||||
except ToolMessageUpdateError:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"[COMPLETION] Failed to update tool message: {e}",
|
||||
exc_info=True,
|
||||
)
|
||||
raise ToolMessageUpdateError(
|
||||
f"Failed to update tool message for tool call #{tool_call_id}: {e}"
|
||||
) from e
|
||||
|
||||
|
||||
def serialize_result(result: dict | list | str | int | float | bool | None) -> str:
|
||||
"""Serialize result to JSON string with sensible defaults.
|
||||
|
||||
Args:
|
||||
result: The result to serialize. Can be a dict, list, string,
|
||||
number, boolean, or None.
|
||||
|
||||
Returns:
|
||||
JSON string representation of the result. Returns '{"status": "completed"}'
|
||||
only when result is explicitly None.
|
||||
"""
|
||||
if isinstance(result, str):
|
||||
return result
|
||||
if result is None:
|
||||
return '{"status": "completed"}'
|
||||
return orjson.dumps(result).decode("utf-8")
|
||||
|
||||
|
||||
async def _save_agent_from_result(
|
||||
result: dict[str, Any],
|
||||
user_id: str | None,
|
||||
tool_name: str,
|
||||
) -> dict[str, Any]:
|
||||
"""Save agent to library if result contains agent_json.
|
||||
|
||||
Args:
|
||||
result: The result dict that may contain agent_json
|
||||
user_id: The user ID to save the agent for
|
||||
tool_name: The tool name (create_agent or edit_agent)
|
||||
|
||||
Returns:
|
||||
Updated result dict with saved agent details, or original result if no agent_json
|
||||
"""
|
||||
if not user_id:
|
||||
logger.warning("[COMPLETION] Cannot save agent: no user_id in task")
|
||||
return result
|
||||
|
||||
agent_json = result.get("agent_json")
|
||||
if not agent_json:
|
||||
logger.warning(
|
||||
f"[COMPLETION] {tool_name} completed but no agent_json in result"
|
||||
)
|
||||
return result
|
||||
|
||||
try:
|
||||
from .tools.agent_generator import save_agent_to_library
|
||||
|
||||
is_update = tool_name == "edit_agent"
|
||||
created_graph, library_agent = await save_agent_to_library(
|
||||
agent_json, user_id, is_update=is_update
|
||||
)
|
||||
|
||||
logger.info(
|
||||
f"[COMPLETION] Saved agent '{created_graph.name}' to library "
|
||||
f"(graph_id={created_graph.id}, library_agent_id={library_agent.id})"
|
||||
)
|
||||
|
||||
# Return a response similar to AgentSavedResponse
|
||||
return {
|
||||
"type": "agent_saved",
|
||||
"message": f"Agent '{created_graph.name}' has been saved to your library!",
|
||||
"agent_id": created_graph.id,
|
||||
"agent_name": created_graph.name,
|
||||
"library_agent_id": library_agent.id,
|
||||
"library_agent_link": f"/library/agents/{library_agent.id}",
|
||||
"agent_page_link": f"/build?flowID={created_graph.id}",
|
||||
}
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"[COMPLETION] Failed to save agent to library: {e}",
|
||||
exc_info=True,
|
||||
)
|
||||
# Return error but don't fail the whole operation
|
||||
# Sanitize agent_json to remove sensitive keys before returning
|
||||
return {
|
||||
"type": "error",
|
||||
"message": f"Agent was generated but failed to save: {str(e)}",
|
||||
"error": str(e),
|
||||
"agent_json": _sanitize_agent_json(agent_json),
|
||||
}
|
||||
|
||||
|
||||
async def process_operation_success(
|
||||
task: stream_registry.ActiveTask,
|
||||
result: dict | str | None,
|
||||
) -> None:
|
||||
"""Handle successful operation completion.
|
||||
|
||||
Publishes the result to the stream registry, updates the database,
|
||||
generates LLM continuation, and marks the task as completed.
|
||||
|
||||
Args:
|
||||
task: The active task that completed
|
||||
result: The result data from the operation
|
||||
|
||||
Raises:
|
||||
ToolMessageUpdateError: If the database update fails. The task
|
||||
will be marked as failed instead of completed.
|
||||
"""
|
||||
# For agent generation tools, save the agent to library
|
||||
if task.tool_name in AGENT_GENERATION_TOOLS and isinstance(result, dict):
|
||||
result = await _save_agent_from_result(result, task.user_id, task.tool_name)
|
||||
|
||||
# Serialize result for output (only substitute default when result is exactly None)
|
||||
result_output = result if result is not None else {"status": "completed"}
|
||||
output_str = (
|
||||
result_output
|
||||
if isinstance(result_output, str)
|
||||
else orjson.dumps(result_output).decode("utf-8")
|
||||
)
|
||||
|
||||
# Publish result to stream registry
|
||||
await stream_registry.publish_chunk(
|
||||
task.task_id,
|
||||
StreamToolOutputAvailable(
|
||||
toolCallId=task.tool_call_id,
|
||||
toolName=task.tool_name,
|
||||
output=output_str,
|
||||
success=True,
|
||||
),
|
||||
)
|
||||
|
||||
# Update pending operation in database
|
||||
# If this fails, we must not continue to mark the task as completed
|
||||
result_str = serialize_result(result)
|
||||
try:
|
||||
await _update_tool_message(
|
||||
session_id=task.session_id,
|
||||
tool_call_id=task.tool_call_id,
|
||||
content=result_str,
|
||||
)
|
||||
except ToolMessageUpdateError:
|
||||
# DB update failed - mark task as failed to avoid inconsistent state
|
||||
logger.error(
|
||||
f"[COMPLETION] DB update failed for task {task.task_id}, "
|
||||
"marking as failed instead of completed"
|
||||
)
|
||||
await stream_registry.publish_chunk(
|
||||
task.task_id,
|
||||
StreamError(errorText="Failed to save operation result to database"),
|
||||
)
|
||||
await stream_registry.mark_task_completed(task.task_id, status="failed")
|
||||
raise
|
||||
|
||||
# Generate LLM continuation with streaming
|
||||
try:
|
||||
await chat_service._generate_llm_continuation_with_streaming(
|
||||
session_id=task.session_id,
|
||||
user_id=task.user_id,
|
||||
task_id=task.task_id,
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"[COMPLETION] Failed to generate LLM continuation: {e}",
|
||||
exc_info=True,
|
||||
)
|
||||
|
||||
# Mark task as completed and release Redis lock
|
||||
await stream_registry.mark_task_completed(task.task_id, status="completed")
|
||||
try:
|
||||
await chat_service._mark_operation_completed(task.tool_call_id)
|
||||
except Exception as e:
|
||||
logger.error(f"[COMPLETION] Failed to mark operation completed: {e}")
|
||||
|
||||
logger.info(
|
||||
f"[COMPLETION] Successfully processed completion for task {task.task_id}"
|
||||
)
|
||||
|
||||
|
||||
async def process_operation_failure(
|
||||
task: stream_registry.ActiveTask,
|
||||
error: str | None,
|
||||
) -> None:
|
||||
"""Handle failed operation completion.
|
||||
|
||||
Publishes the error to the stream registry, updates the database
|
||||
with the error response, and marks the task as failed.
|
||||
|
||||
Args:
|
||||
task: The active task that failed
|
||||
error: The error message from the operation
|
||||
"""
|
||||
error_msg = error or "Operation failed"
|
||||
|
||||
# Publish error to stream registry
|
||||
await stream_registry.publish_chunk(
|
||||
task.task_id,
|
||||
StreamError(errorText=error_msg),
|
||||
)
|
||||
|
||||
# Update pending operation with error
|
||||
# If this fails, we still continue to mark the task as failed
|
||||
error_response = ErrorResponse(
|
||||
message=error_msg,
|
||||
error=error,
|
||||
)
|
||||
try:
|
||||
await _update_tool_message(
|
||||
session_id=task.session_id,
|
||||
tool_call_id=task.tool_call_id,
|
||||
content=error_response.model_dump_json(),
|
||||
)
|
||||
except ToolMessageUpdateError:
|
||||
# DB update failed - log but continue with cleanup
|
||||
logger.error(
|
||||
f"[COMPLETION] DB update failed while processing failure for task {task.task_id}, "
|
||||
"continuing with cleanup"
|
||||
)
|
||||
|
||||
# Mark task as failed and release Redis lock
|
||||
await stream_registry.mark_task_completed(task.task_id, status="failed")
|
||||
try:
|
||||
await chat_service._mark_operation_completed(task.tool_call_id)
|
||||
except Exception as e:
|
||||
logger.error(f"[COMPLETION] Failed to mark operation completed: {e}")
|
||||
|
||||
logger.info(f"[COMPLETION] Processed failure for task {task.task_id}: {error_msg}")
|
||||
@@ -27,7 +27,6 @@ class ChatConfig(BaseSettings):
|
||||
session_ttl: int = Field(default=43200, description="Session TTL in seconds")
|
||||
|
||||
# Streaming Configuration
|
||||
stream_timeout: int = Field(default=300, description="Stream timeout in seconds")
|
||||
max_retries: int = Field(
|
||||
default=3,
|
||||
description="Max retries for fallback path (SDK handles retries internally)",
|
||||
@@ -37,52 +36,29 @@ class ChatConfig(BaseSettings):
|
||||
default=30, description="Maximum number of agent schedules"
|
||||
)
|
||||
|
||||
# Long-running operation configuration
|
||||
long_running_operation_ttl: int = Field(
|
||||
default=600,
|
||||
description="TTL in seconds for long-running operation tracking in Redis (safety net if pod dies)",
|
||||
)
|
||||
|
||||
# Stream registry configuration for SSE reconnection
|
||||
stream_ttl: int = Field(
|
||||
default=3600,
|
||||
description="TTL in seconds for stream data in Redis (1 hour)",
|
||||
)
|
||||
stream_lock_ttl: int = Field(
|
||||
default=120,
|
||||
description="TTL in seconds for stream lock (2 minutes). Short timeout allows "
|
||||
"reconnection after refresh/crash without long waits.",
|
||||
)
|
||||
stream_max_length: int = Field(
|
||||
default=10000,
|
||||
description="Maximum number of messages to store per stream",
|
||||
)
|
||||
|
||||
# Redis Streams configuration for completion consumer
|
||||
stream_completion_name: str = Field(
|
||||
default="chat:completions",
|
||||
description="Redis Stream name for operation completions",
|
||||
)
|
||||
stream_consumer_group: str = Field(
|
||||
default="chat_consumers",
|
||||
description="Consumer group name for completion stream",
|
||||
)
|
||||
stream_claim_min_idle_ms: int = Field(
|
||||
default=60000,
|
||||
description="Minimum idle time in milliseconds before claiming pending messages from dead consumers",
|
||||
)
|
||||
|
||||
# Redis key prefixes for stream registry
|
||||
task_meta_prefix: str = Field(
|
||||
session_meta_prefix: str = Field(
|
||||
default="chat:task:meta:",
|
||||
description="Prefix for task metadata hash keys",
|
||||
description="Prefix for session metadata hash keys",
|
||||
)
|
||||
task_stream_prefix: str = Field(
|
||||
turn_stream_prefix: str = Field(
|
||||
default="chat:stream:",
|
||||
description="Prefix for task message stream keys",
|
||||
)
|
||||
task_op_prefix: str = Field(
|
||||
default="chat:task:op:",
|
||||
description="Prefix for operation ID to task ID mapping keys",
|
||||
)
|
||||
internal_api_key: str | None = Field(
|
||||
default=None,
|
||||
description="API key for internal webhook callbacks (env: CHAT_INTERNAL_API_KEY)",
|
||||
description="Prefix for turn message stream keys",
|
||||
)
|
||||
|
||||
# Langfuse Prompt Management Configuration
|
||||
@@ -109,7 +85,7 @@ class ChatConfig(BaseSettings):
|
||||
)
|
||||
claude_agent_max_subtasks: int = Field(
|
||||
default=10,
|
||||
description="Max number of sub-agent Tasks the SDK can spawn per session.",
|
||||
description="Max number of concurrent sub-agent Tasks the SDK can run per session.",
|
||||
)
|
||||
claude_agent_use_resume: bool = Field(
|
||||
default=True,
|
||||
@@ -154,14 +130,6 @@ class ChatConfig(BaseSettings):
|
||||
v = "https://openrouter.ai/api/v1"
|
||||
return v
|
||||
|
||||
@field_validator("internal_api_key", mode="before")
|
||||
@classmethod
|
||||
def get_internal_api_key(cls, v):
|
||||
"""Get internal API key from environment if not provided."""
|
||||
if v is None:
|
||||
v = os.getenv("CHAT_INTERNAL_API_KEY")
|
||||
return v
|
||||
|
||||
@field_validator("use_claude_agent_sdk", mode="before")
|
||||
@classmethod
|
||||
def get_use_claude_agent_sdk(cls, v):
|
||||
|
||||
@@ -3,8 +3,9 @@
|
||||
import asyncio
|
||||
import logging
|
||||
from datetime import UTC, datetime
|
||||
from typing import Any, cast
|
||||
from typing import Any
|
||||
|
||||
from prisma.errors import UniqueViolationError
|
||||
from prisma.models import ChatMessage as PrismaChatMessage
|
||||
from prisma.models import ChatSession as PrismaChatSession
|
||||
from prisma.types import (
|
||||
@@ -92,10 +93,9 @@ async def add_chat_message(
|
||||
function_call: dict[str, Any] | None = None,
|
||||
) -> ChatMessage:
|
||||
"""Add a message to a chat session."""
|
||||
# Build input dict dynamically rather than using ChatMessageCreateInput directly
|
||||
# because Prisma's TypedDict validation rejects optional fields set to None.
|
||||
# We only include fields that have values, then cast at the end.
|
||||
data: dict[str, Any] = {
|
||||
# Build ChatMessageCreateInput with only non-None values
|
||||
# (Prisma TypedDict rejects optional fields set to None)
|
||||
data: ChatMessageCreateInput = {
|
||||
"Session": {"connect": {"id": session_id}},
|
||||
"role": role,
|
||||
"sequence": sequence,
|
||||
@@ -123,7 +123,7 @@ async def add_chat_message(
|
||||
where={"id": session_id},
|
||||
data={"updatedAt": datetime.now(UTC)},
|
||||
),
|
||||
PrismaChatMessage.prisma().create(data=cast(ChatMessageCreateInput, data)),
|
||||
PrismaChatMessage.prisma().create(data=data),
|
||||
)
|
||||
return ChatMessage.from_db(message)
|
||||
|
||||
@@ -132,58 +132,93 @@ async def add_chat_messages_batch(
|
||||
session_id: str,
|
||||
messages: list[dict[str, Any]],
|
||||
start_sequence: int,
|
||||
) -> list[ChatMessage]:
|
||||
) -> int:
|
||||
"""Add multiple messages to a chat session in a batch.
|
||||
|
||||
Uses a transaction for atomicity - if any message creation fails,
|
||||
the entire batch is rolled back.
|
||||
Uses collision detection with retry: tries to create messages starting
|
||||
at start_sequence. If a unique constraint violation occurs (e.g., the
|
||||
streaming loop and long-running callback race), queries the latest
|
||||
sequence and retries with the correct offset. This avoids unnecessary
|
||||
upserts and DB queries in the common case (no collision).
|
||||
|
||||
Returns:
|
||||
Next sequence number for the next message to be inserted. This equals
|
||||
start_sequence + len(messages) and allows callers to update their
|
||||
counters even when collision detection adjusts start_sequence.
|
||||
"""
|
||||
if not messages:
|
||||
return []
|
||||
# No messages to add - return current count
|
||||
return start_sequence
|
||||
|
||||
created_messages = []
|
||||
max_retries = 5
|
||||
for attempt in range(max_retries):
|
||||
try:
|
||||
# Single timestamp for all messages and session update
|
||||
now = datetime.now(UTC)
|
||||
|
||||
async with db.transaction() as tx:
|
||||
for i, msg in enumerate(messages):
|
||||
# Build input dict dynamically rather than using ChatMessageCreateInput
|
||||
# directly because Prisma's TypedDict validation rejects optional fields
|
||||
# set to None. We only include fields that have values, then cast.
|
||||
data: dict[str, Any] = {
|
||||
"Session": {"connect": {"id": session_id}},
|
||||
"role": msg["role"],
|
||||
"sequence": start_sequence + i,
|
||||
}
|
||||
async with db.transaction() as tx:
|
||||
# Build all message data
|
||||
messages_data = []
|
||||
for i, msg in enumerate(messages):
|
||||
# Build ChatMessageCreateInput with only non-None values
|
||||
# (Prisma TypedDict rejects optional fields set to None)
|
||||
# Note: create_many doesn't support nested creates, use sessionId directly
|
||||
data: ChatMessageCreateInput = {
|
||||
"sessionId": session_id,
|
||||
"role": msg["role"],
|
||||
"sequence": start_sequence + i,
|
||||
"createdAt": now,
|
||||
}
|
||||
|
||||
# Add optional string fields
|
||||
if msg.get("content") is not None:
|
||||
data["content"] = msg["content"]
|
||||
if msg.get("name") is not None:
|
||||
data["name"] = msg["name"]
|
||||
if msg.get("tool_call_id") is not None:
|
||||
data["toolCallId"] = msg["tool_call_id"]
|
||||
if msg.get("refusal") is not None:
|
||||
data["refusal"] = msg["refusal"]
|
||||
# Add optional string fields
|
||||
if msg.get("content") is not None:
|
||||
data["content"] = msg["content"]
|
||||
if msg.get("name") is not None:
|
||||
data["name"] = msg["name"]
|
||||
if msg.get("tool_call_id") is not None:
|
||||
data["toolCallId"] = msg["tool_call_id"]
|
||||
if msg.get("refusal") is not None:
|
||||
data["refusal"] = msg["refusal"]
|
||||
|
||||
# Add optional JSON fields only when they have values
|
||||
if msg.get("tool_calls") is not None:
|
||||
data["toolCalls"] = SafeJson(msg["tool_calls"])
|
||||
if msg.get("function_call") is not None:
|
||||
data["functionCall"] = SafeJson(msg["function_call"])
|
||||
# Add optional JSON fields only when they have values
|
||||
if msg.get("tool_calls") is not None:
|
||||
data["toolCalls"] = SafeJson(msg["tool_calls"])
|
||||
if msg.get("function_call") is not None:
|
||||
data["functionCall"] = SafeJson(msg["function_call"])
|
||||
|
||||
created = await PrismaChatMessage.prisma(tx).create(
|
||||
data=cast(ChatMessageCreateInput, data)
|
||||
)
|
||||
created_messages.append(created)
|
||||
messages_data.append(data)
|
||||
|
||||
# Update session's updatedAt timestamp within the same transaction.
|
||||
# Note: Token usage (total_prompt_tokens, total_completion_tokens) is updated
|
||||
# separately via update_chat_session() after streaming completes.
|
||||
await PrismaChatSession.prisma(tx).update(
|
||||
where={"id": session_id},
|
||||
data={"updatedAt": datetime.now(UTC)},
|
||||
)
|
||||
# Run create_many and session update in parallel within transaction
|
||||
# Both use the same timestamp for consistency
|
||||
await asyncio.gather(
|
||||
PrismaChatMessage.prisma(tx).create_many(data=messages_data),
|
||||
PrismaChatSession.prisma(tx).update(
|
||||
where={"id": session_id},
|
||||
data={"updatedAt": now},
|
||||
),
|
||||
)
|
||||
|
||||
return [ChatMessage.from_db(m) for m in created_messages]
|
||||
# Return next sequence number for counter sync
|
||||
return start_sequence + len(messages)
|
||||
|
||||
except UniqueViolationError:
|
||||
if attempt < max_retries - 1:
|
||||
# Collision detected - query MAX(sequence)+1 and retry with correct offset
|
||||
logger.info(
|
||||
f"Collision detected for session {session_id} at sequence "
|
||||
f"{start_sequence}, querying DB for latest sequence"
|
||||
)
|
||||
start_sequence = await get_next_sequence(session_id)
|
||||
logger.info(
|
||||
f"Retrying batch insert with start_sequence={start_sequence}"
|
||||
)
|
||||
continue
|
||||
else:
|
||||
# Max retries exceeded - propagate error
|
||||
raise
|
||||
|
||||
# Should never reach here due to raise in exception handler
|
||||
raise RuntimeError(f"Failed to insert messages after {max_retries} attempts")
|
||||
|
||||
|
||||
async def get_user_chat_sessions(
|
||||
@@ -237,10 +272,20 @@ async def delete_chat_session(session_id: str, user_id: str | None = None) -> bo
|
||||
return False
|
||||
|
||||
|
||||
async def get_chat_session_message_count(session_id: str) -> int:
|
||||
"""Get the number of messages in a chat session."""
|
||||
count = await PrismaChatMessage.prisma().count(where={"sessionId": session_id})
|
||||
return count
|
||||
async def get_next_sequence(session_id: str) -> int:
|
||||
"""Get the next sequence number for a new message in this session.
|
||||
|
||||
Uses MAX(sequence) + 1 for robustness. Returns 0 if no messages exist.
|
||||
More robust than COUNT(*) because it's immune to deleted messages.
|
||||
|
||||
Optimized to select only the sequence column using raw SQL.
|
||||
The unique index on (sessionId, sequence) makes this query fast.
|
||||
"""
|
||||
results = await db.query_raw_with_schema(
|
||||
'SELECT "sequence" FROM {schema_prefix}"ChatMessage" WHERE "sessionId" = $1 ORDER BY "sequence" DESC LIMIT 1',
|
||||
session_id,
|
||||
)
|
||||
return 0 if not results else results[0]["sequence"] + 1
|
||||
|
||||
|
||||
async def update_tool_message_content(
|
||||
|
||||
@@ -4,6 +4,7 @@ This module contains the CoPilotExecutor class that consumes chat tasks from
|
||||
RabbitMQ and processes them using a thread pool, following the graph executor pattern.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
import os
|
||||
import threading
|
||||
@@ -25,7 +26,7 @@ from backend.util.process import AppProcess
|
||||
from backend.util.retry import continuous_retry
|
||||
from backend.util.settings import Settings
|
||||
|
||||
from .processor import execute_copilot_task, init_worker
|
||||
from .processor import execute_copilot_turn, init_worker
|
||||
from .utils import (
|
||||
COPILOT_CANCEL_QUEUE_NAME,
|
||||
COPILOT_EXECUTION_QUEUE_NAME,
|
||||
@@ -181,13 +182,13 @@ class CoPilotExecutor(AppProcess):
|
||||
self._executor.shutdown(wait=False)
|
||||
|
||||
# Release any remaining locks
|
||||
for task_id, lock in list(self._task_locks.items()):
|
||||
for session_id, lock in list(self._task_locks.items()):
|
||||
try:
|
||||
lock.release()
|
||||
logger.info(f"[cleanup {pid}] Released lock for {task_id}")
|
||||
logger.info(f"[cleanup {pid}] Released lock for {session_id}")
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"[cleanup {pid}] Failed to release lock for {task_id}: {e}"
|
||||
f"[cleanup {pid}] Failed to release lock for {session_id}: {e}"
|
||||
)
|
||||
|
||||
logger.info(f"[cleanup {pid}] Graceful shutdown completed")
|
||||
@@ -267,20 +268,20 @@ class CoPilotExecutor(AppProcess):
|
||||
):
|
||||
"""Handle cancel message from FANOUT exchange."""
|
||||
request = CancelCoPilotEvent.model_validate_json(body)
|
||||
task_id = request.task_id
|
||||
if not task_id:
|
||||
logger.warning("Cancel message missing 'task_id'")
|
||||
session_id = request.session_id
|
||||
if not session_id:
|
||||
logger.warning("Cancel message missing 'session_id'")
|
||||
return
|
||||
if task_id not in self.active_tasks:
|
||||
logger.debug(f"Cancel received for {task_id} but not active")
|
||||
if session_id not in self.active_tasks:
|
||||
logger.debug(f"Cancel received for {session_id} but not active")
|
||||
return
|
||||
|
||||
_, cancel_event = self.active_tasks[task_id]
|
||||
logger.info(f"Received cancel for {task_id}")
|
||||
_, cancel_event = self.active_tasks[session_id]
|
||||
logger.info(f"Received cancel for {session_id}")
|
||||
if not cancel_event.is_set():
|
||||
cancel_event.set()
|
||||
else:
|
||||
logger.debug(f"Cancel already set for {task_id}")
|
||||
logger.debug(f"Cancel already set for {session_id}")
|
||||
|
||||
def _handle_run_message(
|
||||
self,
|
||||
@@ -352,12 +353,12 @@ class CoPilotExecutor(AppProcess):
|
||||
ack_message(reject=True, requeue=False)
|
||||
return
|
||||
|
||||
task_id = entry.task_id
|
||||
session_id = entry.session_id
|
||||
|
||||
# Check for local duplicate - task is already running on this executor
|
||||
if task_id in self.active_tasks:
|
||||
# Check for local duplicate - session is already running on this executor
|
||||
if session_id in self.active_tasks:
|
||||
logger.warning(
|
||||
f"Task {task_id} already running locally, rejecting duplicate"
|
||||
f"Session {session_id} already running locally, rejecting duplicate"
|
||||
)
|
||||
ack_message(reject=True, requeue=False)
|
||||
return
|
||||
@@ -365,64 +366,69 @@ class CoPilotExecutor(AppProcess):
|
||||
# Try to acquire cluster-wide lock
|
||||
cluster_lock = ClusterLock(
|
||||
redis=redis.get_redis(),
|
||||
key=f"copilot:task:{task_id}:lock",
|
||||
key=f"copilot:session:{session_id}:lock",
|
||||
owner_id=self.executor_id,
|
||||
timeout=settings.config.cluster_lock_timeout,
|
||||
)
|
||||
current_owner = cluster_lock.try_acquire()
|
||||
if current_owner != self.executor_id:
|
||||
if current_owner is not None:
|
||||
logger.warning(f"Task {task_id} already running on pod {current_owner}")
|
||||
logger.warning(
|
||||
f"Session {session_id} already running on pod {current_owner}"
|
||||
)
|
||||
ack_message(reject=True, requeue=False)
|
||||
else:
|
||||
logger.warning(
|
||||
f"Could not acquire lock for {task_id} - Redis unavailable"
|
||||
f"Could not acquire lock for {session_id} - Redis unavailable"
|
||||
)
|
||||
ack_message(reject=True, requeue=True)
|
||||
return
|
||||
|
||||
# Execute the task
|
||||
try:
|
||||
self._task_locks[task_id] = cluster_lock
|
||||
self._task_locks[session_id] = cluster_lock
|
||||
|
||||
logger.info(
|
||||
f"Acquired cluster lock for {task_id}, executor_id={self.executor_id}"
|
||||
f"Acquired cluster lock for {session_id}, "
|
||||
f"executor_id={self.executor_id}"
|
||||
)
|
||||
|
||||
cancel_event = threading.Event()
|
||||
future = self.executor.submit(
|
||||
execute_copilot_task, entry, cancel_event, cluster_lock
|
||||
execute_copilot_turn, entry, cancel_event, cluster_lock
|
||||
)
|
||||
self.active_tasks[task_id] = (future, cancel_event)
|
||||
self.active_tasks[session_id] = (future, cancel_event)
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to setup execution for {task_id}: {e}")
|
||||
logger.warning(f"Failed to setup execution for {session_id}: {e}")
|
||||
cluster_lock.release()
|
||||
if task_id in self._task_locks:
|
||||
del self._task_locks[task_id]
|
||||
if session_id in self._task_locks:
|
||||
del self._task_locks[session_id]
|
||||
ack_message(reject=True, requeue=True)
|
||||
return
|
||||
|
||||
self._update_metrics()
|
||||
|
||||
def on_run_done(f: Future):
|
||||
logger.info(f"Run completed for {task_id}")
|
||||
logger.info(f"Run completed for {session_id}")
|
||||
error_msg = None
|
||||
try:
|
||||
if exec_error := f.exception():
|
||||
logger.error(f"Execution for {task_id} failed: {exec_error}")
|
||||
# Don't requeue failed tasks - they've been marked as failed
|
||||
# in the stream registry. Requeuing would cause infinite retries
|
||||
# for deterministic failures.
|
||||
error_msg = str(exec_error) or type(exec_error).__name__
|
||||
logger.error(f"Execution for {session_id} failed: {error_msg}")
|
||||
ack_message(reject=True, requeue=False)
|
||||
else:
|
||||
ack_message(reject=False, requeue=False)
|
||||
except asyncio.CancelledError:
|
||||
logger.info(f"Run completion callback cancelled for {session_id}")
|
||||
except BaseException as e:
|
||||
logger.exception(f"Error in run completion callback: {e}")
|
||||
error_msg = str(e) or type(e).__name__
|
||||
logger.exception(f"Error in run completion callback: {error_msg}")
|
||||
finally:
|
||||
# Release the cluster lock
|
||||
if task_id in self._task_locks:
|
||||
logger.info(f"Releasing cluster lock for {task_id}")
|
||||
self._task_locks[task_id].release()
|
||||
del self._task_locks[task_id]
|
||||
if session_id in self._task_locks:
|
||||
logger.info(f"Releasing cluster lock for {session_id}")
|
||||
self._task_locks[session_id].release()
|
||||
del self._task_locks[session_id]
|
||||
self._cleanup_completed_tasks()
|
||||
|
||||
future.add_done_callback(on_run_done)
|
||||
@@ -433,11 +439,11 @@ class CoPilotExecutor(AppProcess):
|
||||
"""Remove completed futures from active_tasks and update metrics."""
|
||||
completed_tasks = []
|
||||
with self._active_tasks_lock:
|
||||
for task_id, (future, _) in list(self.active_tasks.items()):
|
||||
for session_id, (future, _) in list(self.active_tasks.items()):
|
||||
if future.done():
|
||||
completed_tasks.append(task_id)
|
||||
self.active_tasks.pop(task_id, None)
|
||||
logger.info(f"Cleaned up completed task {task_id}")
|
||||
completed_tasks.append(session_id)
|
||||
self.active_tasks.pop(session_id, None)
|
||||
logger.info(f"Cleaned up completed session {session_id}")
|
||||
|
||||
self._update_metrics()
|
||||
return completed_tasks
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
"""CoPilot execution processor - per-worker execution logic.
|
||||
|
||||
This module contains the processor class that handles CoPilot task execution
|
||||
This module contains the processor class that handles CoPilot session execution
|
||||
in a thread-local context, following the graph executor pattern.
|
||||
"""
|
||||
|
||||
@@ -12,7 +12,7 @@ import time
|
||||
from backend.copilot import service as copilot_service
|
||||
from backend.copilot import stream_registry
|
||||
from backend.copilot.config import ChatConfig
|
||||
from backend.copilot.response_model import StreamError, StreamFinish, StreamFinishStep
|
||||
from backend.copilot.response_model import StreamFinish
|
||||
from backend.copilot.sdk import service as sdk_service
|
||||
from backend.executor.cluster_lock import ClusterLock
|
||||
from backend.util.decorator import error_logged
|
||||
@@ -32,17 +32,17 @@ logger = TruncatedLogger(logging.getLogger(__name__), prefix="[CoPilotExecutor]"
|
||||
_tls = threading.local()
|
||||
|
||||
|
||||
def execute_copilot_task(
|
||||
def execute_copilot_turn(
|
||||
entry: CoPilotExecutionEntry,
|
||||
cancel: threading.Event,
|
||||
cluster_lock: ClusterLock,
|
||||
):
|
||||
"""Execute a CoPilot task using the thread-local processor.
|
||||
"""Execute a single CoPilot turn (user message → AI response).
|
||||
|
||||
This function is the entry point called by the thread pool executor.
|
||||
|
||||
Args:
|
||||
entry: The task payload
|
||||
entry: The turn payload
|
||||
cancel: Threading event to signal cancellation
|
||||
cluster_lock: Distributed lock for this execution
|
||||
"""
|
||||
@@ -76,16 +76,16 @@ def cleanup_worker():
|
||||
|
||||
|
||||
class CoPilotProcessor:
|
||||
"""Per-worker execution logic for CoPilot tasks.
|
||||
"""Per-worker execution logic for CoPilot sessions.
|
||||
|
||||
This class is instantiated once per worker thread and handles the execution
|
||||
of CoPilot chat generation tasks. It maintains an async event loop for
|
||||
of CoPilot chat generation sessions. It maintains an async event loop for
|
||||
running the async service code.
|
||||
|
||||
The execution flow:
|
||||
1. CoPilot task is picked from RabbitMQ queue
|
||||
2. Manager submits task to thread pool
|
||||
3. Processor executes the task in its event loop
|
||||
1. Session entry is picked from RabbitMQ queue
|
||||
2. Manager submits to thread pool
|
||||
3. Processor executes in its event loop
|
||||
4. Results are published to Redis Streams
|
||||
"""
|
||||
|
||||
@@ -119,13 +119,16 @@ class CoPilotProcessor:
|
||||
"""
|
||||
from backend.util.workspace_storage import shutdown_workspace_storage
|
||||
|
||||
coro = shutdown_workspace_storage()
|
||||
try:
|
||||
future = asyncio.run_coroutine_threadsafe(
|
||||
shutdown_workspace_storage(), self.execution_loop
|
||||
)
|
||||
future = asyncio.run_coroutine_threadsafe(coro, self.execution_loop)
|
||||
future.result(timeout=5)
|
||||
except Exception as e:
|
||||
logger.warning(f"[CoPilotExecutor] Worker {self.tid} cleanup error: {e}")
|
||||
coro.close() # Prevent "coroutine was never awaited" warning
|
||||
error_msg = str(e) or type(e).__name__
|
||||
logger.warning(
|
||||
f"[CoPilotExecutor] Worker {self.tid} cleanup error: {error_msg}"
|
||||
)
|
||||
|
||||
# Stop the event loop
|
||||
self.execution_loop.call_soon_threadsafe(self.execution_loop.stop)
|
||||
@@ -139,19 +142,17 @@ class CoPilotProcessor:
|
||||
cancel: threading.Event,
|
||||
cluster_lock: ClusterLock,
|
||||
):
|
||||
"""Execute a CoPilot task.
|
||||
"""Execute a CoPilot turn.
|
||||
|
||||
This is the main entry point for task execution. It runs the async
|
||||
execution logic in the worker's event loop and handles errors.
|
||||
Runs the async logic in the worker's event loop and handles errors.
|
||||
|
||||
Args:
|
||||
entry: The task payload containing session and message info
|
||||
entry: The turn payload containing session and message info
|
||||
cancel: Threading event to signal cancellation
|
||||
cluster_lock: Distributed lock to prevent duplicate execution
|
||||
"""
|
||||
log = CoPilotLogMetadata(
|
||||
logging.getLogger(__name__),
|
||||
task_id=entry.task_id,
|
||||
session_id=entry.session_id,
|
||||
user_id=entry.user_id,
|
||||
)
|
||||
@@ -159,38 +160,30 @@ class CoPilotProcessor:
|
||||
|
||||
start_time = time.monotonic()
|
||||
|
||||
try:
|
||||
# Run the async execution in our event loop
|
||||
future = asyncio.run_coroutine_threadsafe(
|
||||
self._execute_async(entry, cancel, cluster_lock, log),
|
||||
self.execution_loop,
|
||||
)
|
||||
# Run the async execution in our event loop
|
||||
future = asyncio.run_coroutine_threadsafe(
|
||||
self._execute_async(entry, cancel, cluster_lock, log),
|
||||
self.execution_loop,
|
||||
)
|
||||
|
||||
# Wait for completion, checking cancel periodically
|
||||
while not future.done():
|
||||
try:
|
||||
future.result(timeout=1.0)
|
||||
except asyncio.TimeoutError:
|
||||
if cancel.is_set():
|
||||
log.info("Cancellation requested")
|
||||
future.cancel()
|
||||
break
|
||||
# Refresh cluster lock to maintain ownership
|
||||
cluster_lock.refresh()
|
||||
# Wait for completion, checking cancel periodically
|
||||
while not future.done():
|
||||
try:
|
||||
future.result(timeout=1.0)
|
||||
except asyncio.TimeoutError:
|
||||
if cancel.is_set():
|
||||
log.info("Cancellation requested")
|
||||
future.cancel()
|
||||
break
|
||||
# Refresh cluster lock to maintain ownership
|
||||
cluster_lock.refresh()
|
||||
|
||||
if not future.cancelled():
|
||||
# Get result to propagate any exceptions
|
||||
future.result()
|
||||
if not future.cancelled():
|
||||
# Get result to propagate any exceptions
|
||||
future.result()
|
||||
|
||||
elapsed = time.monotonic() - start_time
|
||||
log.info(f"Execution completed in {elapsed:.2f}s")
|
||||
|
||||
except Exception as e:
|
||||
elapsed = time.monotonic() - start_time
|
||||
log.error(f"Execution failed after {elapsed:.2f}s: {e}")
|
||||
# Note: _execute_async already marks the task as failed before re-raising,
|
||||
# so we don't call _mark_task_failed here to avoid duplicate error events.
|
||||
raise
|
||||
elapsed = time.monotonic() - start_time
|
||||
log.info(f"Execution completed in {elapsed:.2f}s")
|
||||
|
||||
async def _execute_async(
|
||||
self,
|
||||
@@ -199,19 +192,20 @@ class CoPilotProcessor:
|
||||
cluster_lock: ClusterLock,
|
||||
log: CoPilotLogMetadata,
|
||||
):
|
||||
"""Async execution logic for CoPilot task.
|
||||
"""Async execution logic for a CoPilot turn.
|
||||
|
||||
This method calls the existing stream_chat_completion service function
|
||||
and publishes results to the stream registry.
|
||||
Calls the stream_chat_completion service function and publishes
|
||||
results to the stream registry.
|
||||
|
||||
Args:
|
||||
entry: The task payload
|
||||
entry: The turn payload
|
||||
cancel: Threading event to signal cancellation
|
||||
cluster_lock: Distributed lock for refresh
|
||||
log: Structured logger for this task
|
||||
log: Structured logger
|
||||
"""
|
||||
last_refresh = time.monotonic()
|
||||
refresh_interval = 30.0 # Refresh lock every 30 seconds
|
||||
error_msg = None
|
||||
|
||||
try:
|
||||
# Choose service based on LaunchDarkly flag
|
||||
@@ -228,7 +222,7 @@ class CoPilotProcessor:
|
||||
)
|
||||
log.info(f"Using {'SDK' if use_sdk else 'standard'} service")
|
||||
|
||||
# Stream chat completion and publish chunks to Redis
|
||||
# Stream chat completion and publish chunks to Redis.
|
||||
async for chunk in stream_fn(
|
||||
session_id=entry.session_id,
|
||||
message=entry.message if entry.message else None,
|
||||
@@ -236,52 +230,47 @@ class CoPilotProcessor:
|
||||
user_id=entry.user_id,
|
||||
context=entry.context,
|
||||
):
|
||||
# Check for cancellation
|
||||
if cancel.is_set():
|
||||
log.info("Cancelled during streaming")
|
||||
await stream_registry.publish_chunk(
|
||||
entry.task_id, StreamError(errorText="Operation cancelled")
|
||||
)
|
||||
await stream_registry.publish_chunk(
|
||||
entry.task_id, StreamFinishStep()
|
||||
)
|
||||
await stream_registry.publish_chunk(entry.task_id, StreamFinish())
|
||||
await stream_registry.mark_task_completed(
|
||||
entry.task_id, status="failed"
|
||||
)
|
||||
return
|
||||
log.info("Cancel requested, breaking stream")
|
||||
break
|
||||
|
||||
# Refresh cluster lock periodically
|
||||
current_time = time.monotonic()
|
||||
if current_time - last_refresh >= refresh_interval:
|
||||
cluster_lock.refresh()
|
||||
last_refresh = current_time
|
||||
|
||||
# Publish chunk to stream registry
|
||||
await stream_registry.publish_chunk(entry.task_id, chunk)
|
||||
# Skip StreamFinish — mark_session_completed publishes it.
|
||||
if isinstance(chunk, StreamFinish):
|
||||
continue
|
||||
|
||||
# Mark task as completed
|
||||
await stream_registry.mark_task_completed(entry.task_id, status="completed")
|
||||
log.info("Task completed successfully")
|
||||
try:
|
||||
await stream_registry.publish_chunk(entry.turn_id, chunk)
|
||||
except Exception as e:
|
||||
log.error(
|
||||
f"Error publishing chunk {type(chunk).__name__}: {e}",
|
||||
exc_info=True,
|
||||
)
|
||||
|
||||
except asyncio.CancelledError:
|
||||
log.info("Task cancelled")
|
||||
await stream_registry.mark_task_completed(entry.task_id, status="failed")
|
||||
# Stream loop completed
|
||||
if cancel.is_set():
|
||||
log.info("Stream cancelled by user")
|
||||
|
||||
except BaseException as e:
|
||||
# Handle all exceptions (including CancelledError) with appropriate logging
|
||||
if isinstance(e, asyncio.CancelledError):
|
||||
log.info("Turn cancelled")
|
||||
error_msg = "Operation cancelled"
|
||||
else:
|
||||
error_msg = str(e) or type(e).__name__
|
||||
log.error(f"Turn failed: {error_msg}")
|
||||
raise
|
||||
|
||||
except Exception as e:
|
||||
log.error(f"Task failed: {e}")
|
||||
await self._mark_task_failed(entry.task_id, str(e))
|
||||
raise
|
||||
|
||||
async def _mark_task_failed(self, task_id: str, error_message: str):
|
||||
"""Mark a task as failed and publish error to stream registry."""
|
||||
try:
|
||||
await stream_registry.publish_chunk(
|
||||
task_id, StreamError(errorText=error_message)
|
||||
)
|
||||
await stream_registry.publish_chunk(task_id, StreamFinishStep())
|
||||
await stream_registry.publish_chunk(task_id, StreamFinish())
|
||||
await stream_registry.mark_task_completed(task_id, status="failed")
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to mark task {task_id} as failed: {e}")
|
||||
finally:
|
||||
# If no exception but user cancelled, still mark as cancelled
|
||||
if not error_msg and cancel.is_set():
|
||||
error_msg = "Operation cancelled"
|
||||
try:
|
||||
await stream_registry.mark_session_completed(
|
||||
entry.session_id, error_message=error_msg
|
||||
)
|
||||
except Exception as mark_err:
|
||||
log.error(f"Failed to mark session completed: {mark_err}")
|
||||
|
||||
@@ -28,7 +28,7 @@ class CoPilotLogMetadata(TruncatedLogger):
|
||||
Args:
|
||||
logger: The underlying logger instance
|
||||
max_length: Maximum log message length before truncation
|
||||
**kwargs: Metadata key-value pairs (e.g., task_id="abc", session_id="xyz")
|
||||
**kwargs: Metadata key-value pairs (e.g., session_id="xyz", turn_id="abc")
|
||||
These are added to json_fields in cloud mode, or to the prefix in local mode.
|
||||
"""
|
||||
|
||||
@@ -135,18 +135,15 @@ class CoPilotExecutionEntry(BaseModel):
|
||||
This model represents a chat generation task to be processed by the executor.
|
||||
"""
|
||||
|
||||
task_id: str
|
||||
"""Unique identifier for this task (used for stream registry)"""
|
||||
|
||||
session_id: str
|
||||
"""Chat session ID"""
|
||||
"""Chat session ID (also used for dedup/locking)"""
|
||||
|
||||
turn_id: str = ""
|
||||
"""Per-turn UUID for Redis stream isolation"""
|
||||
|
||||
user_id: str | None
|
||||
"""User ID (may be None for anonymous users)"""
|
||||
|
||||
operation_id: str
|
||||
"""Operation ID for webhook callbacks and completion tracking"""
|
||||
|
||||
message: str
|
||||
"""User's message to process"""
|
||||
|
||||
@@ -156,47 +153,50 @@ class CoPilotExecutionEntry(BaseModel):
|
||||
context: dict[str, str] | None = None
|
||||
"""Optional context for the message (e.g., {url: str, content: str})"""
|
||||
|
||||
file_ids: list[str] | None = None
|
||||
"""Workspace file IDs attached to the user's message"""
|
||||
|
||||
|
||||
class CancelCoPilotEvent(BaseModel):
|
||||
"""Event to cancel a CoPilot operation."""
|
||||
|
||||
task_id: str
|
||||
"""Task ID to cancel"""
|
||||
session_id: str
|
||||
"""Session ID to cancel"""
|
||||
|
||||
|
||||
# ============ Queue Publishing Helpers ============ #
|
||||
|
||||
|
||||
async def enqueue_copilot_task(
|
||||
task_id: str,
|
||||
async def enqueue_copilot_turn(
|
||||
session_id: str,
|
||||
user_id: str | None,
|
||||
operation_id: str,
|
||||
message: str,
|
||||
turn_id: str,
|
||||
is_user_message: bool = True,
|
||||
context: dict[str, str] | None = None,
|
||||
file_ids: list[str] | None = None,
|
||||
) -> None:
|
||||
"""Enqueue a CoPilot task for processing by the executor service.
|
||||
|
||||
Args:
|
||||
task_id: Unique identifier for this task (used for stream registry)
|
||||
session_id: Chat session ID
|
||||
session_id: Chat session ID (also used for dedup/locking)
|
||||
user_id: User ID (may be None for anonymous users)
|
||||
operation_id: Operation ID for webhook callbacks and completion tracking
|
||||
message: User's message to process
|
||||
turn_id: Per-turn UUID for Redis stream isolation
|
||||
is_user_message: Whether the message is from the user (vs system/assistant)
|
||||
context: Optional context for the message (e.g., {url: str, content: str})
|
||||
file_ids: Optional workspace file IDs attached to the user's message
|
||||
"""
|
||||
from backend.util.clients import get_async_copilot_queue
|
||||
|
||||
entry = CoPilotExecutionEntry(
|
||||
task_id=task_id,
|
||||
session_id=session_id,
|
||||
turn_id=turn_id,
|
||||
user_id=user_id,
|
||||
operation_id=operation_id,
|
||||
message=message,
|
||||
is_user_message=is_user_message,
|
||||
context=context,
|
||||
file_ids=file_ids,
|
||||
)
|
||||
|
||||
queue_client = await get_async_copilot_queue()
|
||||
@@ -207,15 +207,15 @@ async def enqueue_copilot_task(
|
||||
)
|
||||
|
||||
|
||||
async def enqueue_cancel_task(task_id: str) -> None:
|
||||
"""Publish a cancel request for a running CoPilot task.
|
||||
async def enqueue_cancel_task(session_id: str) -> None:
|
||||
"""Publish a cancel request for a running CoPilot session.
|
||||
|
||||
Sends a ``CancelCoPilotEvent`` to the FANOUT exchange so all executor
|
||||
pods receive the cancellation signal.
|
||||
"""
|
||||
from backend.util.clients import get_async_copilot_queue
|
||||
|
||||
event = CancelCoPilotEvent(task_id=task_id)
|
||||
event = CancelCoPilotEvent(session_id=session_id)
|
||||
queue_client = await get_async_copilot_queue()
|
||||
await queue_client.publish_message(
|
||||
routing_key="", # FANOUT ignores routing key
|
||||
|
||||
@@ -434,8 +434,6 @@ async def _get_session_from_db(session_id: str) -> ChatSession | None:
|
||||
|
||||
async def upsert_chat_session(
|
||||
session: ChatSession,
|
||||
*,
|
||||
existing_message_count: int | None = None,
|
||||
) -> ChatSession:
|
||||
"""Update a chat session in both cache and database.
|
||||
|
||||
@@ -443,12 +441,6 @@ async def upsert_chat_session(
|
||||
operations (e.g., background title update and main stream handler)
|
||||
attempt to upsert the same session simultaneously.
|
||||
|
||||
Args:
|
||||
existing_message_count: If provided, skip the DB query to count
|
||||
existing messages. The caller is responsible for tracking this
|
||||
accurately. Useful for incremental saves in a streaming loop
|
||||
where the caller already knows how many messages are persisted.
|
||||
|
||||
Raises:
|
||||
DatabaseError: If the database write fails. The cache is still updated
|
||||
as a best-effort optimization, but the error is propagated to ensure
|
||||
@@ -459,11 +451,8 @@ async def upsert_chat_session(
|
||||
lock = await _get_session_lock(session.session_id)
|
||||
|
||||
async with lock:
|
||||
# Get existing message count from DB for incremental saves
|
||||
if existing_message_count is None:
|
||||
existing_message_count = await chat_db().get_chat_session_message_count(
|
||||
session.session_id
|
||||
)
|
||||
# Always query DB for existing message count to ensure consistency
|
||||
existing_message_count = await chat_db().get_next_sequence(session.session_id)
|
||||
|
||||
db_error: Exception | None = None
|
||||
|
||||
@@ -587,9 +576,7 @@ async def append_and_save_message(session_id: str, message: ChatMessage) -> Chat
|
||||
raise ValueError(f"Session {session_id} not found")
|
||||
|
||||
session.messages.append(message)
|
||||
existing_message_count = await chat_db().get_chat_session_message_count(
|
||||
session_id
|
||||
)
|
||||
existing_message_count = await chat_db().get_next_sequence(session_id)
|
||||
|
||||
try:
|
||||
await _save_session_to_db(session, existing_message_count)
|
||||
|
||||
@@ -331,3 +331,96 @@ def test_to_openai_messages_merges_split_assistants():
|
||||
tc_list = merged.get("tool_calls")
|
||||
assert tc_list is not None and len(list(tc_list)) == 1
|
||||
assert list(tc_list)[0]["id"] == "tc1"
|
||||
|
||||
|
||||
# --------------------------------------------------------------------------- #
|
||||
# Concurrent save collision detection #
|
||||
# --------------------------------------------------------------------------- #
|
||||
|
||||
|
||||
@pytest.mark.asyncio(loop_scope="session")
|
||||
async def test_concurrent_saves_collision_detection(setup_test_user, test_user_id):
|
||||
"""Test that concurrent saves from streaming loop and callback handle collisions correctly.
|
||||
|
||||
Simulates the race condition where:
|
||||
1. Streaming loop starts with saved_msg_count=5
|
||||
2. Long-running callback appends message #5 and saves
|
||||
3. Streaming loop tries to save with stale count=5
|
||||
|
||||
The collision detection should handle this gracefully.
|
||||
"""
|
||||
import asyncio
|
||||
|
||||
# Create a session with initial messages
|
||||
session = ChatSession.new(user_id=test_user_id)
|
||||
for i in range(3):
|
||||
session.messages.append(
|
||||
ChatMessage(
|
||||
role="user" if i % 2 == 0 else "assistant", content=f"Message {i}"
|
||||
)
|
||||
)
|
||||
|
||||
# Save initial messages
|
||||
session = await upsert_chat_session(session)
|
||||
|
||||
# Simulate streaming loop and callback saving concurrently
|
||||
async def streaming_loop_save():
|
||||
"""Simulates streaming loop saving messages."""
|
||||
# Add 2 messages
|
||||
session.messages.append(ChatMessage(role="user", content="Streaming message 1"))
|
||||
session.messages.append(
|
||||
ChatMessage(role="assistant", content="Streaming message 2")
|
||||
)
|
||||
|
||||
# Wait a bit to let callback potentially save first
|
||||
await asyncio.sleep(0.01)
|
||||
|
||||
# Save (will query DB for existing count)
|
||||
return await upsert_chat_session(session)
|
||||
|
||||
async def callback_save():
|
||||
"""Simulates long-running callback saving a message."""
|
||||
# Add 1 message
|
||||
session.messages.append(
|
||||
ChatMessage(role="tool", content="Callback result", tool_call_id="tc1")
|
||||
)
|
||||
|
||||
# Save immediately (will query DB for existing count)
|
||||
return await upsert_chat_session(session)
|
||||
|
||||
# Run both saves concurrently - one will hit collision detection
|
||||
results = await asyncio.gather(streaming_loop_save(), callback_save())
|
||||
|
||||
# Both should succeed
|
||||
assert all(r is not None for r in results)
|
||||
|
||||
# Reload session from DB to verify
|
||||
from backend.data.redis_client import get_redis_async
|
||||
|
||||
redis_key = f"chat:session:{session.session_id}"
|
||||
async_redis = await get_redis_async()
|
||||
await async_redis.delete(redis_key) # Clear cache to force DB load
|
||||
|
||||
loaded_session = await get_chat_session(session.session_id, test_user_id)
|
||||
assert loaded_session is not None
|
||||
|
||||
# Should have all 6 messages (3 initial + 2 streaming + 1 callback)
|
||||
assert len(loaded_session.messages) == 6
|
||||
|
||||
# Verify no duplicate sequences
|
||||
sequences = []
|
||||
for i, msg in enumerate(loaded_session.messages):
|
||||
# Messages should have sequential sequence numbers starting from 0
|
||||
sequences.append(i)
|
||||
|
||||
# All sequences should be unique and sequential
|
||||
assert sequences == list(range(6))
|
||||
|
||||
# Verify message content is preserved
|
||||
contents = [m.content for m in loaded_session.messages]
|
||||
assert "Message 0" in contents
|
||||
assert "Message 1" in contents
|
||||
assert "Message 2" in contents
|
||||
assert "Streaming message 1" in contents
|
||||
assert "Streaming message 2" in contents
|
||||
assert "Callback result" in contents
|
||||
|
||||
@@ -14,7 +14,6 @@ import pytest
|
||||
@pytest.mark.asyncio
|
||||
async def test_parallel_tool_calls_run_concurrently():
|
||||
"""Multiple tool calls should complete in ~max(delays), not sum(delays)."""
|
||||
# Import here to allow module-level mocking if needed
|
||||
from backend.copilot.response_model import (
|
||||
StreamToolInputAvailable,
|
||||
StreamToolOutputAvailable,
|
||||
@@ -32,7 +31,6 @@ async def test_parallel_tool_calls_run_concurrently():
|
||||
for i in range(n_tools)
|
||||
]
|
||||
|
||||
# Minimal session mock
|
||||
class FakeSession:
|
||||
session_id = "test"
|
||||
user_id = "test"
|
||||
@@ -42,7 +40,7 @@ async def test_parallel_tool_calls_run_concurrently():
|
||||
|
||||
original_yield = None
|
||||
|
||||
async def fake_yield(tc_list, idx, sess, lock=None):
|
||||
async def fake_yield(tc_list, idx, sess):
|
||||
yield StreamToolInputAvailable(
|
||||
toolCallId=tc_list[idx]["id"],
|
||||
toolName=tc_list[idx]["function"]["name"],
|
||||
@@ -101,7 +99,7 @@ async def test_single_tool_call_works():
|
||||
def __init__(self):
|
||||
self.messages = []
|
||||
|
||||
async def fake_yield(tc_list, idx, sess, lock=None):
|
||||
async def fake_yield(tc_list, idx, sess):
|
||||
yield StreamToolInputAvailable(toolCallId="call_0", toolName="t", input={})
|
||||
yield StreamToolOutputAvailable(toolCallId="call_0", toolName="t", output="{}")
|
||||
|
||||
@@ -144,7 +142,7 @@ async def test_retryable_error_propagates():
|
||||
def __init__(self):
|
||||
self.messages = []
|
||||
|
||||
async def fake_yield(tc_list, idx, sess, lock=None):
|
||||
async def fake_yield(tc_list, idx, sess):
|
||||
if idx == 1:
|
||||
raise KeyError("bad")
|
||||
from backend.copilot.response_model import StreamToolInputAvailable
|
||||
@@ -175,8 +173,8 @@ async def test_retryable_error_propagates():
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_session_lock_shared():
|
||||
"""All parallel tools should receive the same lock instance."""
|
||||
async def test_session_shared_across_parallel_tools():
|
||||
"""All parallel tools should receive the same session instance."""
|
||||
from backend.copilot.response_model import (
|
||||
StreamToolInputAvailable,
|
||||
StreamToolOutputAvailable,
|
||||
@@ -199,10 +197,10 @@ async def test_session_lock_shared():
|
||||
def __init__(self):
|
||||
self.messages = []
|
||||
|
||||
observed_locks = []
|
||||
observed_sessions = []
|
||||
|
||||
async def fake_yield(tc_list, idx, sess, lock=None):
|
||||
observed_locks.append(lock)
|
||||
async def fake_yield(tc_list, idx, sess):
|
||||
observed_sessions.append(sess)
|
||||
yield StreamToolInputAvailable(
|
||||
toolCallId=tc_list[idx]["id"], toolName=f"t_{idx}", input={}
|
||||
)
|
||||
@@ -222,9 +220,8 @@ async def test_session_lock_shared():
|
||||
finally:
|
||||
svc._yield_tool_call = orig
|
||||
|
||||
assert len(observed_locks) == 3
|
||||
assert observed_locks[0] is observed_locks[1] is observed_locks[2]
|
||||
assert isinstance(observed_locks[0], asyncio.Lock)
|
||||
assert len(observed_sessions) == 3
|
||||
assert observed_sessions[0] is observed_sessions[1] is observed_sessions[2]
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
@@ -251,7 +248,7 @@ async def test_cancellation_cleans_up():
|
||||
|
||||
started = asyncio.Event()
|
||||
|
||||
async def fake_yield(tc_list, idx, sess, lock=None):
|
||||
async def fake_yield(tc_list, idx, sess):
|
||||
yield StreamToolInputAvailable(
|
||||
toolCallId=tc_list[idx]["id"], toolName=f"t_{idx}", input={}
|
||||
)
|
||||
|
||||
@@ -5,6 +5,8 @@ This module implements the AI SDK UI Stream Protocol (v1) for streaming chat res
|
||||
See: https://ai-sdk.dev/docs/ai-sdk-ui/stream-protocol
|
||||
"""
|
||||
|
||||
import json
|
||||
import logging
|
||||
from enum import Enum
|
||||
from typing import Any
|
||||
|
||||
@@ -12,6 +14,8 @@ from pydantic import BaseModel, Field
|
||||
|
||||
from backend.util.json import dumps as json_dumps
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ResponseType(str, Enum):
|
||||
"""Types of streaming responses following AI SDK protocol."""
|
||||
@@ -34,9 +38,6 @@ class ResponseType(str, Enum):
|
||||
TOOL_INPUT_AVAILABLE = "tool-input-available"
|
||||
TOOL_OUTPUT_AVAILABLE = "tool-output-available"
|
||||
|
||||
# Long-running tool notification (custom extension - uses AI SDK DataUIPart format)
|
||||
LONG_RUNNING_START = "data-long-running-start"
|
||||
|
||||
# Other
|
||||
ERROR = "error"
|
||||
USAGE = "usage"
|
||||
@@ -50,7 +51,8 @@ class StreamBaseResponse(BaseModel):
|
||||
|
||||
def to_sse(self) -> str:
|
||||
"""Convert to SSE format."""
|
||||
return f"data: {self.model_dump_json()}\n\n"
|
||||
json_str = self.model_dump_json(exclude_none=True)
|
||||
return f"data: {json_str}\n\n"
|
||||
|
||||
|
||||
# ========== Message Lifecycle ==========
|
||||
@@ -61,15 +63,13 @@ class StreamStart(StreamBaseResponse):
|
||||
|
||||
type: ResponseType = ResponseType.START
|
||||
messageId: str = Field(..., description="Unique message ID")
|
||||
taskId: str | None = Field(
|
||||
sessionId: str | None = Field(
|
||||
default=None,
|
||||
description="Task ID for SSE reconnection. Clients can reconnect using GET /tasks/{taskId}/stream",
|
||||
description="Session ID for SSE reconnection.",
|
||||
)
|
||||
|
||||
def to_sse(self) -> str:
|
||||
"""Convert to SSE format, excluding non-protocol fields like taskId."""
|
||||
import json
|
||||
|
||||
"""Convert to SSE format, excluding non-protocol fields like sessionId."""
|
||||
data: dict[str, Any] = {
|
||||
"type": self.type.value,
|
||||
"messageId": self.messageId,
|
||||
@@ -148,10 +148,6 @@ class StreamToolInputAvailable(StreamBaseResponse):
|
||||
input: dict[str, Any] = Field(
|
||||
default_factory=dict, description="Tool input arguments"
|
||||
)
|
||||
providerMetadata: dict[str, Any] | None = Field(
|
||||
default=None,
|
||||
description="Provider metadata - used to pass isLongRunning flag to frontend",
|
||||
)
|
||||
|
||||
|
||||
class StreamToolOutputAvailable(StreamBaseResponse):
|
||||
@@ -170,8 +166,6 @@ class StreamToolOutputAvailable(StreamBaseResponse):
|
||||
|
||||
def to_sse(self) -> str:
|
||||
"""Convert to SSE format, excluding non-spec fields."""
|
||||
import json
|
||||
|
||||
data = {
|
||||
"type": self.type.value,
|
||||
"toolCallId": self.toolCallId,
|
||||
@@ -180,20 +174,6 @@ class StreamToolOutputAvailable(StreamBaseResponse):
|
||||
return f"data: {json.dumps(data)}\n\n"
|
||||
|
||||
|
||||
class StreamLongRunningStart(StreamBaseResponse):
|
||||
"""Notification that a long-running tool has started.
|
||||
|
||||
Custom extension using AI SDK DataUIPart format. Signals the frontend to show
|
||||
UI feedback while the tool executes.
|
||||
"""
|
||||
|
||||
type: ResponseType = ResponseType.LONG_RUNNING_START
|
||||
data: dict[str, Any] = Field(
|
||||
default_factory=dict,
|
||||
description="Data for the long-running event containing toolCallId and toolName",
|
||||
)
|
||||
|
||||
|
||||
# ========== Other ==========
|
||||
|
||||
|
||||
|
||||
57
autogpt_platform/backend/backend/copilot/sdk/dummy.py
Normal file
57
autogpt_platform/backend/backend/copilot/sdk/dummy.py
Normal file
@@ -0,0 +1,57 @@
|
||||
"""Dummy SDK service for testing copilot streaming.
|
||||
|
||||
Returns mock streaming responses without calling Claude Agent SDK.
|
||||
Enable via COPILOT_TEST_MODE=true environment variable.
|
||||
|
||||
WARNING: This is for testing only. Do not use in production.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
import uuid
|
||||
from collections.abc import AsyncGenerator
|
||||
|
||||
from ..model import ChatSession
|
||||
from ..response_model import StreamBaseResponse, StreamStart, StreamTextDelta
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
async def stream_chat_completion_dummy(
|
||||
session_id: str,
|
||||
message: str | None = None,
|
||||
tool_call_response: str | None = None,
|
||||
is_user_message: bool = True,
|
||||
user_id: str | None = None,
|
||||
retry_count: int = 0,
|
||||
session: ChatSession | None = None,
|
||||
context: dict[str, str] | None = None,
|
||||
) -> AsyncGenerator[StreamBaseResponse, None]:
|
||||
"""Stream dummy chat completion for testing.
|
||||
|
||||
Returns a simple streaming response with text deltas to test:
|
||||
- Streaming infrastructure works
|
||||
- No timeout occurs
|
||||
- Text arrives in chunks
|
||||
- StreamFinish is sent by mark_session_completed
|
||||
"""
|
||||
logger.warning(
|
||||
f"[TEST MODE] Using dummy copilot streaming for session {session_id}"
|
||||
)
|
||||
|
||||
message_id = str(uuid.uuid4())
|
||||
text_block_id = str(uuid.uuid4())
|
||||
|
||||
# Start the stream
|
||||
yield StreamStart(messageId=message_id, sessionId=session_id)
|
||||
|
||||
# Simulate streaming text response with delays
|
||||
dummy_response = "I counted: 1... 2... 3. All done!"
|
||||
words = dummy_response.split()
|
||||
|
||||
for i, word in enumerate(words):
|
||||
# Add space except for last word
|
||||
text = word if i == len(words) - 1 else f"{word} "
|
||||
yield StreamTextDelta(id=text_block_id, delta=text)
|
||||
# Small delay to simulate real streaming
|
||||
await asyncio.sleep(0.1)
|
||||
172
autogpt_platform/backend/backend/copilot/sdk/otel_setup_test.py
Normal file
172
autogpt_platform/backend/backend/copilot/sdk/otel_setup_test.py
Normal file
@@ -0,0 +1,172 @@
|
||||
"""Tests for OTEL tracing setup in the SDK copilot path."""
|
||||
|
||||
import os
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
|
||||
class TestSetupLangfuseOtel:
|
||||
"""Tests for _setup_langfuse_otel()."""
|
||||
|
||||
def test_noop_when_langfuse_not_configured(self):
|
||||
"""No env vars should be set when Langfuse credentials are missing."""
|
||||
with patch(
|
||||
"backend.copilot.sdk.service._is_langfuse_configured", return_value=False
|
||||
):
|
||||
from backend.copilot.sdk.service import _setup_langfuse_otel
|
||||
|
||||
# Clear any previously set env vars
|
||||
env_keys = [
|
||||
"LANGSMITH_OTEL_ENABLED",
|
||||
"LANGSMITH_OTEL_ONLY",
|
||||
"LANGSMITH_TRACING",
|
||||
"OTEL_EXPORTER_OTLP_ENDPOINT",
|
||||
"OTEL_EXPORTER_OTLP_HEADERS",
|
||||
]
|
||||
saved = {k: os.environ.pop(k, None) for k in env_keys}
|
||||
try:
|
||||
_setup_langfuse_otel()
|
||||
for key in env_keys:
|
||||
assert key not in os.environ, f"{key} should not be set"
|
||||
finally:
|
||||
for k, v in saved.items():
|
||||
if v is not None:
|
||||
os.environ[k] = v
|
||||
|
||||
def test_sets_env_vars_when_langfuse_configured(self):
|
||||
"""OTEL env vars should be set when Langfuse credentials exist."""
|
||||
mock_settings = MagicMock()
|
||||
mock_settings.secrets.langfuse_public_key = "pk-test-123"
|
||||
mock_settings.secrets.langfuse_secret_key = "sk-test-456"
|
||||
mock_settings.secrets.langfuse_host = "https://langfuse.example.com"
|
||||
mock_settings.secrets.langfuse_tracing_environment = "test"
|
||||
|
||||
with (
|
||||
patch(
|
||||
"backend.copilot.sdk.service._is_langfuse_configured",
|
||||
return_value=True,
|
||||
),
|
||||
patch("backend.copilot.sdk.service.Settings", return_value=mock_settings),
|
||||
patch(
|
||||
"backend.copilot.sdk.service.configure_claude_agent_sdk",
|
||||
return_value=True,
|
||||
) as mock_configure,
|
||||
):
|
||||
from backend.copilot.sdk.service import _setup_langfuse_otel
|
||||
|
||||
# Clear env vars so setdefault works
|
||||
env_keys = [
|
||||
"LANGSMITH_OTEL_ENABLED",
|
||||
"LANGSMITH_OTEL_ONLY",
|
||||
"LANGSMITH_TRACING",
|
||||
"OTEL_EXPORTER_OTLP_ENDPOINT",
|
||||
"OTEL_EXPORTER_OTLP_HEADERS",
|
||||
"OTEL_RESOURCE_ATTRIBUTES",
|
||||
]
|
||||
saved = {k: os.environ.pop(k, None) for k in env_keys}
|
||||
try:
|
||||
_setup_langfuse_otel()
|
||||
|
||||
assert os.environ["LANGSMITH_OTEL_ENABLED"] == "true"
|
||||
assert os.environ["LANGSMITH_OTEL_ONLY"] == "true"
|
||||
assert os.environ["LANGSMITH_TRACING"] == "true"
|
||||
assert (
|
||||
os.environ["OTEL_EXPORTER_OTLP_ENDPOINT"]
|
||||
== "https://langfuse.example.com/api/public/otel"
|
||||
)
|
||||
assert "Authorization=Basic" in os.environ["OTEL_EXPORTER_OTLP_HEADERS"]
|
||||
assert (
|
||||
os.environ["OTEL_RESOURCE_ATTRIBUTES"]
|
||||
== "langfuse.environment=test"
|
||||
)
|
||||
|
||||
mock_configure.assert_called_once_with(tags=["sdk"])
|
||||
finally:
|
||||
for k, v in saved.items():
|
||||
if v is not None:
|
||||
os.environ[k] = v
|
||||
elif k in os.environ:
|
||||
del os.environ[k]
|
||||
|
||||
def test_existing_env_vars_not_overwritten(self):
|
||||
"""Explicit env-var overrides should not be clobbered."""
|
||||
mock_settings = MagicMock()
|
||||
mock_settings.secrets.langfuse_public_key = "pk-test"
|
||||
mock_settings.secrets.langfuse_secret_key = "sk-test"
|
||||
mock_settings.secrets.langfuse_host = "https://langfuse.example.com"
|
||||
|
||||
with (
|
||||
patch(
|
||||
"backend.copilot.sdk.service._is_langfuse_configured",
|
||||
return_value=True,
|
||||
),
|
||||
patch("backend.copilot.sdk.service.Settings", return_value=mock_settings),
|
||||
patch(
|
||||
"backend.copilot.sdk.service.configure_claude_agent_sdk",
|
||||
return_value=True,
|
||||
),
|
||||
):
|
||||
from backend.copilot.sdk.service import _setup_langfuse_otel
|
||||
|
||||
saved = os.environ.get("OTEL_EXPORTER_OTLP_ENDPOINT")
|
||||
try:
|
||||
os.environ["OTEL_EXPORTER_OTLP_ENDPOINT"] = "https://custom.endpoint/v1"
|
||||
_setup_langfuse_otel()
|
||||
assert (
|
||||
os.environ["OTEL_EXPORTER_OTLP_ENDPOINT"]
|
||||
== "https://custom.endpoint/v1"
|
||||
)
|
||||
finally:
|
||||
if saved is not None:
|
||||
os.environ["OTEL_EXPORTER_OTLP_ENDPOINT"] = saved
|
||||
elif "OTEL_EXPORTER_OTLP_ENDPOINT" in os.environ:
|
||||
del os.environ["OTEL_EXPORTER_OTLP_ENDPOINT"]
|
||||
|
||||
def test_graceful_failure_on_exception(self):
|
||||
"""Setup should not raise even if internal code fails."""
|
||||
with (
|
||||
patch(
|
||||
"backend.copilot.sdk.service._is_langfuse_configured",
|
||||
return_value=True,
|
||||
),
|
||||
patch(
|
||||
"backend.copilot.sdk.service.Settings",
|
||||
side_effect=RuntimeError("settings unavailable"),
|
||||
),
|
||||
):
|
||||
from backend.copilot.sdk.service import _setup_langfuse_otel
|
||||
|
||||
# Should not raise — just logs and returns
|
||||
_setup_langfuse_otel()
|
||||
|
||||
|
||||
class TestPropagateAttributesImport:
|
||||
"""Verify langfuse.propagate_attributes is available."""
|
||||
|
||||
def test_propagate_attributes_is_importable(self):
|
||||
from langfuse import propagate_attributes
|
||||
|
||||
assert callable(propagate_attributes)
|
||||
|
||||
def test_propagate_attributes_returns_context_manager(self):
|
||||
from langfuse import propagate_attributes
|
||||
|
||||
ctx = propagate_attributes(user_id="u1", session_id="s1", tags=["test"])
|
||||
assert hasattr(ctx, "__enter__")
|
||||
assert hasattr(ctx, "__exit__")
|
||||
|
||||
|
||||
class TestReceiveResponseCompat:
|
||||
"""Verify ClaudeSDKClient.receive_response() exists (langsmith patches it)."""
|
||||
|
||||
def test_receive_response_exists(self):
|
||||
from claude_agent_sdk import ClaudeSDKClient
|
||||
|
||||
assert hasattr(ClaudeSDKClient, "receive_response")
|
||||
|
||||
def test_receive_response_is_async_generator(self):
|
||||
import inspect
|
||||
|
||||
from claude_agent_sdk import ClaudeSDKClient
|
||||
|
||||
method = getattr(ClaudeSDKClient, "receive_response")
|
||||
assert inspect.isfunction(method) or inspect.ismethod(method)
|
||||
@@ -34,7 +34,6 @@ from backend.copilot.response_model import (
|
||||
StreamToolInputStart,
|
||||
StreamToolOutputAvailable,
|
||||
)
|
||||
from backend.copilot.tools import get_tool
|
||||
|
||||
from .tool_adapter import MCP_TOOL_PREFIX, pop_pending_tool_output
|
||||
|
||||
@@ -56,13 +55,8 @@ class SDKResponseAdapter:
|
||||
self.has_ended_text = False
|
||||
self.current_tool_calls: dict[str, dict[str, str]] = {}
|
||||
self.resolved_tool_calls: set[str] = set()
|
||||
self.task_id: str | None = None
|
||||
self.step_open = False
|
||||
|
||||
def set_task_id(self, task_id: str) -> None:
|
||||
"""Set the task ID for reconnection support."""
|
||||
self.task_id = task_id
|
||||
|
||||
@property
|
||||
def has_unresolved_tool_calls(self) -> bool:
|
||||
"""True when there are tool calls that haven't received output yet."""
|
||||
@@ -75,7 +69,7 @@ class SDKResponseAdapter:
|
||||
if isinstance(sdk_message, SystemMessage):
|
||||
if sdk_message.subtype == "init":
|
||||
responses.append(
|
||||
StreamStart(messageId=self.message_id, taskId=self.task_id)
|
||||
StreamStart(messageId=self.message_id, sessionId=self.session_id)
|
||||
)
|
||||
# Open the first step (matches non-SDK: StreamStart then StreamStartStep)
|
||||
responses.append(StreamStartStep())
|
||||
@@ -112,15 +106,6 @@ class SDKResponseAdapter:
|
||||
# instead of "mcp__copilot__find_block".
|
||||
tool_name = block.name.removeprefix(MCP_TOOL_PREFIX)
|
||||
|
||||
# Check if this is a long-running tool to trigger UI feedback
|
||||
tool = get_tool(tool_name)
|
||||
is_long_running = tool.is_long_running if tool else False
|
||||
|
||||
logger.info(
|
||||
f"[ADAPTER] Tool: {tool_name}, has_tool={tool is not None}, "
|
||||
f"is_long_running={is_long_running}"
|
||||
)
|
||||
|
||||
responses.append(
|
||||
StreamToolInputStart(toolCallId=block.id, toolName=tool_name)
|
||||
)
|
||||
@@ -129,15 +114,8 @@ class SDKResponseAdapter:
|
||||
toolCallId=block.id,
|
||||
toolName=tool_name,
|
||||
input=block.input,
|
||||
providerMetadata=(
|
||||
{"isLongRunning": True} if is_long_running else None
|
||||
),
|
||||
)
|
||||
)
|
||||
logger.info(
|
||||
f"[ADAPTER] Created StreamToolInputAvailable with "
|
||||
f"providerMetadata={{'isLongRunning': {is_long_running}}}"
|
||||
)
|
||||
self.current_tool_calls[block.id] = {"name": tool_name}
|
||||
|
||||
elif isinstance(sdk_message, UserMessage):
|
||||
|
||||
@@ -37,9 +37,7 @@ from .tool_adapter import wait_for_stash
|
||||
|
||||
|
||||
def _adapter() -> SDKResponseAdapter:
|
||||
a = SDKResponseAdapter(message_id="msg-1")
|
||||
a.set_task_id("task-1")
|
||||
return a
|
||||
return SDKResponseAdapter(message_id="msg-1", session_id="session-1")
|
||||
|
||||
|
||||
# -- SystemMessage -----------------------------------------------------------
|
||||
@@ -51,7 +49,7 @@ def test_system_init_emits_start_and_step():
|
||||
assert len(results) == 2
|
||||
assert isinstance(results[0], StreamStart)
|
||||
assert results[0].messageId == "msg-1"
|
||||
assert results[0].taskId == "task-1"
|
||||
assert results[0].sessionId == "session-1"
|
||||
assert isinstance(results[1], StreamStartStep)
|
||||
|
||||
|
||||
|
||||
@@ -160,7 +160,7 @@ def create_security_hooks(
|
||||
Args:
|
||||
user_id: Current user ID for isolation validation
|
||||
sdk_cwd: SDK working directory for workspace-scoped tool validation
|
||||
max_subtasks: Maximum Task (sub-agent) spawns allowed per session
|
||||
max_subtasks: Maximum concurrent Task (sub-agent) spawns allowed per session
|
||||
on_stop: Callback ``(transcript_path, sdk_session_id)`` invoked when
|
||||
the SDK finishes processing — used to read the JSONL transcript
|
||||
before the CLI process exits.
|
||||
@@ -172,8 +172,9 @@ def create_security_hooks(
|
||||
from claude_agent_sdk import HookMatcher
|
||||
from claude_agent_sdk.types import HookContext, HookInput, SyncHookJSONOutput
|
||||
|
||||
# Per-session counter for Task sub-agent spawns
|
||||
task_spawn_count = 0
|
||||
# Per-session tracking for Task sub-agent concurrency.
|
||||
# Set of tool_use_ids that consumed a slot — len() is the active count.
|
||||
task_tool_use_ids: set[str] = set()
|
||||
|
||||
async def pre_tool_use_hook(
|
||||
input_data: HookInput,
|
||||
@@ -181,7 +182,6 @@ def create_security_hooks(
|
||||
context: HookContext,
|
||||
) -> SyncHookJSONOutput:
|
||||
"""Combined pre-tool-use validation hook."""
|
||||
nonlocal task_spawn_count
|
||||
_ = context # unused but required by signature
|
||||
tool_name = cast(str, input_data.get("tool_name", ""))
|
||||
tool_input = cast(dict[str, Any], input_data.get("tool_input", {}))
|
||||
@@ -200,18 +200,18 @@ def create_security_hooks(
|
||||
"(remove the run_in_background parameter)."
|
||||
),
|
||||
)
|
||||
if task_spawn_count >= max_subtasks:
|
||||
if len(task_tool_use_ids) >= max_subtasks:
|
||||
logger.warning(
|
||||
f"[SDK] Task limit reached ({max_subtasks}), user={user_id}"
|
||||
)
|
||||
return cast(
|
||||
SyncHookJSONOutput,
|
||||
_deny(
|
||||
f"Maximum {max_subtasks} sub-tasks per session. "
|
||||
"Please continue in the main conversation."
|
||||
f"Maximum {max_subtasks} concurrent sub-tasks. "
|
||||
"Wait for running sub-tasks to finish, "
|
||||
"or continue in the main conversation."
|
||||
),
|
||||
)
|
||||
task_spawn_count += 1
|
||||
|
||||
# Strip MCP prefix for consistent validation
|
||||
is_copilot_tool = tool_name.startswith(MCP_TOOL_PREFIX)
|
||||
@@ -229,9 +229,24 @@ def create_security_hooks(
|
||||
if result:
|
||||
return cast(SyncHookJSONOutput, result)
|
||||
|
||||
# Reserve the Task slot only after all validations pass
|
||||
if tool_name == "Task" and tool_use_id is not None:
|
||||
task_tool_use_ids.add(tool_use_id)
|
||||
|
||||
logger.debug(f"[SDK] Tool start: {tool_name}, user={user_id}")
|
||||
return cast(SyncHookJSONOutput, {})
|
||||
|
||||
def _release_task_slot(tool_name: str, tool_use_id: str | None) -> None:
|
||||
"""Release a Task concurrency slot if one was reserved."""
|
||||
if tool_name == "Task" and tool_use_id in task_tool_use_ids:
|
||||
task_tool_use_ids.discard(tool_use_id)
|
||||
logger.info(
|
||||
"[SDK] Task slot released, active=%d/%d, user=%s",
|
||||
len(task_tool_use_ids),
|
||||
max_subtasks,
|
||||
user_id,
|
||||
)
|
||||
|
||||
async def post_tool_use_hook(
|
||||
input_data: HookInput,
|
||||
tool_use_id: str | None,
|
||||
@@ -246,6 +261,8 @@ def create_security_hooks(
|
||||
"""
|
||||
_ = context
|
||||
tool_name = cast(str, input_data.get("tool_name", ""))
|
||||
|
||||
_release_task_slot(tool_name, tool_use_id)
|
||||
is_builtin = not tool_name.startswith(MCP_TOOL_PREFIX)
|
||||
logger.info(
|
||||
"[SDK] PostToolUse: %s (builtin=%s, tool_use_id=%s)",
|
||||
@@ -289,6 +306,9 @@ def create_security_hooks(
|
||||
f"[SDK] Tool failed: {tool_name}, error={error}, "
|
||||
f"user={user_id}, tool_use_id={tool_use_id}"
|
||||
)
|
||||
|
||||
_release_task_slot(tool_name, tool_use_id)
|
||||
|
||||
return cast(SyncHookJSONOutput, {})
|
||||
|
||||
async def pre_compact_hook(
|
||||
|
||||
@@ -208,19 +208,22 @@ def test_bash_builtin_blocked_message_clarity():
|
||||
|
||||
@pytest.fixture()
|
||||
def _hooks():
|
||||
"""Create security hooks and return the PreToolUse handler."""
|
||||
"""Create security hooks and return (pre, post, post_failure) handlers."""
|
||||
from .security_hooks import create_security_hooks
|
||||
|
||||
hooks = create_security_hooks(user_id="u1", sdk_cwd=SDK_CWD, max_subtasks=2)
|
||||
pre = hooks["PreToolUse"][0].hooks[0]
|
||||
return pre
|
||||
post = hooks["PostToolUse"][0].hooks[0]
|
||||
post_failure = hooks["PostToolUseFailure"][0].hooks[0]
|
||||
return pre, post, post_failure
|
||||
|
||||
|
||||
@pytest.mark.skipif(not _sdk_available(), reason="claude_agent_sdk not installed")
|
||||
@pytest.mark.asyncio
|
||||
async def test_task_background_blocked(_hooks):
|
||||
"""Task with run_in_background=true must be denied."""
|
||||
result = await _hooks(
|
||||
pre, _, _ = _hooks
|
||||
result = await pre(
|
||||
{"tool_name": "Task", "tool_input": {"run_in_background": True, "prompt": "x"}},
|
||||
tool_use_id=None,
|
||||
context={},
|
||||
@@ -233,9 +236,10 @@ async def test_task_background_blocked(_hooks):
|
||||
@pytest.mark.asyncio
|
||||
async def test_task_foreground_allowed(_hooks):
|
||||
"""Task without run_in_background should be allowed."""
|
||||
result = await _hooks(
|
||||
pre, _, _ = _hooks
|
||||
result = await pre(
|
||||
{"tool_name": "Task", "tool_input": {"prompt": "do stuff"}},
|
||||
tool_use_id=None,
|
||||
tool_use_id="tu-1",
|
||||
context={},
|
||||
)
|
||||
assert not _is_denied(result)
|
||||
@@ -245,25 +249,102 @@ async def test_task_foreground_allowed(_hooks):
|
||||
@pytest.mark.asyncio
|
||||
async def test_task_limit_enforced(_hooks):
|
||||
"""Task spawns beyond max_subtasks should be denied."""
|
||||
pre, _, _ = _hooks
|
||||
# First two should pass
|
||||
for _ in range(2):
|
||||
result = await _hooks(
|
||||
for i in range(2):
|
||||
result = await pre(
|
||||
{"tool_name": "Task", "tool_input": {"prompt": "ok"}},
|
||||
tool_use_id=None,
|
||||
tool_use_id=f"tu-limit-{i}",
|
||||
context={},
|
||||
)
|
||||
assert not _is_denied(result)
|
||||
|
||||
# Third should be denied (limit=2)
|
||||
result = await _hooks(
|
||||
result = await pre(
|
||||
{"tool_name": "Task", "tool_input": {"prompt": "over limit"}},
|
||||
tool_use_id=None,
|
||||
tool_use_id="tu-limit-2",
|
||||
context={},
|
||||
)
|
||||
assert _is_denied(result)
|
||||
assert "Maximum" in _reason(result)
|
||||
|
||||
|
||||
@pytest.mark.skipif(not _sdk_available(), reason="claude_agent_sdk not installed")
|
||||
@pytest.mark.asyncio
|
||||
async def test_task_slot_released_on_completion(_hooks):
|
||||
"""Completing a Task should free a slot so new Tasks can be spawned."""
|
||||
pre, post, _ = _hooks
|
||||
# Fill both slots
|
||||
for i in range(2):
|
||||
result = await pre(
|
||||
{"tool_name": "Task", "tool_input": {"prompt": "ok"}},
|
||||
tool_use_id=f"tu-comp-{i}",
|
||||
context={},
|
||||
)
|
||||
assert not _is_denied(result)
|
||||
|
||||
# Third should be denied — at capacity
|
||||
result = await pre(
|
||||
{"tool_name": "Task", "tool_input": {"prompt": "over"}},
|
||||
tool_use_id="tu-comp-2",
|
||||
context={},
|
||||
)
|
||||
assert _is_denied(result)
|
||||
|
||||
# Complete first task — frees a slot
|
||||
await post(
|
||||
{"tool_name": "Task", "tool_input": {}},
|
||||
tool_use_id="tu-comp-0",
|
||||
context={},
|
||||
)
|
||||
|
||||
# Now a new Task should be allowed
|
||||
result = await pre(
|
||||
{"tool_name": "Task", "tool_input": {"prompt": "after release"}},
|
||||
tool_use_id="tu-comp-3",
|
||||
context={},
|
||||
)
|
||||
assert not _is_denied(result)
|
||||
|
||||
|
||||
@pytest.mark.skipif(not _sdk_available(), reason="claude_agent_sdk not installed")
|
||||
@pytest.mark.asyncio
|
||||
async def test_task_slot_released_on_failure(_hooks):
|
||||
"""A failed Task should also free its concurrency slot."""
|
||||
pre, _, post_failure = _hooks
|
||||
# Fill both slots
|
||||
for i in range(2):
|
||||
result = await pre(
|
||||
{"tool_name": "Task", "tool_input": {"prompt": "ok"}},
|
||||
tool_use_id=f"tu-fail-{i}",
|
||||
context={},
|
||||
)
|
||||
assert not _is_denied(result)
|
||||
|
||||
# At capacity
|
||||
result = await pre(
|
||||
{"tool_name": "Task", "tool_input": {"prompt": "over"}},
|
||||
tool_use_id="tu-fail-2",
|
||||
context={},
|
||||
)
|
||||
assert _is_denied(result)
|
||||
|
||||
# Fail first task — should free a slot
|
||||
await post_failure(
|
||||
{"tool_name": "Task", "tool_input": {}, "error": "something broke"},
|
||||
tool_use_id="tu-fail-0",
|
||||
context={},
|
||||
)
|
||||
|
||||
# New Task should be allowed
|
||||
result = await pre(
|
||||
{"tool_name": "Task", "tool_input": {"prompt": "after failure"}},
|
||||
tool_use_id="tu-fail-3",
|
||||
context={},
|
||||
)
|
||||
assert not _is_denied(result)
|
||||
|
||||
|
||||
# -- _is_tool_error_or_denial ------------------------------------------------
|
||||
|
||||
|
||||
@@ -298,7 +379,9 @@ class TestIsToolErrorOrDenial:
|
||||
def test_subtask_limit_denial(self):
|
||||
assert (
|
||||
_is_tool_error_or_denial(
|
||||
"Maximum 2 sub-tasks per session. Please continue in the main conversation."
|
||||
"Maximum 2 concurrent sub-tasks. "
|
||||
"Wait for running sub-tasks to finish, "
|
||||
"or continue in the main conversation."
|
||||
)
|
||||
is True
|
||||
)
|
||||
|
||||
@@ -1,15 +1,23 @@
|
||||
"""Claude Agent SDK service layer for CoPilot chat completions."""
|
||||
|
||||
import asyncio
|
||||
import base64
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
import uuid
|
||||
from collections.abc import AsyncGenerator
|
||||
from dataclasses import dataclass
|
||||
from typing import Any
|
||||
from typing import Any, cast
|
||||
|
||||
from langfuse import propagate_attributes
|
||||
from langsmith.integrations.claude_agent_sdk import configure_claude_agent_sdk
|
||||
|
||||
from backend.data.redis_client import get_redis_async
|
||||
from backend.executor.cluster_lock import AsyncClusterLock
|
||||
from backend.util.exceptions import NotFoundError
|
||||
from backend.util.settings import Settings
|
||||
|
||||
from ..config import ChatConfig
|
||||
from ..model import (
|
||||
@@ -23,14 +31,17 @@ from ..response_model import (
|
||||
StreamBaseResponse,
|
||||
StreamError,
|
||||
StreamFinish,
|
||||
StreamFinishStep,
|
||||
StreamHeartbeat,
|
||||
StreamStart,
|
||||
StreamTextDelta,
|
||||
StreamToolInputAvailable,
|
||||
StreamToolOutputAvailable,
|
||||
)
|
||||
from ..service import _build_system_prompt, _generate_session_title
|
||||
from ..service import (
|
||||
_build_system_prompt,
|
||||
_generate_session_title,
|
||||
_is_langfuse_configured,
|
||||
)
|
||||
from ..tools.sandbox import WORKSPACE_PREFIX, make_session_path
|
||||
from ..tracking import track_user_message
|
||||
from .response_adapter import SDKResponseAdapter
|
||||
@@ -54,6 +65,56 @@ from .transcript import (
|
||||
logger = logging.getLogger(__name__)
|
||||
config = ChatConfig()
|
||||
|
||||
|
||||
def _setup_langfuse_otel() -> None:
|
||||
"""Configure OTEL tracing for the Claude Agent SDK → Langfuse.
|
||||
|
||||
This uses LangSmith's built-in Claude Agent SDK integration to monkey-patch
|
||||
``ClaudeSDKClient``, capturing every tool call and model turn as OTEL spans.
|
||||
Spans are exported via OTLP to Langfuse (or any OTEL-compatible backend).
|
||||
|
||||
To route traces elsewhere, override ``OTEL_EXPORTER_OTLP_ENDPOINT`` and
|
||||
``OTEL_EXPORTER_OTLP_HEADERS`` environment variables — no code changes needed.
|
||||
"""
|
||||
if not _is_langfuse_configured():
|
||||
return
|
||||
|
||||
try:
|
||||
settings = Settings()
|
||||
pk = settings.secrets.langfuse_public_key
|
||||
sk = settings.secrets.langfuse_secret_key
|
||||
host = settings.secrets.langfuse_host
|
||||
|
||||
# OTEL exporter config — these are only set if not already present,
|
||||
# so explicit env-var overrides always win.
|
||||
creds = base64.b64encode(f"{pk}:{sk}".encode()).decode()
|
||||
os.environ.setdefault("LANGSMITH_OTEL_ENABLED", "true")
|
||||
os.environ.setdefault("LANGSMITH_OTEL_ONLY", "true")
|
||||
os.environ.setdefault("LANGSMITH_TRACING", "true")
|
||||
os.environ.setdefault("OTEL_EXPORTER_OTLP_ENDPOINT", f"{host}/api/public/otel")
|
||||
os.environ.setdefault(
|
||||
"OTEL_EXPORTER_OTLP_HEADERS", f"Authorization=Basic {creds}"
|
||||
)
|
||||
|
||||
# Set the Langfuse environment via OTEL resource attributes so the
|
||||
# Langfuse server maps it to the first-class environment field.
|
||||
tracing_env = settings.secrets.langfuse_tracing_environment
|
||||
os.environ.setdefault(
|
||||
"OTEL_RESOURCE_ATTRIBUTES",
|
||||
f"langfuse.environment={tracing_env}",
|
||||
)
|
||||
|
||||
configure_claude_agent_sdk(tags=["sdk"])
|
||||
logger.info(
|
||||
"OTEL tracing configured for Claude Agent SDK → %s [%s]", host, tracing_env
|
||||
)
|
||||
except Exception:
|
||||
logger.warning("OTEL setup skipped — failed to configure", exc_info=True)
|
||||
|
||||
|
||||
_setup_langfuse_otel()
|
||||
|
||||
|
||||
# Set to hold background tasks to prevent garbage collection
|
||||
_background_tasks: set[asyncio.Task[Any]] = set()
|
||||
|
||||
@@ -73,13 +134,23 @@ class CapturedTranscript:
|
||||
|
||||
_SDK_CWD_PREFIX = WORKSPACE_PREFIX
|
||||
|
||||
# Special message prefixes for text-based markers (parsed by frontend).
|
||||
# The hex suffix makes accidental LLM generation of these strings virtually
|
||||
# impossible, avoiding false-positive marker detection in normal conversation.
|
||||
COPILOT_ERROR_PREFIX = "[__COPILOT_ERROR_f7a1__]" # Renders as ErrorCard
|
||||
COPILOT_SYSTEM_PREFIX = "[__COPILOT_SYSTEM_e3b0__]" # Renders as system info message
|
||||
|
||||
# Heartbeat interval — keep SSE alive through proxies/LBs during tool execution.
|
||||
_HEARTBEAT_INTERVAL = 15.0 # seconds
|
||||
# IMPORTANT: Must be less than frontend timeout (12s in useCopilotPage.ts)
|
||||
_HEARTBEAT_INTERVAL = 3.0 # seconds
|
||||
|
||||
|
||||
# Appended to the system prompt to inform the agent about available tools.
|
||||
# The SDK built-in Bash is NOT available — use mcp__copilot__bash_exec instead,
|
||||
# which has kernel-level network isolation (unshare --net).
|
||||
_SDK_TOOL_SUPPLEMENT = """
|
||||
def _build_sdk_tool_supplement(cwd: str) -> str:
|
||||
"""Build the SDK tool supplement with the actual working directory injected."""
|
||||
return f"""
|
||||
|
||||
## Tool notes
|
||||
|
||||
@@ -87,9 +158,16 @@ _SDK_TOOL_SUPPLEMENT = """
|
||||
- The SDK built-in Bash tool is NOT available. Use the `bash_exec` MCP tool
|
||||
for shell commands — it runs in a network-isolated sandbox.
|
||||
|
||||
### Working directory
|
||||
- Your working directory is: `{cwd}`
|
||||
- All SDK Read/Write/Edit/Glob/Grep tools AND `bash_exec` operate inside this
|
||||
directory. This is the ONLY writable path — do not attempt to read or write
|
||||
anywhere else on the filesystem.
|
||||
- Use relative paths or absolute paths under `{cwd}` for all file operations.
|
||||
|
||||
### Two storage systems — CRITICAL to understand
|
||||
|
||||
1. **Ephemeral working directory** (`/tmp/copilot-<session>/`):
|
||||
1. **Ephemeral working directory** (`{cwd}`):
|
||||
- Shared by SDK Read/Write/Edit/Glob/Grep tools AND `bash_exec`
|
||||
- Files here are **lost between turns** — do NOT rely on them persisting
|
||||
- Use for temporary work: running scripts, processing data, etc.
|
||||
@@ -115,10 +193,25 @@ When you create or modify important files (code, configs, outputs), you MUST:
|
||||
2. At the start of a new turn, call `list_workspace_files` to see what files
|
||||
are available from previous turns
|
||||
|
||||
### Sharing files with the user
|
||||
After saving a file to the persistent workspace with `write_workspace_file`,
|
||||
share it with the user by embedding the `download_url` from the response in
|
||||
your message as a Markdown link or image:
|
||||
|
||||
- **Any file** — shows as a clickable download link:
|
||||
`[report.csv](workspace://file_id#text/csv)`
|
||||
- **Image** — renders inline in chat:
|
||||
``
|
||||
- **Video** — renders inline in chat with player controls:
|
||||
``
|
||||
|
||||
The `download_url` field in the `write_workspace_file` response is already
|
||||
in the correct format — paste it directly after the `(` in the Markdown.
|
||||
|
||||
### Long-running tools
|
||||
Long-running tools (create_agent, edit_agent, etc.) run synchronously
|
||||
with heartbeats to keep the connection alive. The frontend shows UI feedback
|
||||
during execution based on stream events.
|
||||
Long-running tools (create_agent, edit_agent, etc.) are handled
|
||||
asynchronously. You will receive an immediate response; the actual result
|
||||
is delivered to the user via a background stream.
|
||||
|
||||
### Sub-agent tasks
|
||||
- When using the Task tool, NEVER set `run_in_background` to true.
|
||||
@@ -126,6 +219,9 @@ during execution based on stream events.
|
||||
"""
|
||||
|
||||
|
||||
STREAM_LOCK_PREFIX = "copilot:stream:lock:"
|
||||
|
||||
|
||||
def _resolve_sdk_model() -> str | None:
|
||||
"""Resolve the model name for the Claude Agent SDK CLI.
|
||||
|
||||
@@ -141,7 +237,10 @@ def _resolve_sdk_model() -> str | None:
|
||||
return model
|
||||
|
||||
|
||||
def _build_sdk_env() -> dict[str, str]:
|
||||
def _build_sdk_env(
|
||||
user_id: str | None = None,
|
||||
session_id: str | None = None,
|
||||
) -> dict[str, str]:
|
||||
"""Build env vars for the SDK CLI process.
|
||||
|
||||
Routes API calls through OpenRouter (or a custom base_url) using
|
||||
@@ -151,6 +250,11 @@ def _build_sdk_env() -> dict[str, str]:
|
||||
Only overrides ``ANTHROPIC_API_KEY`` when a valid proxy URL and auth
|
||||
token are both present — otherwise returns an empty dict so the SDK
|
||||
falls back to its default credentials.
|
||||
|
||||
When ``user_id`` or ``session_id`` are provided they are injected via
|
||||
``CLAUDE_CODE_EXTRA_BODY`` so that OpenRouter (or any proxy) receives
|
||||
them on every API call — mirroring the ``extra_body`` the non-SDK path
|
||||
sends.
|
||||
"""
|
||||
env: dict[str, str] = {}
|
||||
if config.api_key and config.base_url:
|
||||
@@ -165,6 +269,17 @@ def _build_sdk_env() -> dict[str, str]:
|
||||
env["ANTHROPIC_AUTH_TOKEN"] = config.api_key
|
||||
# Must be explicitly empty so the CLI uses AUTH_TOKEN instead
|
||||
env["ANTHROPIC_API_KEY"] = ""
|
||||
|
||||
# Inject user/session metadata via CLAUDE_CODE_EXTRA_BODY so
|
||||
# OpenRouter receives it in every API request body.
|
||||
extra_body: dict[str, Any] = {}
|
||||
if user_id:
|
||||
extra_body["user"] = user_id[:128]
|
||||
if session_id:
|
||||
extra_body["session_id"] = session_id[:128]
|
||||
if extra_body:
|
||||
env["CLAUDE_CODE_EXTRA_BODY"] = json.dumps(extra_body)
|
||||
|
||||
return env
|
||||
|
||||
|
||||
@@ -405,6 +520,23 @@ async def stream_chat_completion_sdk(
|
||||
f"Session {session_id} not found. Please create a new session first."
|
||||
)
|
||||
|
||||
# Type narrowing: session is guaranteed ChatSession after the check above
|
||||
session = cast(ChatSession, session)
|
||||
|
||||
# Clean up stale error markers from previous turn before starting new turn
|
||||
# If the last message contains an error marker, remove it (user is retrying)
|
||||
if (
|
||||
len(session.messages) > 0
|
||||
and session.messages[-1].role == "assistant"
|
||||
and session.messages[-1].content
|
||||
and COPILOT_ERROR_PREFIX in session.messages[-1].content
|
||||
):
|
||||
logger.info(
|
||||
"[SDK] [%s] Removing stale error marker from previous turn",
|
||||
session_id[:12],
|
||||
)
|
||||
session.messages.pop()
|
||||
|
||||
# Append the new message to the session if it's not already there
|
||||
new_message_role = "user" if is_user_message else "assistant"
|
||||
if message and (
|
||||
@@ -434,41 +566,69 @@ async def stream_chat_completion_sdk(
|
||||
_background_tasks.add(task)
|
||||
task.add_done_callback(_background_tasks.discard)
|
||||
|
||||
# Build system prompt (reuses non-SDK path with Langfuse support)
|
||||
has_history = len(session.messages) > 1
|
||||
system_prompt, _ = await _build_system_prompt(
|
||||
user_id, has_conversation_history=has_history
|
||||
)
|
||||
system_prompt += _SDK_TOOL_SUPPLEMENT
|
||||
message_id = str(uuid.uuid4())
|
||||
task_id = str(uuid.uuid4())
|
||||
|
||||
yield StreamStart(messageId=message_id, taskId=task_id)
|
||||
|
||||
stream_id = str(uuid.uuid4())
|
||||
stream_completed = False
|
||||
# Initialise variables before the try so the finally block can
|
||||
# always attempt transcript upload regardless of errors.
|
||||
sdk_cwd = ""
|
||||
use_resume = False
|
||||
resume_file: str | None = None
|
||||
captured_transcript = CapturedTranscript()
|
||||
sdk_cwd = ""
|
||||
|
||||
try:
|
||||
# Use a session-specific temp dir to avoid cleanup race conditions
|
||||
# between concurrent sessions.
|
||||
sdk_cwd = _make_sdk_cwd(session_id)
|
||||
os.makedirs(sdk_cwd, exist_ok=True)
|
||||
# Acquire stream lock to prevent concurrent streams to the same session
|
||||
lock = AsyncClusterLock(
|
||||
redis=await get_redis_async(),
|
||||
key=f"{STREAM_LOCK_PREFIX}{session_id}",
|
||||
owner_id=stream_id,
|
||||
timeout=config.stream_lock_ttl,
|
||||
)
|
||||
|
||||
set_execution_context(
|
||||
user_id,
|
||||
session,
|
||||
long_running_callback=None,
|
||||
lock_owner = await lock.try_acquire()
|
||||
if lock_owner != stream_id:
|
||||
# Another stream is active
|
||||
logger.warning(
|
||||
f"[SDK] Session {session_id} already has an active stream: {lock_owner}"
|
||||
)
|
||||
yield StreamError(
|
||||
errorText="Another stream is already active for this session. "
|
||||
"Please wait or stop it.",
|
||||
code="stream_already_active",
|
||||
)
|
||||
return
|
||||
|
||||
# OTEL context manager — initialized inside the try and cleaned up in finally.
|
||||
_otel_ctx: Any = None
|
||||
|
||||
# Make sure there is no more code between the lock acquitition and try-block.
|
||||
try:
|
||||
# Build system prompt (reuses non-SDK path with Langfuse support).
|
||||
# Pre-compute the cwd here so the exact working directory path can be
|
||||
# injected into the supplement instead of the generic placeholder.
|
||||
# Catch ValueError early so the failure yields a clean StreamError rather
|
||||
# than propagating outside the stream error-handling path.
|
||||
has_history = len(session.messages) > 1
|
||||
try:
|
||||
sdk_cwd = _make_sdk_cwd(session_id)
|
||||
os.makedirs(sdk_cwd, exist_ok=True)
|
||||
except (ValueError, OSError) as e:
|
||||
logger.error("[SDK] [%s] Invalid SDK cwd: %s", session_id[:12], e)
|
||||
yield StreamError(
|
||||
errorText="Unable to initialize working directory.",
|
||||
code="sdk_cwd_error",
|
||||
)
|
||||
return
|
||||
system_prompt, _ = await _build_system_prompt(
|
||||
user_id, has_conversation_history=has_history
|
||||
)
|
||||
system_prompt += _build_sdk_tool_supplement(sdk_cwd)
|
||||
|
||||
yield StreamStart(messageId=message_id, sessionId=session_id)
|
||||
|
||||
set_execution_context(user_id, session)
|
||||
try:
|
||||
from claude_agent_sdk import ClaudeAgentOptions, ClaudeSDKClient
|
||||
|
||||
# Fail fast when no API credentials are available at all
|
||||
sdk_env = _build_sdk_env()
|
||||
sdk_env = _build_sdk_env(user_id=user_id, session_id=session_id)
|
||||
if not sdk_env and not os.environ.get("ANTHROPIC_API_KEY"):
|
||||
raise RuntimeError(
|
||||
"No API key configured. Set OPEN_ROUTER_API_KEY "
|
||||
@@ -555,7 +715,19 @@ async def stream_chat_completion_sdk(
|
||||
options = ClaudeAgentOptions(**sdk_options_kwargs) # type: ignore[arg-type]
|
||||
|
||||
adapter = SDKResponseAdapter(message_id=message_id, session_id=session_id)
|
||||
adapter.set_task_id(task_id)
|
||||
|
||||
# Propagate user_id/session_id as OTEL context attributes so the
|
||||
# langsmith tracing integration attaches them to every span. This
|
||||
# is what Langfuse (or any OTEL backend) maps to its native
|
||||
# user/session fields.
|
||||
_otel_ctx = propagate_attributes(
|
||||
user_id=user_id,
|
||||
session_id=session_id,
|
||||
trace_name="copilot-sdk",
|
||||
tags=["sdk"],
|
||||
metadata={"resume": str(use_resume)},
|
||||
)
|
||||
_otel_ctx.__enter__()
|
||||
|
||||
async with ClaudeSDKClient(options=options) as client:
|
||||
current_message = message or ""
|
||||
@@ -569,7 +741,6 @@ async def stream_chat_completion_sdk(
|
||||
errorText="Message cannot be empty.",
|
||||
code="empty_prompt",
|
||||
)
|
||||
yield StreamFinish()
|
||||
return
|
||||
|
||||
query_message = await _build_query_message(
|
||||
@@ -580,8 +751,7 @@ async def stream_chat_completion_sdk(
|
||||
session_id,
|
||||
)
|
||||
logger.info(
|
||||
"[SDK] [%s] Sending query — resume=%s, "
|
||||
"total_msgs=%d, query_len=%d",
|
||||
"[SDK] [%s] Sending query — resume=%s, total_msgs=%d, query_len=%d",
|
||||
session_id[:12],
|
||||
use_resume,
|
||||
len(session.messages),
|
||||
@@ -593,9 +763,6 @@ async def stream_chat_completion_sdk(
|
||||
accumulated_tool_calls: list[dict[str, Any]] = []
|
||||
has_appended_assistant = False
|
||||
has_tool_results = False
|
||||
# Track persisted message count to skip DB count queries
|
||||
# on incremental saves. Initial save happened at line 545.
|
||||
saved_msg_count = len(session.messages)
|
||||
|
||||
# Use an explicit async iterator with non-cancelling heartbeats.
|
||||
# CRITICAL: we must NOT cancel __anext__() mid-flight — doing so
|
||||
@@ -605,7 +772,7 @@ async def stream_chat_completion_sdk(
|
||||
# Instead, wrap __anext__() in a Task and use asyncio.wait()
|
||||
# with a timeout. On timeout we emit a heartbeat but keep the
|
||||
# Task alive so it can deliver the next message.
|
||||
msg_iter = client.receive_messages().__aiter__()
|
||||
msg_iter = client.receive_response().__aiter__()
|
||||
pending_task: asyncio.Task[Any] | None = None
|
||||
try:
|
||||
while not stream_completed:
|
||||
@@ -622,6 +789,8 @@ async def stream_chat_completion_sdk(
|
||||
|
||||
if not done:
|
||||
# Timeout — emit heartbeat but keep the task alive
|
||||
# Also refresh lock TTL to keep it alive
|
||||
await lock.refresh()
|
||||
yield StreamHeartbeat()
|
||||
continue
|
||||
|
||||
@@ -631,14 +800,13 @@ async def stream_chat_completion_sdk(
|
||||
sdk_msg = done.pop().result()
|
||||
except StopAsyncIteration:
|
||||
logger.info(
|
||||
"[SDK] [%s] Stream ended normally "
|
||||
"(StopAsyncIteration)",
|
||||
"[SDK] [%s] Stream ended normally (StopAsyncIteration)",
|
||||
session_id[:12],
|
||||
)
|
||||
break
|
||||
except Exception as stream_err:
|
||||
# SDK sends {"type": "error"} which raises
|
||||
# Exception in receive_messages() — capture it
|
||||
# Exception in receive_response() — capture it
|
||||
# so the session can still be saved and the
|
||||
# frontend gets a clean finish.
|
||||
logger.error(
|
||||
@@ -705,6 +873,25 @@ async def stream_chat_completion_sdk(
|
||||
- len(adapter.resolved_tool_calls),
|
||||
)
|
||||
|
||||
# Log ResultMessage details for debugging
|
||||
if isinstance(sdk_msg, ResultMessage):
|
||||
logger.info(
|
||||
"[SDK] [%s] Received: ResultMessage %s "
|
||||
"(unresolved=%d, current=%d, resolved=%d)",
|
||||
session_id[:12],
|
||||
sdk_msg.subtype,
|
||||
len(adapter.current_tool_calls)
|
||||
- len(adapter.resolved_tool_calls),
|
||||
len(adapter.current_tool_calls),
|
||||
len(adapter.resolved_tool_calls),
|
||||
)
|
||||
if sdk_msg.subtype in ("error", "error_during_execution"):
|
||||
logger.error(
|
||||
"[SDK] [%s] SDK execution failed with error: %s",
|
||||
session_id[:12],
|
||||
sdk_msg.result or "(no error message provided)",
|
||||
)
|
||||
|
||||
for response in adapter.convert_message(sdk_msg):
|
||||
if isinstance(response, StreamStart):
|
||||
continue
|
||||
@@ -729,6 +916,15 @@ async def stream_chat_completion_sdk(
|
||||
extra,
|
||||
)
|
||||
|
||||
# Log errors being sent to frontend
|
||||
if isinstance(response, StreamError):
|
||||
logger.error(
|
||||
"[SDK] [%s] Sending error to frontend: %s (code=%s)",
|
||||
session_id[:12],
|
||||
response.errorText,
|
||||
response.code,
|
||||
)
|
||||
|
||||
yield response
|
||||
|
||||
if isinstance(response, StreamTextDelta):
|
||||
@@ -769,21 +965,6 @@ async def stream_chat_completion_sdk(
|
||||
if not has_appended_assistant:
|
||||
session.messages.append(assistant_response)
|
||||
has_appended_assistant = True
|
||||
# Save before tool execution starts so the
|
||||
# pending tool call is visible on refresh /
|
||||
# other devices.
|
||||
try:
|
||||
await upsert_chat_session(
|
||||
session,
|
||||
existing_message_count=saved_msg_count,
|
||||
)
|
||||
saved_msg_count = len(session.messages)
|
||||
except Exception as save_err:
|
||||
logger.warning(
|
||||
"[SDK] [%s] Incremental save " "failed: %s",
|
||||
session_id[:12],
|
||||
save_err,
|
||||
)
|
||||
|
||||
elif isinstance(response, StreamToolOutputAvailable):
|
||||
session.messages.append(
|
||||
@@ -798,20 +979,6 @@ async def stream_chat_completion_sdk(
|
||||
)
|
||||
)
|
||||
has_tool_results = True
|
||||
# Save after tool completes so the result is
|
||||
# visible on refresh / other devices.
|
||||
try:
|
||||
await upsert_chat_session(
|
||||
session,
|
||||
existing_message_count=saved_msg_count,
|
||||
)
|
||||
saved_msg_count = len(session.messages)
|
||||
except Exception as save_err:
|
||||
logger.warning(
|
||||
"[SDK] [%s] Incremental save " "failed: %s",
|
||||
session_id[:12],
|
||||
save_err,
|
||||
)
|
||||
|
||||
elif isinstance(response, StreamFinish):
|
||||
stream_completed = True
|
||||
@@ -821,8 +988,7 @@ async def stream_chat_completion_sdk(
|
||||
# server shutdown). Log and let the safety-net / finally
|
||||
# blocks handle cleanup.
|
||||
logger.warning(
|
||||
"[SDK] [%s] Streaming loop cancelled "
|
||||
"(asyncio.CancelledError)",
|
||||
"[SDK] [%s] Streaming loop cancelled (asyncio.CancelledError)",
|
||||
session_id[:12],
|
||||
)
|
||||
raise
|
||||
@@ -864,25 +1030,29 @@ async def stream_chat_completion_sdk(
|
||||
)
|
||||
yield response
|
||||
|
||||
# If the stream ended without a ResultMessage (no
|
||||
# StreamFinish), the SDK CLI exited unexpectedly. Close
|
||||
# the open step and emit StreamFinish so the frontend
|
||||
# transitions to the "ready" state.
|
||||
# If the stream ended without a ResultMessage, the SDK
|
||||
# CLI exited unexpectedly or the user stopped execution.
|
||||
# Close any open text/step so chunks are well-formed, and
|
||||
# append a cancellation message so users see feedback.
|
||||
# StreamFinish is published by mark_session_completed in the processor.
|
||||
if not stream_completed:
|
||||
logger.warning(
|
||||
"[SDK] [%s] Stream ended without ResultMessage "
|
||||
"(StopAsyncIteration) — emitting StreamFinish",
|
||||
logger.info(
|
||||
"[SDK] [%s] Stream ended without ResultMessage (stopped by user)",
|
||||
session_id[:12],
|
||||
)
|
||||
if adapter.step_open:
|
||||
yield StreamFinishStep()
|
||||
adapter.step_open = False
|
||||
closing_responses: list[StreamBaseResponse] = []
|
||||
adapter._end_text_if_open(closing_responses)
|
||||
for r in closing_responses:
|
||||
yield r
|
||||
yield StreamFinish()
|
||||
stream_completed = True
|
||||
|
||||
# Add "Stopped by user" message so it persists after refresh
|
||||
# Use COPILOT_SYSTEM_PREFIX so frontend renders it as system message, not assistant
|
||||
session.messages.append(
|
||||
ChatMessage(
|
||||
role="assistant",
|
||||
content=f"{COPILOT_SYSTEM_PREFIX} Execution stopped by user",
|
||||
)
|
||||
)
|
||||
|
||||
if (
|
||||
assistant_response.content or assistant_response.tool_calls
|
||||
@@ -902,7 +1072,7 @@ async def stream_chat_completion_sdk(
|
||||
elif captured_transcript.path:
|
||||
raw_transcript = read_transcript_file(captured_transcript.path)
|
||||
logger.debug(
|
||||
"[SDK] Transcript source: stop hook (%s), " "read result: %s",
|
||||
"[SDK] Transcript source: stop hook (%s), read result: %s",
|
||||
captured_transcript.path,
|
||||
f"{len(raw_transcript)}B" if raw_transcript else "None",
|
||||
)
|
||||
@@ -937,33 +1107,83 @@ async def stream_chat_completion_sdk(
|
||||
"to use the OpenAI-compatible fallback."
|
||||
)
|
||||
|
||||
await asyncio.shield(upsert_chat_session(session))
|
||||
logger.info(
|
||||
"[SDK] [%s] Session saved with %d messages",
|
||||
"[SDK] [%s] Stream completed successfully with %d messages",
|
||||
session_id[:12],
|
||||
len(session.messages),
|
||||
)
|
||||
if not stream_completed:
|
||||
yield StreamFinish()
|
||||
except BaseException as e:
|
||||
# Catch BaseException to handle both Exception and CancelledError
|
||||
# (CancelledError inherits from BaseException in Python 3.8+)
|
||||
if isinstance(e, asyncio.CancelledError):
|
||||
logger.warning("[SDK] [%s] Session cancelled", session_id[:12])
|
||||
error_msg = "Operation cancelled"
|
||||
else:
|
||||
error_msg = str(e) or type(e).__name__
|
||||
# SDK cleanup RuntimeError is expected during cancellation, log as warning
|
||||
if isinstance(e, RuntimeError) and "cancel scope" in str(e):
|
||||
logger.warning(
|
||||
"[SDK] [%s] SDK cleanup error: %s", session_id[:12], error_msg
|
||||
)
|
||||
else:
|
||||
logger.error(
|
||||
f"[SDK] [%s] Error: {error_msg}", session_id[:12], exc_info=True
|
||||
)
|
||||
|
||||
except asyncio.CancelledError:
|
||||
# Client disconnect / server shutdown — log but re-raise so
|
||||
# the framework can clean up. The finally block still runs
|
||||
# for transcript upload.
|
||||
logger.warning("[SDK] [%s] Session cancelled (CancelledError)", session_id[:12])
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(f"[SDK] Error: {e}", exc_info=True)
|
||||
try:
|
||||
await asyncio.shield(upsert_chat_session(session))
|
||||
except Exception as save_err:
|
||||
logger.error(f"[SDK] Failed to save session on error: {save_err}")
|
||||
yield StreamError(
|
||||
errorText="An error occurred. Please try again.",
|
||||
code="sdk_error",
|
||||
# Append error marker to session (non-invasive text parsing approach)
|
||||
# The finally block will persist the session with this error marker
|
||||
if session:
|
||||
session.messages.append(
|
||||
ChatMessage(
|
||||
role="assistant", content=f"{COPILOT_ERROR_PREFIX} {error_msg}"
|
||||
)
|
||||
)
|
||||
logger.debug(
|
||||
"[SDK] [%s] Appended error marker, will be persisted in finally",
|
||||
session_id[:12],
|
||||
)
|
||||
|
||||
# Yield StreamError for immediate feedback (only for non-cancellation errors)
|
||||
# Skip for CancelledError and RuntimeError cleanup issues (both are cancellations)
|
||||
is_cancellation = isinstance(e, asyncio.CancelledError) or (
|
||||
isinstance(e, RuntimeError) and "cancel scope" in str(e)
|
||||
)
|
||||
yield StreamFinish()
|
||||
if not is_cancellation:
|
||||
yield StreamError(
|
||||
errorText=error_msg,
|
||||
code="sdk_error",
|
||||
)
|
||||
|
||||
raise
|
||||
finally:
|
||||
# --- Close OTEL context ---
|
||||
if _otel_ctx is not None:
|
||||
try:
|
||||
_otel_ctx.__exit__(*sys.exc_info())
|
||||
except Exception:
|
||||
logger.warning("OTEL context teardown failed", exc_info=True)
|
||||
|
||||
# --- Persist session messages ---
|
||||
# This MUST run in finally to persist messages even when the generator
|
||||
# is stopped early (e.g., user clicks stop, processor breaks stream loop).
|
||||
# Without this, messages disappear after refresh because they were never
|
||||
# saved to the database.
|
||||
if session is not None:
|
||||
try:
|
||||
await asyncio.shield(upsert_chat_session(session))
|
||||
logger.info(
|
||||
"[SDK] [%s] Session persisted in finally with %d messages",
|
||||
session_id[:12],
|
||||
len(session.messages),
|
||||
)
|
||||
except Exception as persist_err:
|
||||
logger.error(
|
||||
"[SDK] [%s] Failed to persist session in finally: %s",
|
||||
session_id[:12],
|
||||
persist_err,
|
||||
exc_info=True,
|
||||
)
|
||||
|
||||
# --- Upload transcript for next-turn --resume ---
|
||||
# This MUST run in finally so the transcript is uploaded even when
|
||||
# the streaming loop raises an exception. The CLI uses
|
||||
@@ -979,7 +1199,7 @@ async def stream_chat_completion_sdk(
|
||||
if not raw_transcript and use_resume and resume_file:
|
||||
raw_transcript = read_transcript_file(resume_file)
|
||||
|
||||
if raw_transcript:
|
||||
if raw_transcript and session is not None:
|
||||
await asyncio.shield(
|
||||
_try_upload_transcript(
|
||||
user_id,
|
||||
@@ -999,6 +1219,9 @@ async def stream_chat_completion_sdk(
|
||||
if sdk_cwd:
|
||||
_cleanup_sdk_tool_results(sdk_cwd)
|
||||
|
||||
# Release stream lock to allow new streams for this session
|
||||
await lock.release()
|
||||
|
||||
|
||||
async def _try_upload_transcript(
|
||||
user_id: str,
|
||||
|
||||
@@ -2,11 +2,6 @@
|
||||
|
||||
This module provides the adapter layer that converts existing BaseTool implementations
|
||||
into in-process MCP tools that can be used with the Claude Agent SDK.
|
||||
|
||||
Long-running tools (``is_long_running=True``) are delegated to the non-SDK
|
||||
background infrastructure (stream_registry, Redis persistence, SSE reconnection)
|
||||
via a callback provided by the service layer. This avoids wasteful SDK polling
|
||||
and makes results survive page refreshes.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
@@ -15,7 +10,6 @@ import json
|
||||
import logging
|
||||
import os
|
||||
import uuid
|
||||
from collections.abc import Awaitable, Callable
|
||||
from contextvars import ContextVar
|
||||
from typing import Any
|
||||
|
||||
@@ -43,7 +37,8 @@ _current_session: ContextVar[ChatSession | None] = ContextVar(
|
||||
# Keyed by tool_name → full output string. Consumed (popped) by the
|
||||
# response adapter when it builds StreamToolOutputAvailable.
|
||||
_pending_tool_outputs: ContextVar[dict[str, list[str]]] = ContextVar(
|
||||
"pending_tool_outputs", default=None # type: ignore[arg-type]
|
||||
"pending_tool_outputs",
|
||||
default=None, # type: ignore[arg-type]
|
||||
)
|
||||
# Event signaled whenever stash_pending_tool_output() adds a new entry.
|
||||
# Used by the streaming loop to wait for PostToolUse hooks to complete
|
||||
@@ -54,22 +49,10 @@ _stash_event: ContextVar[asyncio.Event | None] = ContextVar(
|
||||
"_stash_event", default=None
|
||||
)
|
||||
|
||||
# Callback type for delegating long-running tools to the non-SDK infrastructure.
|
||||
# Args: (tool_name, arguments, session) → MCP-formatted response dict.
|
||||
LongRunningCallback = Callable[
|
||||
[str, dict[str, Any], ChatSession], Awaitable[dict[str, Any]]
|
||||
]
|
||||
|
||||
# ContextVar so the service layer can inject the callback per-request.
|
||||
_long_running_callback: ContextVar[LongRunningCallback | None] = ContextVar(
|
||||
"long_running_callback", default=None
|
||||
)
|
||||
|
||||
|
||||
def set_execution_context(
|
||||
user_id: str | None,
|
||||
session: ChatSession,
|
||||
long_running_callback: LongRunningCallback | None = None,
|
||||
) -> None:
|
||||
"""Set the execution context for tool calls.
|
||||
|
||||
@@ -79,14 +62,11 @@ def set_execution_context(
|
||||
Args:
|
||||
user_id: Current user's ID.
|
||||
session: Current chat session.
|
||||
long_running_callback: Optional callback to delegate long-running tools
|
||||
to the non-SDK background infrastructure (stream_registry + Redis).
|
||||
"""
|
||||
_current_user_id.set(user_id)
|
||||
_current_session.set(session)
|
||||
_pending_tool_outputs.set({})
|
||||
_stash_event.set(asyncio.Event())
|
||||
_long_running_callback.set(long_running_callback)
|
||||
|
||||
|
||||
def get_execution_context() -> tuple[str | None, ChatSession | None]:
|
||||
@@ -276,11 +256,6 @@ def create_tool_handler(base_tool: BaseTool):
|
||||
|
||||
This wraps the existing BaseTool._execute method to be compatible
|
||||
with the Claude Agent SDK MCP tool format.
|
||||
|
||||
Long-running tools (``is_long_running=True``) are delegated to the
|
||||
non-SDK background infrastructure via a callback set in the execution
|
||||
context. The callback persists the operation in Redis (stream_registry)
|
||||
so results survive page refreshes and pod restarts.
|
||||
"""
|
||||
|
||||
async def tool_handler(args: dict[str, Any]) -> dict[str, Any]:
|
||||
@@ -290,25 +265,6 @@ def create_tool_handler(base_tool: BaseTool):
|
||||
if session is None:
|
||||
return _mcp_error("No session context available")
|
||||
|
||||
# --- Long-running: delegate to non-SDK background infrastructure ---
|
||||
if base_tool.is_long_running:
|
||||
callback = _long_running_callback.get(None)
|
||||
if callback:
|
||||
try:
|
||||
return await callback(base_tool.name, args, session)
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"Long-running callback failed for {base_tool.name}: {e}",
|
||||
exc_info=True,
|
||||
)
|
||||
return _mcp_error(f"Failed to start {base_tool.name}: {e}")
|
||||
# No callback — fall through to synchronous execution
|
||||
logger.warning(
|
||||
f"[SDK] No long-running callback for {base_tool.name}, "
|
||||
f"executing synchronously (may block)"
|
||||
)
|
||||
|
||||
# --- Normal (fast) tool: execute synchronously ---
|
||||
try:
|
||||
return await _execute_tool_sync(base_tool, user_id, session, args)
|
||||
except Exception as e:
|
||||
|
||||
@@ -27,13 +27,11 @@ from openai.types.chat import (
|
||||
ChatCompletionToolParam,
|
||||
)
|
||||
|
||||
from backend.data.db_accessors import chat_db, understanding_db
|
||||
from backend.data.redis_client import get_redis_async
|
||||
from backend.data.db_accessors import understanding_db
|
||||
from backend.data.understanding import format_understanding_for_prompt
|
||||
from backend.util.exceptions import NotFoundError
|
||||
from backend.util.settings import AppEnvironment, Settings
|
||||
|
||||
from . import stream_registry
|
||||
from .config import ChatConfig
|
||||
from .model import (
|
||||
ChatMessage,
|
||||
@@ -42,7 +40,6 @@ from .model import (
|
||||
Usage,
|
||||
cache_chat_session,
|
||||
get_chat_session,
|
||||
invalidate_session_cache,
|
||||
update_session_title,
|
||||
upsert_chat_session,
|
||||
)
|
||||
@@ -52,7 +49,6 @@ from .response_model import (
|
||||
StreamFinish,
|
||||
StreamFinishStep,
|
||||
StreamHeartbeat,
|
||||
StreamLongRunningStart,
|
||||
StreamStart,
|
||||
StreamStartStep,
|
||||
StreamTextDelta,
|
||||
@@ -63,12 +59,15 @@ from .response_model import (
|
||||
StreamToolOutputAvailable,
|
||||
StreamUsage,
|
||||
)
|
||||
from .tools import execute_tool, get_tool, tools
|
||||
from .tools import execute_tool, tools
|
||||
from .tools.models import ErrorResponse
|
||||
from .tracking import track_user_message
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Set to hold background tasks to prevent garbage collection
|
||||
_background_tasks: set[asyncio.Task[Any]] = set()
|
||||
|
||||
config = ChatConfig()
|
||||
settings = Settings()
|
||||
client = openai.AsyncOpenAI(api_key=config.api_key, base_url=config.base_url)
|
||||
@@ -76,10 +75,6 @@ client = openai.AsyncOpenAI(api_key=config.api_key, base_url=config.base_url)
|
||||
|
||||
langfuse = get_client()
|
||||
|
||||
# Redis key prefix for tracking running long-running operations
|
||||
# Used for idempotency across Kubernetes pods - prevents duplicate executions on browser refresh
|
||||
RUNNING_OPERATION_PREFIX = "chat:running_operation:"
|
||||
|
||||
# Default system prompt used when Langfuse is not configured
|
||||
# This is a snapshot of the "CoPilot Prompt" from Langfuse (version 11)
|
||||
DEFAULT_SYSTEM_PROMPT = """You are **Otto**, an AI Co-Pilot for AutoGPT and a Forward-Deployed Automation Engineer serving small business owners. Your mission is to help users automate business tasks with AI by delivering tangible value through working automations—not through documentation or lengthy explanations.
|
||||
@@ -171,39 +166,6 @@ Adapt flexibly to the conversation context. Not every interaction requires all s
|
||||
|
||||
You are NOT a chatbot. You are NOT documentation. You are a partner who helps busy business owners get value quickly by showing proof through working automations. Bias toward action over explanation."""
|
||||
|
||||
# Module-level set to hold strong references to background tasks.
|
||||
# This prevents asyncio from garbage collecting tasks before they complete.
|
||||
# Tasks are automatically removed on completion via done_callback.
|
||||
_background_tasks: set[asyncio.Task] = set()
|
||||
|
||||
|
||||
async def _mark_operation_started(tool_call_id: str) -> bool:
|
||||
"""Mark a long-running operation as started (Redis-based).
|
||||
|
||||
Returns True if successfully marked (operation was not already running),
|
||||
False if operation was already running (lost race condition).
|
||||
Raises exception if Redis is unavailable (fail-closed).
|
||||
"""
|
||||
redis = await get_redis_async()
|
||||
key = f"{RUNNING_OPERATION_PREFIX}{tool_call_id}"
|
||||
# SETNX with TTL - atomic "set if not exists"
|
||||
result = await redis.set(key, "1", ex=config.long_running_operation_ttl, nx=True)
|
||||
return result is not None
|
||||
|
||||
|
||||
async def _mark_operation_completed(tool_call_id: str) -> None:
|
||||
"""Mark a long-running operation as completed (remove Redis key).
|
||||
|
||||
This is best-effort - if Redis fails, the TTL will eventually clean up.
|
||||
"""
|
||||
try:
|
||||
redis = await get_redis_async()
|
||||
key = f"{RUNNING_OPERATION_PREFIX}{tool_call_id}"
|
||||
await redis.delete(key)
|
||||
except Exception as e:
|
||||
# Non-critical: TTL will clean up eventually
|
||||
logger.warning(f"Failed to delete running operation key {tool_call_id}: {e}")
|
||||
|
||||
|
||||
def _is_langfuse_configured() -> bool:
|
||||
"""Check if Langfuse credentials are configured."""
|
||||
@@ -348,7 +310,8 @@ async def assign_user_to_session(
|
||||
if not session:
|
||||
raise NotFoundError(f"Session {session_id} not found")
|
||||
session.user_id = user_id
|
||||
return await upsert_chat_session(session)
|
||||
session = await upsert_chat_session(session)
|
||||
return session
|
||||
|
||||
|
||||
async def stream_chat_completion(
|
||||
@@ -363,7 +326,6 @@ async def stream_chat_completion(
|
||||
_continuation_message_id: (
|
||||
str | None
|
||||
) = None, # Internal: reuse message ID for tool call continuations
|
||||
_task_id: str | None = None, # Internal: task ID for SSE reconnection support
|
||||
) -> AsyncGenerator[StreamBaseResponse, None]:
|
||||
"""Main entry point for streaming chat completions with database handling.
|
||||
|
||||
@@ -439,24 +401,20 @@ async def stream_chat_completion(
|
||||
)
|
||||
):
|
||||
session.messages.append(ChatMessage(role=new_message_role, content=message))
|
||||
logger.info(
|
||||
f"Appended message (role={'user' if is_user_message else 'assistant'}), "
|
||||
f"new message_count={len(session.messages)}"
|
||||
)
|
||||
|
||||
# Track user message in PostHog
|
||||
if is_user_message:
|
||||
posthog_start = time.monotonic()
|
||||
track_user_message(
|
||||
user_id=user_id,
|
||||
session_id=session_id,
|
||||
message_length=len(message),
|
||||
)
|
||||
posthog_time = (time.monotonic() - posthog_start) * 1000
|
||||
logger.info(
|
||||
f"[TIMING] track_user_message took {posthog_time:.1f}ms",
|
||||
extra={"json_fields": {**log_meta, "duration_ms": posthog_time}},
|
||||
)
|
||||
# Track user message in PostHog
|
||||
if is_user_message and message:
|
||||
posthog_start = time.monotonic()
|
||||
track_user_message(
|
||||
user_id=user_id,
|
||||
session_id=session_id,
|
||||
message_length=len(message),
|
||||
)
|
||||
posthog_time = (time.monotonic() - posthog_start) * 1000
|
||||
logger.info(
|
||||
f"[TIMING] track_user_message took {posthog_time:.1f}ms",
|
||||
extra={"json_fields": {**log_meta, "duration_ms": posthog_time}},
|
||||
)
|
||||
|
||||
upsert_start = time.monotonic()
|
||||
session = await upsert_chat_session(session)
|
||||
@@ -474,8 +432,6 @@ async def stream_chat_completion(
|
||||
if is_user_message and first_user_msg and not session.title:
|
||||
if len(user_messages) == 1:
|
||||
# First user message - generate title in background
|
||||
import asyncio
|
||||
|
||||
# Capture only the values we need (not the session object) to avoid
|
||||
# stale data issues when the main flow modifies the session
|
||||
captured_session_id = session_id
|
||||
@@ -500,7 +456,9 @@ async def stream_chat_completion(
|
||||
logger.warning(f"Failed to update session title: {e}")
|
||||
|
||||
# Fire and forget - don't block the chat response
|
||||
asyncio.create_task(_update_title())
|
||||
task = asyncio.create_task(_update_title())
|
||||
_background_tasks.add(task)
|
||||
task.add_done_callback(_background_tasks.discard)
|
||||
|
||||
# Build system prompt with business understanding
|
||||
prompt_start = time.monotonic()
|
||||
@@ -525,7 +483,6 @@ async def stream_chat_completion(
|
||||
has_yielded_end = False
|
||||
has_yielded_error = False
|
||||
has_done_tool_call = False
|
||||
has_long_running_tool_call = False # Track if we had a long-running tool call
|
||||
has_received_text = False
|
||||
text_streaming_ended = False
|
||||
tool_response_messages: list[ChatMessage] = []
|
||||
@@ -545,7 +502,7 @@ async def stream_chat_completion(
|
||||
extra={"json_fields": {**log_meta, "setup_time_ms": setup_time}},
|
||||
)
|
||||
if not is_continuation:
|
||||
yield StreamStart(messageId=message_id, taskId=_task_id)
|
||||
yield StreamStart(messageId=message_id, sessionId=session.session_id)
|
||||
|
||||
# Emit start-step before each LLM call (AI SDK uses this to add step boundaries)
|
||||
yield StreamStartStep()
|
||||
@@ -618,34 +575,13 @@ async def stream_chat_completion(
|
||||
if isinstance(chunk.output, str)
|
||||
else orjson.dumps(chunk.output).decode("utf-8")
|
||||
)
|
||||
# Skip saving long-running operation responses - messages already saved in _yield_tool_call
|
||||
# Use JSON parsing instead of substring matching to avoid false positives
|
||||
is_long_running_response = False
|
||||
try:
|
||||
parsed = orjson.loads(result_content)
|
||||
if isinstance(parsed, dict) and parsed.get("type") in (
|
||||
"operation_started",
|
||||
"operation_in_progress",
|
||||
):
|
||||
is_long_running_response = True
|
||||
except (orjson.JSONDecodeError, TypeError):
|
||||
pass # Not JSON or not a dict - treat as regular response
|
||||
if is_long_running_response:
|
||||
# Remove from accumulated_tool_calls since assistant message was already saved
|
||||
accumulated_tool_calls[:] = [
|
||||
tc
|
||||
for tc in accumulated_tool_calls
|
||||
if tc["id"] != chunk.toolCallId
|
||||
]
|
||||
has_long_running_tool_call = True
|
||||
else:
|
||||
tool_response_messages.append(
|
||||
ChatMessage(
|
||||
role="tool",
|
||||
content=result_content,
|
||||
tool_call_id=chunk.toolCallId,
|
||||
)
|
||||
tool_response_messages.append(
|
||||
ChatMessage(
|
||||
role="tool",
|
||||
content=result_content,
|
||||
tool_call_id=chunk.toolCallId,
|
||||
)
|
||||
)
|
||||
has_done_tool_call = True
|
||||
# Track if any tool execution failed
|
||||
if not chunk.success:
|
||||
@@ -689,9 +625,9 @@ async def stream_chat_completion(
|
||||
has_saved_assistant_message = True
|
||||
|
||||
has_yielded_end = True
|
||||
# Emit finish-step before finish (resets AI SDK text/reasoning state)
|
||||
# Emit finish-step (resets AI SDK text/reasoning state).
|
||||
# StreamFinish is published by mark_session_completed.
|
||||
yield StreamFinishStep()
|
||||
yield chunk
|
||||
elif isinstance(chunk, StreamError):
|
||||
has_yielded_error = True
|
||||
yield chunk
|
||||
@@ -780,7 +716,6 @@ async def stream_chat_completion(
|
||||
yield error_response
|
||||
if not has_yielded_end:
|
||||
yield StreamFinishStep()
|
||||
yield StreamFinish()
|
||||
return
|
||||
|
||||
# Handle retry outside of exception handler to avoid nesting
|
||||
@@ -795,7 +730,6 @@ async def stream_chat_completion(
|
||||
session=session,
|
||||
context=context,
|
||||
_continuation_message_id=message_id, # Reuse message ID since start was already sent
|
||||
_task_id=_task_id,
|
||||
):
|
||||
yield chunk
|
||||
return # Exit after retry to avoid double-saving in finally block
|
||||
@@ -841,14 +775,7 @@ async def stream_chat_completion(
|
||||
logger.info(
|
||||
f"Extended session messages, new message_count={len(session.messages)}"
|
||||
)
|
||||
# Save if there are regular (non-long-running) tool responses or streaming message.
|
||||
# Long-running tools save their own state, but we still need to save regular tools
|
||||
# that may be in the same response.
|
||||
has_regular_tool_responses = len(tool_response_messages) > 0
|
||||
if has_regular_tool_responses or (
|
||||
not has_long_running_tool_call
|
||||
and (messages_to_save or has_appended_streaming_message)
|
||||
):
|
||||
if messages_to_save or has_appended_streaming_message:
|
||||
await upsert_chat_session(session)
|
||||
else:
|
||||
logger.info(
|
||||
@@ -857,9 +784,7 @@ async def stream_chat_completion(
|
||||
)
|
||||
|
||||
# If we did a tool call, stream the chat completion again to get the next response
|
||||
# Skip only if ALL tools were long-running (they handle their own completion)
|
||||
has_regular_tools = len(tool_response_messages) > 0
|
||||
if has_done_tool_call and (has_regular_tools or not has_long_running_tool_call):
|
||||
if has_done_tool_call:
|
||||
logger.info(
|
||||
"Tool call executed, streaming chat completion again to get assistant response"
|
||||
)
|
||||
@@ -870,7 +795,6 @@ async def stream_chat_completion(
|
||||
context=context,
|
||||
tool_call_response=str(tool_response_messages),
|
||||
_continuation_message_id=message_id, # Reuse message ID to avoid duplicates
|
||||
_task_id=_task_id,
|
||||
):
|
||||
yield chunk
|
||||
|
||||
@@ -1036,7 +960,6 @@ async def _stream_chat_chunks(
|
||||
"Please start a new conversation."
|
||||
)
|
||||
)
|
||||
yield StreamFinish()
|
||||
return
|
||||
|
||||
messages = context_result.messages
|
||||
@@ -1239,7 +1162,6 @@ async def _stream_chat_chunks(
|
||||
f"session={session.session_id}, user={session.user_id}",
|
||||
extra={"json_fields": {**log_meta, "total_time_ms": total_time}},
|
||||
)
|
||||
yield StreamFinish()
|
||||
return
|
||||
except Exception as e:
|
||||
last_error = e
|
||||
@@ -1291,7 +1213,6 @@ async def _stream_chat_chunks(
|
||||
code=error_code,
|
||||
)
|
||||
yield error_response
|
||||
yield StreamFinish()
|
||||
return
|
||||
|
||||
# If we exit the retry loop without returning, it means we exhausted retries
|
||||
@@ -1305,21 +1226,9 @@ async def _stream_chat_chunks(
|
||||
retry_count=MAX_RETRIES,
|
||||
)
|
||||
yield StreamError(errorText=f"Max retries exceeded: {last_error!s}")
|
||||
yield StreamFinish()
|
||||
return
|
||||
|
||||
|
||||
async def _with_optional_lock(
|
||||
lock: asyncio.Lock | None,
|
||||
coro_fn: Any,
|
||||
) -> Any:
|
||||
"""Run *coro_fn()* under *lock* when provided, otherwise run directly."""
|
||||
if lock:
|
||||
async with lock:
|
||||
return await coro_fn()
|
||||
return await coro_fn()
|
||||
|
||||
|
||||
async def _execute_tool_calls_parallel(
|
||||
tool_calls: list[dict[str, Any]],
|
||||
session: ChatSession,
|
||||
@@ -1327,11 +1236,8 @@ async def _execute_tool_calls_parallel(
|
||||
"""Execute all tool calls concurrently, yielding stream events as they arrive.
|
||||
|
||||
Each tool runs as an ``asyncio.Task``, pushing events into a shared queue.
|
||||
A ``session_lock`` serialises session-state mutations (long-running tool
|
||||
bookkeeping, ``run_agent`` counters).
|
||||
"""
|
||||
queue: asyncio.Queue[StreamBaseResponse | None] = asyncio.Queue()
|
||||
session_lock = asyncio.Lock()
|
||||
n_tools = len(tool_calls)
|
||||
retryable_errors: list[Exception] = []
|
||||
|
||||
@@ -1339,7 +1245,7 @@ async def _execute_tool_calls_parallel(
|
||||
tool_name = tool_calls[idx].get("function", {}).get("name", "unknown")
|
||||
tool_call_id = tool_calls[idx].get("id", f"unknown_{idx}")
|
||||
try:
|
||||
async for event in _yield_tool_call(tool_calls, idx, session, session_lock):
|
||||
async for event in _yield_tool_call(tool_calls, idx, session):
|
||||
await queue.put(event)
|
||||
except (orjson.JSONDecodeError, KeyError, TypeError) as e:
|
||||
logger.error(
|
||||
@@ -1393,21 +1299,18 @@ async def _yield_tool_call(
|
||||
tool_calls: list[dict[str, Any]],
|
||||
yield_idx: int,
|
||||
session: ChatSession,
|
||||
session_lock: asyncio.Lock | None = None,
|
||||
) -> AsyncGenerator[StreamBaseResponse, None]:
|
||||
"""
|
||||
Yield a tool call and its execution result.
|
||||
|
||||
Executes tools synchronously and yields heartbeat events every 15 seconds to
|
||||
keep the SSE connection alive during execution. The is_long_running property
|
||||
is only used by the frontend to display UI feedback during long operations.
|
||||
Yields heartbeat events every 10 seconds to keep the SSE connection alive
|
||||
while the tool executes.
|
||||
|
||||
Raises:
|
||||
orjson.JSONDecodeError: If tool call arguments cannot be parsed as JSON
|
||||
KeyError: If expected tool call fields are missing
|
||||
TypeError: If tool call structure is invalid
|
||||
"""
|
||||
|
||||
tool_name = tool_calls[yield_idx]["function"]["name"]
|
||||
tool_call_id = tool_calls[yield_idx]["id"]
|
||||
|
||||
@@ -1424,17 +1327,7 @@ async def _yield_tool_call(
|
||||
input=arguments,
|
||||
)
|
||||
|
||||
# Notify frontend if this is a long-running tool (e.g., agent generation)
|
||||
tool = get_tool(tool_name)
|
||||
if tool and tool.is_long_running:
|
||||
yield StreamLongRunningStart(
|
||||
data={
|
||||
"toolCallId": tool_call_id,
|
||||
"toolName": tool_name,
|
||||
}
|
||||
)
|
||||
|
||||
# Run tool execution synchronously with heartbeats
|
||||
# Run tool execution in background task with heartbeats
|
||||
tool_task = asyncio.create_task(
|
||||
execute_tool(
|
||||
tool_name=tool_name,
|
||||
@@ -1445,8 +1338,9 @@ async def _yield_tool_call(
|
||||
)
|
||||
)
|
||||
|
||||
# Yield heartbeats every 15 seconds while waiting for tool to complete
|
||||
heartbeat_interval = 15.0 # seconds
|
||||
# Yield heartbeats every 10 seconds while waiting for tool to complete
|
||||
# IMPORTANT: Must be less than frontend timeout (12s in useCopilotPage.ts)
|
||||
heartbeat_interval = 10.0 # seconds
|
||||
while not tool_task.done():
|
||||
try:
|
||||
# Wait for either the task to complete or the heartbeat interval
|
||||
@@ -1486,396 +1380,6 @@ async def _yield_tool_call(
|
||||
yield tool_execution_response
|
||||
|
||||
|
||||
async def _execute_long_running_tool(
|
||||
tool_name: str,
|
||||
parameters: dict[str, Any],
|
||||
tool_call_id: str,
|
||||
operation_id: str,
|
||||
session_id: str,
|
||||
user_id: str | None,
|
||||
) -> None:
|
||||
"""Execute a long-running tool in background and update chat history with result.
|
||||
|
||||
This function runs independently of the SSE connection, so the operation
|
||||
survives if the user closes their browser tab.
|
||||
|
||||
NOTE: This is the legacy function without stream registry support.
|
||||
Use _execute_long_running_tool_with_streaming for new implementations.
|
||||
"""
|
||||
try:
|
||||
# Load fresh session (not stale reference)
|
||||
session = await get_chat_session(session_id, user_id)
|
||||
if not session:
|
||||
logger.error(f"Session {session_id} not found for background tool")
|
||||
return
|
||||
|
||||
# Execute the actual tool
|
||||
result = await execute_tool(
|
||||
tool_name=tool_name,
|
||||
parameters=parameters,
|
||||
tool_call_id=tool_call_id,
|
||||
user_id=user_id,
|
||||
session=session,
|
||||
)
|
||||
|
||||
# Update the pending message with result
|
||||
await _update_pending_operation(
|
||||
session_id=session_id,
|
||||
tool_call_id=tool_call_id,
|
||||
result=(
|
||||
result.output
|
||||
if isinstance(result.output, str)
|
||||
else orjson.dumps(result.output).decode("utf-8")
|
||||
),
|
||||
)
|
||||
|
||||
logger.info(f"Background tool {tool_name} completed for session {session_id}")
|
||||
|
||||
# Generate LLM continuation so user sees response when they poll/refresh
|
||||
await _generate_llm_continuation(session_id=session_id, user_id=user_id)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Background tool {tool_name} failed: {e}", exc_info=True)
|
||||
error_response = ErrorResponse(
|
||||
message=f"Tool {tool_name} failed: {str(e)}",
|
||||
)
|
||||
await _update_pending_operation(
|
||||
session_id=session_id,
|
||||
tool_call_id=tool_call_id,
|
||||
result=error_response.model_dump_json(),
|
||||
)
|
||||
# Generate LLM continuation so user sees explanation even for errors
|
||||
try:
|
||||
await _generate_llm_continuation(session_id=session_id, user_id=user_id)
|
||||
except Exception as llm_err:
|
||||
logger.warning(f"Failed to generate LLM continuation for error: {llm_err}")
|
||||
finally:
|
||||
await _mark_operation_completed(tool_call_id)
|
||||
|
||||
|
||||
async def _execute_long_running_tool_with_streaming(
|
||||
tool_name: str,
|
||||
parameters: dict[str, Any],
|
||||
tool_call_id: str,
|
||||
operation_id: str,
|
||||
task_id: str,
|
||||
session_id: str,
|
||||
user_id: str | None,
|
||||
) -> None:
|
||||
"""Execute a long-running tool with stream registry support for SSE reconnection.
|
||||
|
||||
This function runs independently of the SSE connection, publishes progress
|
||||
to the stream registry, and survives if the user closes their browser tab.
|
||||
Clients can reconnect via GET /chat/tasks/{task_id}/stream to resume streaming.
|
||||
|
||||
If the external service returns a 202 Accepted (async), this function exits
|
||||
early and lets the Redis Streams completion consumer handle the rest.
|
||||
"""
|
||||
# Track whether we delegated to async processing - if so, the Redis Streams
|
||||
# completion consumer (stream_registry / completion_consumer) will handle cleanup, not us
|
||||
delegated_to_async = False
|
||||
|
||||
try:
|
||||
# Load fresh session (not stale reference)
|
||||
session = await get_chat_session(session_id, user_id)
|
||||
if not session:
|
||||
logger.error(f"Session {session_id} not found for background tool")
|
||||
await stream_registry.mark_task_completed(task_id, status="failed")
|
||||
return
|
||||
|
||||
# Pass operation_id and task_id to the tool for async processing
|
||||
enriched_parameters = {
|
||||
**parameters,
|
||||
"_operation_id": operation_id,
|
||||
"_task_id": task_id,
|
||||
}
|
||||
|
||||
# Execute the actual tool
|
||||
result = await execute_tool(
|
||||
tool_name=tool_name,
|
||||
parameters=enriched_parameters,
|
||||
tool_call_id=tool_call_id,
|
||||
user_id=user_id,
|
||||
session=session,
|
||||
)
|
||||
|
||||
# Check if the tool result indicates async processing
|
||||
# (e.g., Agent Generator returned 202 Accepted)
|
||||
try:
|
||||
if isinstance(result.output, dict):
|
||||
result_data = result.output
|
||||
elif result.output:
|
||||
result_data = orjson.loads(result.output)
|
||||
else:
|
||||
result_data = {}
|
||||
if result_data.get("status") == "accepted":
|
||||
logger.info(
|
||||
f"Tool {tool_name} delegated to async processing "
|
||||
f"(operation_id={operation_id}, task_id={task_id}). "
|
||||
f"Redis Streams completion consumer will handle the rest."
|
||||
)
|
||||
# Don't publish result, don't continue with LLM, and don't cleanup
|
||||
# The Redis Streams consumer (completion_consumer) will handle
|
||||
# everything when the external service completes via webhook
|
||||
delegated_to_async = True
|
||||
return
|
||||
except (orjson.JSONDecodeError, TypeError):
|
||||
pass # Not JSON or not async - continue normally
|
||||
|
||||
# Publish tool result to stream registry
|
||||
await stream_registry.publish_chunk(task_id, result)
|
||||
|
||||
# Update the pending message with result
|
||||
result_str = (
|
||||
result.output
|
||||
if isinstance(result.output, str)
|
||||
else orjson.dumps(result.output).decode("utf-8")
|
||||
)
|
||||
await _update_pending_operation(
|
||||
session_id=session_id,
|
||||
tool_call_id=tool_call_id,
|
||||
result=result_str,
|
||||
)
|
||||
|
||||
logger.info(
|
||||
f"Background tool {tool_name} completed for session {session_id} "
|
||||
f"(task_id={task_id})"
|
||||
)
|
||||
|
||||
# Generate LLM continuation and stream chunks to registry
|
||||
await _generate_llm_continuation_with_streaming(
|
||||
session_id=session_id,
|
||||
user_id=user_id,
|
||||
task_id=task_id,
|
||||
)
|
||||
|
||||
# Mark task as completed in stream registry
|
||||
await stream_registry.mark_task_completed(task_id, status="completed")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Background tool {tool_name} failed: {e}", exc_info=True)
|
||||
error_response = ErrorResponse(
|
||||
message=f"Tool {tool_name} failed: {str(e)}",
|
||||
)
|
||||
|
||||
# Publish error to stream registry followed by finish event
|
||||
await stream_registry.publish_chunk(
|
||||
task_id,
|
||||
StreamError(errorText=str(e)),
|
||||
)
|
||||
await stream_registry.publish_chunk(task_id, StreamFinishStep())
|
||||
await stream_registry.publish_chunk(task_id, StreamFinish())
|
||||
|
||||
await _update_pending_operation(
|
||||
session_id=session_id,
|
||||
tool_call_id=tool_call_id,
|
||||
result=error_response.model_dump_json(),
|
||||
)
|
||||
|
||||
# Mark task as failed in stream registry
|
||||
await stream_registry.mark_task_completed(task_id, status="failed")
|
||||
finally:
|
||||
# Only cleanup if we didn't delegate to async processing
|
||||
# For async path, the Redis Streams completion consumer handles cleanup
|
||||
if not delegated_to_async:
|
||||
await _mark_operation_completed(tool_call_id)
|
||||
|
||||
|
||||
async def _update_pending_operation(
|
||||
session_id: str,
|
||||
tool_call_id: str,
|
||||
result: str,
|
||||
) -> None:
|
||||
"""Update the pending tool message with final result.
|
||||
|
||||
This is called by background tasks when long-running operations complete.
|
||||
"""
|
||||
# Update the message in database
|
||||
updated = await chat_db().update_tool_message_content(
|
||||
session_id=session_id,
|
||||
tool_call_id=tool_call_id,
|
||||
new_content=result,
|
||||
)
|
||||
|
||||
if updated:
|
||||
# Invalidate Redis cache so next load gets fresh data
|
||||
# Wrap in try/except to prevent cache failures from triggering error handling
|
||||
# that would overwrite our successful DB update
|
||||
try:
|
||||
await invalidate_session_cache(session_id)
|
||||
except Exception as e:
|
||||
# Non-critical: cache will eventually be refreshed on next load
|
||||
logger.warning(f"Failed to invalidate cache for session {session_id}: {e}")
|
||||
logger.info(
|
||||
f"Updated pending operation for tool_call_id {tool_call_id} "
|
||||
f"in session {session_id}"
|
||||
)
|
||||
else:
|
||||
logger.warning(
|
||||
f"Failed to update pending operation for tool_call_id {tool_call_id} "
|
||||
f"in session {session_id}"
|
||||
)
|
||||
|
||||
|
||||
async def _generate_llm_continuation(
|
||||
session_id: str,
|
||||
user_id: str | None,
|
||||
) -> None:
|
||||
"""Generate an LLM response after a long-running tool completes.
|
||||
|
||||
This is called by background tasks to continue the conversation
|
||||
after a tool result is saved. The response is saved to the database
|
||||
so users see it when they refresh or poll.
|
||||
"""
|
||||
try:
|
||||
# Load fresh session from DB (bypass cache to get the updated tool result)
|
||||
await invalidate_session_cache(session_id)
|
||||
session = await get_chat_session(session_id, user_id)
|
||||
if not session:
|
||||
logger.error(f"Session {session_id} not found for LLM continuation")
|
||||
return
|
||||
|
||||
# Build system prompt
|
||||
system_prompt, _ = await _build_system_prompt(user_id)
|
||||
|
||||
messages = session.to_openai_messages()
|
||||
if system_prompt:
|
||||
system_message = ChatCompletionSystemMessageParam(
|
||||
role="system",
|
||||
content=system_prompt,
|
||||
)
|
||||
messages = [system_message] + messages
|
||||
|
||||
# Apply context window management to prevent oversized requests
|
||||
context_result = await _manage_context_window(
|
||||
messages=messages,
|
||||
model=config.model,
|
||||
api_key=config.api_key,
|
||||
base_url=config.base_url,
|
||||
)
|
||||
|
||||
if context_result.error and "System prompt dropped" not in context_result.error:
|
||||
logger.error(
|
||||
f"Context window management failed for session {session_id}: "
|
||||
f"{context_result.error} (tokens={context_result.token_count})"
|
||||
)
|
||||
return
|
||||
|
||||
messages = context_result.messages
|
||||
if context_result.was_compacted:
|
||||
logger.info(
|
||||
f"Context compacted for LLM continuation: "
|
||||
f"{context_result.token_count} tokens"
|
||||
)
|
||||
|
||||
# Build extra_body for tracing
|
||||
extra_body: dict[str, Any] = {
|
||||
"posthogProperties": {
|
||||
"environment": settings.config.app_env.value,
|
||||
},
|
||||
}
|
||||
if user_id:
|
||||
extra_body["user"] = user_id[:128]
|
||||
extra_body["posthogDistinctId"] = user_id
|
||||
if session_id:
|
||||
extra_body["session_id"] = session_id[:128]
|
||||
|
||||
# Enable adaptive thinking for Anthropic models via OpenRouter
|
||||
if config.thinking_enabled and "anthropic" in config.model.lower():
|
||||
extra_body["reasoning"] = {"enabled": True}
|
||||
|
||||
retry_count = 0
|
||||
last_error: Exception | None = None
|
||||
response = None
|
||||
|
||||
while retry_count <= MAX_RETRIES:
|
||||
try:
|
||||
logger.info(
|
||||
f"Generating LLM continuation for session {session_id}"
|
||||
f"{f' (retry {retry_count}/{MAX_RETRIES})' if retry_count > 0 else ''}"
|
||||
)
|
||||
|
||||
response = await client.chat.completions.create(
|
||||
model=config.model,
|
||||
messages=cast(list[ChatCompletionMessageParam], messages),
|
||||
extra_body=extra_body,
|
||||
)
|
||||
last_error = None # Clear any previous error on success
|
||||
break # Success, exit retry loop
|
||||
except Exception as e:
|
||||
last_error = e
|
||||
|
||||
if _is_retryable_error(e) and retry_count < MAX_RETRIES:
|
||||
retry_count += 1
|
||||
delay = min(
|
||||
BASE_DELAY_SECONDS * (2 ** (retry_count - 1)),
|
||||
MAX_DELAY_SECONDS,
|
||||
)
|
||||
logger.warning(
|
||||
f"Retryable error in LLM continuation: {e!s}. "
|
||||
f"Retrying in {delay:.1f}s (attempt {retry_count}/{MAX_RETRIES})"
|
||||
)
|
||||
await asyncio.sleep(delay)
|
||||
continue
|
||||
else:
|
||||
# Non-retryable error - log details and exit gracefully
|
||||
_log_api_error(
|
||||
error=e,
|
||||
context="LLM continuation (not retrying)",
|
||||
session_id=session_id,
|
||||
message_count=len(messages) if messages else None,
|
||||
model=config.model,
|
||||
retry_count=retry_count,
|
||||
)
|
||||
return
|
||||
|
||||
if last_error:
|
||||
_log_api_error(
|
||||
error=last_error,
|
||||
context=f"LLM continuation (max retries {MAX_RETRIES} exceeded)",
|
||||
session_id=session_id,
|
||||
message_count=len(messages) if messages else None,
|
||||
model=config.model,
|
||||
retry_count=MAX_RETRIES,
|
||||
)
|
||||
return
|
||||
|
||||
if response and response.choices and response.choices[0].message.content:
|
||||
assistant_content = response.choices[0].message.content
|
||||
|
||||
# Reload session from DB to avoid race condition with user messages
|
||||
# that may have been sent while we were generating the LLM response
|
||||
fresh_session = await get_chat_session(session_id, user_id)
|
||||
if not fresh_session:
|
||||
logger.error(
|
||||
f"Session {session_id} disappeared during LLM continuation"
|
||||
)
|
||||
return
|
||||
|
||||
# Save assistant message to database
|
||||
assistant_message = ChatMessage(
|
||||
role="assistant",
|
||||
content=assistant_content,
|
||||
)
|
||||
fresh_session.messages.append(assistant_message)
|
||||
|
||||
# Save to database (not cache) to persist the response
|
||||
await upsert_chat_session(fresh_session)
|
||||
|
||||
# Invalidate cache so next poll/refresh gets fresh data
|
||||
await invalidate_session_cache(session_id)
|
||||
|
||||
logger.info(
|
||||
f"Generated LLM continuation for session {session_id}, "
|
||||
f"response length: {len(assistant_content)}"
|
||||
)
|
||||
else:
|
||||
logger.warning(f"LLM continuation returned empty response for {session_id}")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to generate LLM continuation: {e}", exc_info=True)
|
||||
|
||||
|
||||
def _log_api_error(
|
||||
error: Exception,
|
||||
context: str,
|
||||
@@ -1959,135 +1463,3 @@ def _sanitize_error_body(
|
||||
sanitized[field] = value
|
||||
|
||||
return sanitized if sanitized else None
|
||||
|
||||
|
||||
async def _generate_llm_continuation_with_streaming(
|
||||
session_id: str,
|
||||
user_id: str | None,
|
||||
task_id: str,
|
||||
) -> None:
|
||||
"""Generate an LLM response with streaming to the stream registry.
|
||||
|
||||
This is called by background tasks to continue the conversation
|
||||
after a tool result is saved. Chunks are published to the stream registry
|
||||
so reconnecting clients can receive them.
|
||||
"""
|
||||
import uuid as uuid_module
|
||||
|
||||
try:
|
||||
# Load fresh session from DB (bypass cache to get the updated tool result)
|
||||
await invalidate_session_cache(session_id)
|
||||
session = await get_chat_session(session_id, user_id)
|
||||
if not session:
|
||||
logger.error(f"Session {session_id} not found for LLM continuation")
|
||||
return
|
||||
|
||||
# Build system prompt
|
||||
system_prompt, _ = await _build_system_prompt(user_id)
|
||||
|
||||
# Build messages in OpenAI format
|
||||
messages = session.to_openai_messages()
|
||||
if system_prompt:
|
||||
from openai.types.chat import ChatCompletionSystemMessageParam
|
||||
|
||||
system_message = ChatCompletionSystemMessageParam(
|
||||
role="system",
|
||||
content=system_prompt,
|
||||
)
|
||||
messages = [system_message] + messages
|
||||
|
||||
# Build extra_body for tracing
|
||||
extra_body: dict[str, Any] = {
|
||||
"posthogProperties": {
|
||||
"environment": settings.config.app_env.value,
|
||||
},
|
||||
}
|
||||
if user_id:
|
||||
extra_body["user"] = user_id[:128]
|
||||
extra_body["posthogDistinctId"] = user_id
|
||||
if session_id:
|
||||
extra_body["session_id"] = session_id[:128]
|
||||
|
||||
# Enable adaptive thinking for Anthropic models via OpenRouter
|
||||
if config.thinking_enabled and "anthropic" in config.model.lower():
|
||||
extra_body["reasoning"] = {"enabled": True}
|
||||
|
||||
# Make streaming LLM call (no tools - just text response)
|
||||
from typing import cast
|
||||
|
||||
from openai.types.chat import ChatCompletionMessageParam
|
||||
|
||||
# Generate unique IDs for AI SDK protocol
|
||||
message_id = str(uuid_module.uuid4())
|
||||
text_block_id = str(uuid_module.uuid4())
|
||||
|
||||
# Publish start event
|
||||
await stream_registry.publish_chunk(task_id, StreamStart(messageId=message_id))
|
||||
await stream_registry.publish_chunk(task_id, StreamStartStep())
|
||||
await stream_registry.publish_chunk(task_id, StreamTextStart(id=text_block_id))
|
||||
|
||||
# Stream the response
|
||||
stream = await client.chat.completions.create(
|
||||
model=config.model,
|
||||
messages=cast(list[ChatCompletionMessageParam], messages),
|
||||
extra_body=extra_body,
|
||||
stream=True,
|
||||
)
|
||||
|
||||
assistant_content = ""
|
||||
async for chunk in stream:
|
||||
if chunk.choices and chunk.choices[0].delta.content:
|
||||
delta = chunk.choices[0].delta.content
|
||||
assistant_content += delta
|
||||
# Publish delta to stream registry
|
||||
await stream_registry.publish_chunk(
|
||||
task_id,
|
||||
StreamTextDelta(id=text_block_id, delta=delta),
|
||||
)
|
||||
|
||||
# Publish end events
|
||||
await stream_registry.publish_chunk(task_id, StreamTextEnd(id=text_block_id))
|
||||
await stream_registry.publish_chunk(task_id, StreamFinishStep())
|
||||
|
||||
if assistant_content:
|
||||
# Reload session from DB to avoid race condition with user messages
|
||||
fresh_session = await get_chat_session(session_id, user_id)
|
||||
if not fresh_session:
|
||||
logger.error(
|
||||
f"Session {session_id} disappeared during LLM continuation"
|
||||
)
|
||||
return
|
||||
|
||||
# Save assistant message to database
|
||||
assistant_message = ChatMessage(
|
||||
role="assistant",
|
||||
content=assistant_content,
|
||||
)
|
||||
fresh_session.messages.append(assistant_message)
|
||||
|
||||
# Save to database (not cache) to persist the response
|
||||
await upsert_chat_session(fresh_session)
|
||||
|
||||
# Invalidate cache so next poll/refresh gets fresh data
|
||||
await invalidate_session_cache(session_id)
|
||||
|
||||
logger.info(
|
||||
f"Generated streaming LLM continuation for session {session_id} "
|
||||
f"(task_id={task_id}), response length: {len(assistant_content)}"
|
||||
)
|
||||
else:
|
||||
logger.warning(
|
||||
f"Streaming LLM continuation returned empty response for {session_id}"
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"Failed to generate streaming LLM continuation: {e}", exc_info=True
|
||||
)
|
||||
# Publish error to stream registry followed by finish event
|
||||
await stream_registry.publish_chunk(
|
||||
task_id,
|
||||
StreamError(errorText=f"Failed to generate response: {e}"),
|
||||
)
|
||||
await stream_registry.publish_chunk(task_id, StreamFinishStep())
|
||||
await stream_registry.publish_chunk(task_id, StreamFinish())
|
||||
|
||||
@@ -6,12 +6,7 @@ import pytest
|
||||
|
||||
from . import service as chat_service
|
||||
from .model import create_chat_session, get_chat_session, upsert_chat_session
|
||||
from .response_model import (
|
||||
StreamError,
|
||||
StreamFinish,
|
||||
StreamTextDelta,
|
||||
StreamToolOutputAvailable,
|
||||
)
|
||||
from .response_model import StreamError, StreamTextDelta, StreamToolOutputAvailable
|
||||
from .sdk import service as sdk_service
|
||||
from .sdk.transcript import download_transcript
|
||||
|
||||
@@ -30,7 +25,6 @@ async def test_stream_chat_completion(setup_test_user, test_user_id):
|
||||
session = await create_chat_session(test_user_id)
|
||||
|
||||
has_errors = False
|
||||
has_ended = False
|
||||
assistant_message = ""
|
||||
async for chunk in chat_service.stream_chat_completion(
|
||||
session.session_id, "Hello, how are you?", user_id=session.user_id
|
||||
@@ -40,10 +34,9 @@ async def test_stream_chat_completion(setup_test_user, test_user_id):
|
||||
has_errors = True
|
||||
if isinstance(chunk, StreamTextDelta):
|
||||
assistant_message += chunk.delta
|
||||
if isinstance(chunk, StreamFinish):
|
||||
has_ended = True
|
||||
|
||||
assert has_ended, "Chat completion did not end"
|
||||
# StreamFinish is published by mark_session_completed (processor layer),
|
||||
# not by the service. The generator completing means the stream ended.
|
||||
assert not has_errors, "Error occurred while streaming chat completion"
|
||||
assert assistant_message, "Assistant message is empty"
|
||||
|
||||
@@ -61,7 +54,6 @@ async def test_stream_chat_completion_with_tool_calls(setup_test_user, test_user
|
||||
session = await upsert_chat_session(session)
|
||||
|
||||
has_errors = False
|
||||
has_ended = False
|
||||
had_tool_calls = False
|
||||
async for chunk in chat_service.stream_chat_completion(
|
||||
session.session_id,
|
||||
@@ -71,13 +63,9 @@ async def test_stream_chat_completion_with_tool_calls(setup_test_user, test_user
|
||||
logger.info(chunk)
|
||||
if isinstance(chunk, StreamError):
|
||||
has_errors = True
|
||||
|
||||
if isinstance(chunk, StreamFinish):
|
||||
has_ended = True
|
||||
if isinstance(chunk, StreamToolOutputAvailable):
|
||||
had_tool_calls = True
|
||||
|
||||
assert has_ended, "Chat completion did not end"
|
||||
assert not has_errors, "Error occurred while streaming chat completion"
|
||||
assert had_tool_calls, "Tool calls did not occur"
|
||||
session = await get_chat_session(session.session_id)
|
||||
@@ -114,7 +102,6 @@ async def test_sdk_resume_multi_turn(setup_test_user, test_user_id):
|
||||
)
|
||||
turn1_text = ""
|
||||
turn1_errors: list[str] = []
|
||||
turn1_ended = False
|
||||
|
||||
async for chunk in sdk_service.stream_chat_completion_sdk(
|
||||
session.session_id,
|
||||
@@ -125,10 +112,7 @@ async def test_sdk_resume_multi_turn(setup_test_user, test_user_id):
|
||||
turn1_text += chunk.delta
|
||||
elif isinstance(chunk, StreamError):
|
||||
turn1_errors.append(chunk.errorText)
|
||||
elif isinstance(chunk, StreamFinish):
|
||||
turn1_ended = True
|
||||
|
||||
assert turn1_ended, "Turn 1 did not finish"
|
||||
assert not turn1_errors, f"Turn 1 errors: {turn1_errors}"
|
||||
assert turn1_text, "Turn 1 produced no text"
|
||||
|
||||
@@ -159,7 +143,6 @@ async def test_sdk_resume_multi_turn(setup_test_user, test_user_id):
|
||||
turn2_msg = "What was the special keyword I asked you to remember?"
|
||||
turn2_text = ""
|
||||
turn2_errors: list[str] = []
|
||||
turn2_ended = False
|
||||
|
||||
async for chunk in sdk_service.stream_chat_completion_sdk(
|
||||
session.session_id,
|
||||
@@ -171,10 +154,7 @@ async def test_sdk_resume_multi_turn(setup_test_user, test_user_id):
|
||||
turn2_text += chunk.delta
|
||||
elif isinstance(chunk, StreamError):
|
||||
turn2_errors.append(chunk.errorText)
|
||||
elif isinstance(chunk, StreamFinish):
|
||||
turn2_ended = True
|
||||
|
||||
assert turn2_ended, "Turn 2 did not finish"
|
||||
assert not turn2_errors, f"Turn 2 errors: {turn2_errors}"
|
||||
assert turn2_text, "Turn 2 produced no text"
|
||||
assert keyword in turn2_text, (
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
401
autogpt_platform/backend/backend/copilot/test_copilot_e2e.py
Normal file
401
autogpt_platform/backend/backend/copilot/test_copilot_e2e.py
Normal file
@@ -0,0 +1,401 @@
|
||||
"""End-to-end tests for Copilot streaming with dummy implementations.
|
||||
|
||||
These tests verify the complete copilot flow using dummy implementations
|
||||
for agent generator and SDK service, allowing automated testing without
|
||||
external LLM calls.
|
||||
|
||||
Enable test mode with COPILOT_TEST_MODE=true environment variable.
|
||||
|
||||
Note: StreamFinish is NOT emitted by the dummy service — it is published
|
||||
by mark_session_completed in the processor layer. These tests only cover
|
||||
the service-level streaming output (StreamStart + StreamTextDelta).
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import os
|
||||
from uuid import uuid4
|
||||
|
||||
import pytest
|
||||
|
||||
from backend.copilot.model import ChatMessage, ChatSession, upsert_chat_session
|
||||
from backend.copilot.response_model import (
|
||||
StreamError,
|
||||
StreamHeartbeat,
|
||||
StreamStart,
|
||||
StreamTextDelta,
|
||||
)
|
||||
from backend.copilot.sdk.dummy import stream_chat_completion_dummy
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def enable_test_mode():
|
||||
"""Enable test mode for all tests in this module."""
|
||||
os.environ["COPILOT_TEST_MODE"] = "true"
|
||||
yield
|
||||
os.environ.pop("COPILOT_TEST_MODE", None)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_dummy_streaming_basic_flow():
|
||||
"""Test that dummy streaming produces correct event sequence."""
|
||||
events = []
|
||||
|
||||
async for event in stream_chat_completion_dummy(
|
||||
session_id="test-session-basic",
|
||||
message="Hello",
|
||||
is_user_message=True,
|
||||
user_id="test-user",
|
||||
):
|
||||
events.append(event)
|
||||
|
||||
# Verify we got events
|
||||
assert len(events) > 0, "Should receive events"
|
||||
|
||||
# Verify StreamStart
|
||||
start_events = [e for e in events if isinstance(e, StreamStart)]
|
||||
assert len(start_events) == 1
|
||||
assert start_events[0].messageId
|
||||
assert start_events[0].sessionId
|
||||
|
||||
# Verify StreamTextDelta events
|
||||
text_events = [e for e in events if isinstance(e, StreamTextDelta)]
|
||||
assert len(text_events) > 0
|
||||
full_text = "".join(e.delta for e in text_events)
|
||||
assert len(full_text) > 0
|
||||
|
||||
# Verify order: start before text
|
||||
start_idx = events.index(start_events[0])
|
||||
first_text_idx = events.index(text_events[0]) if text_events else -1
|
||||
if first_text_idx >= 0:
|
||||
assert start_idx < first_text_idx
|
||||
|
||||
print(f"✅ Basic flow: {len(events)} events, {len(text_events)} text deltas")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_streaming_no_timeout():
|
||||
"""Test that streaming completes within reasonable time without timeout."""
|
||||
import time
|
||||
|
||||
start_time = time.monotonic()
|
||||
event_count = 0
|
||||
|
||||
async for _event in stream_chat_completion_dummy(
|
||||
session_id="test-session-timeout",
|
||||
message="count to 10",
|
||||
is_user_message=True,
|
||||
user_id="test-user",
|
||||
):
|
||||
event_count += 1
|
||||
|
||||
elapsed = time.monotonic() - start_time
|
||||
|
||||
# Should complete in < 5 seconds (dummy has 0.1s delays between words)
|
||||
assert elapsed < 5.0, f"Streaming took {elapsed:.1f}s, expected < 5s"
|
||||
assert event_count > 0, "Should receive events"
|
||||
|
||||
print(f"✅ No timeout: completed in {elapsed:.2f}s with {event_count} events")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_streaming_event_types():
|
||||
"""Test that all expected event types are present."""
|
||||
event_types = set()
|
||||
|
||||
async for event in stream_chat_completion_dummy(
|
||||
session_id="test-session-types",
|
||||
message="test",
|
||||
is_user_message=True,
|
||||
user_id="test-user",
|
||||
):
|
||||
event_types.add(type(event).__name__)
|
||||
|
||||
# Required event types (StreamFinish is published by processor, not service)
|
||||
assert "StreamStart" in event_types, "Missing StreamStart"
|
||||
assert "StreamTextDelta" in event_types, "Missing StreamTextDelta"
|
||||
|
||||
print(f"✅ Event types: {sorted(event_types)}")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_streaming_text_content():
|
||||
"""Test that streamed text is coherent and complete."""
|
||||
text_events = []
|
||||
|
||||
async for event in stream_chat_completion_dummy(
|
||||
session_id="test-session-content",
|
||||
message="count to 3",
|
||||
is_user_message=True,
|
||||
user_id="test-user",
|
||||
):
|
||||
if isinstance(event, StreamTextDelta):
|
||||
text_events.append(event)
|
||||
|
||||
# Verify text deltas
|
||||
assert len(text_events) > 0, "Should have text deltas"
|
||||
|
||||
# Reconstruct full text
|
||||
full_text = "".join(e.delta for e in text_events)
|
||||
assert len(full_text) > 0, "Text should not be empty"
|
||||
assert (
|
||||
"1" in full_text or "counted" in full_text.lower()
|
||||
), "Text should contain count"
|
||||
|
||||
# Verify all deltas have IDs
|
||||
for text_event in text_events:
|
||||
assert text_event.id, "Text delta must have ID"
|
||||
assert text_event.delta, "Text delta must have content"
|
||||
|
||||
print(f"✅ Text content: '{full_text}' ({len(text_events)} deltas)")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_streaming_heartbeat_timing():
|
||||
"""Test that heartbeats are sent at correct interval during long operations."""
|
||||
# This test would need a dummy that takes longer
|
||||
# For now, just verify heartbeat structure if we receive one
|
||||
heartbeats = []
|
||||
|
||||
async for event in stream_chat_completion_dummy(
|
||||
session_id="test-session-heartbeat",
|
||||
message="test",
|
||||
is_user_message=True,
|
||||
user_id="test-user",
|
||||
):
|
||||
if isinstance(event, StreamHeartbeat):
|
||||
heartbeats.append(event)
|
||||
|
||||
# Dummy is fast, so we might not get heartbeats
|
||||
# But if we do, verify they're valid
|
||||
if heartbeats:
|
||||
print(f"✅ Heartbeat structure verified ({len(heartbeats)} received)")
|
||||
else:
|
||||
print("✅ No heartbeats (dummy executes quickly)")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_error_handling():
|
||||
"""Test that errors are properly formatted and sent."""
|
||||
# This would require a dummy that can trigger errors
|
||||
# For now, just verify error event structure
|
||||
|
||||
error = StreamError(errorText="Test error", code="test_error")
|
||||
assert error.errorText == "Test error"
|
||||
assert error.code == "test_error"
|
||||
assert str(error.type.value) in ["error", "error"]
|
||||
|
||||
print("✅ Error structure verified")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_concurrent_sessions():
|
||||
"""Test that multiple sessions can stream concurrently."""
|
||||
|
||||
async def stream_session(session_id: str) -> int:
|
||||
count = 0
|
||||
async for _event in stream_chat_completion_dummy(
|
||||
session_id=session_id,
|
||||
message="test",
|
||||
is_user_message=True,
|
||||
user_id="test-user",
|
||||
):
|
||||
count += 1
|
||||
return count
|
||||
|
||||
# Run 3 concurrent sessions
|
||||
results = await asyncio.gather(
|
||||
stream_session("session-1"),
|
||||
stream_session("session-2"),
|
||||
stream_session("session-3"),
|
||||
)
|
||||
|
||||
# All should complete successfully
|
||||
assert all(count > 0 for count in results), "All sessions should produce events"
|
||||
print(f"✅ Concurrent sessions: {results} events each")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
@pytest.mark.xfail(
|
||||
reason="Event loop isolation issue with DB operations in tests - needs fixture refactoring"
|
||||
)
|
||||
async def test_session_state_persistence():
|
||||
"""Test that session state is maintained across multiple messages."""
|
||||
from datetime import datetime, timezone
|
||||
|
||||
session_id = f"test-session-{uuid4()}"
|
||||
user_id = "test-user"
|
||||
|
||||
# Create session with first message
|
||||
session = ChatSession(
|
||||
session_id=session_id,
|
||||
user_id=user_id,
|
||||
messages=[
|
||||
ChatMessage(role="user", content="Hello"),
|
||||
ChatMessage(role="assistant", content="Hi there!"),
|
||||
],
|
||||
usage=[],
|
||||
started_at=datetime.now(timezone.utc),
|
||||
updated_at=datetime.now(timezone.utc),
|
||||
)
|
||||
await upsert_chat_session(session)
|
||||
|
||||
# Stream second message
|
||||
events = []
|
||||
async for event in stream_chat_completion_dummy(
|
||||
session_id=session_id,
|
||||
message="How are you?",
|
||||
is_user_message=True,
|
||||
user_id=user_id,
|
||||
session=session, # Pass existing session
|
||||
):
|
||||
events.append(event)
|
||||
|
||||
# Verify events were produced
|
||||
assert len(events) > 0, "Should produce events for second message"
|
||||
|
||||
print(f"✅ Session persistence: {len(events)} events for second message")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_message_deduplication():
|
||||
"""Test that duplicate messages are filtered out."""
|
||||
|
||||
# Simulate receiving duplicate events (e.g., from reconnection)
|
||||
events = []
|
||||
|
||||
# First stream
|
||||
async for event in stream_chat_completion_dummy(
|
||||
session_id="test-dedup-1",
|
||||
message="Hello",
|
||||
is_user_message=True,
|
||||
user_id="test-user",
|
||||
):
|
||||
events.append(event)
|
||||
|
||||
# Count unique message IDs in StreamStart events
|
||||
start_events = [e for e in events if isinstance(e, StreamStart)]
|
||||
message_ids = [e.messageId for e in start_events]
|
||||
|
||||
# Verify all IDs are present
|
||||
assert len(message_ids) == len(set(message_ids)), "Message IDs should be unique"
|
||||
|
||||
print(f"✅ Deduplication: {len(events)} events, all unique")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_event_ordering():
|
||||
"""Test that events arrive in correct order."""
|
||||
events = []
|
||||
|
||||
async for event in stream_chat_completion_dummy(
|
||||
session_id="test-ordering",
|
||||
message="Test",
|
||||
is_user_message=True,
|
||||
user_id="test-user",
|
||||
):
|
||||
events.append(event)
|
||||
|
||||
# Find event indices
|
||||
start_idx = next(
|
||||
(i for i, e in enumerate(events) if isinstance(e, StreamStart)), None
|
||||
)
|
||||
text_indices = [i for i, e in enumerate(events) if isinstance(e, StreamTextDelta)]
|
||||
|
||||
# Verify ordering
|
||||
assert start_idx is not None, "Should have StreamStart"
|
||||
assert start_idx == 0, "StreamStart should be first"
|
||||
|
||||
if text_indices:
|
||||
assert all(
|
||||
start_idx < i for i in text_indices
|
||||
), "Text deltas should be after start"
|
||||
|
||||
print(f"✅ Event ordering: start({start_idx}) < text deltas")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_stream_completeness():
|
||||
"""Test that stream includes all required event types."""
|
||||
events = []
|
||||
|
||||
async for event in stream_chat_completion_dummy(
|
||||
session_id="test-completeness",
|
||||
message="Complete stream test",
|
||||
is_user_message=True,
|
||||
user_id="test-user",
|
||||
):
|
||||
events.append(event)
|
||||
|
||||
# Check for required events (StreamFinish is published by processor)
|
||||
has_start = any(isinstance(e, StreamStart) for e in events)
|
||||
has_text = any(isinstance(e, StreamTextDelta) for e in events)
|
||||
|
||||
assert has_start, "Stream must include StreamStart"
|
||||
assert has_text, "Stream must include text deltas"
|
||||
|
||||
# Verify exactly one start
|
||||
start_count = sum(1 for e in events if isinstance(e, StreamStart))
|
||||
assert start_count == 1, f"Should have exactly 1 StreamStart, got {start_count}"
|
||||
|
||||
print(
|
||||
f"✅ Completeness: 1 start, {sum(1 for e in events if isinstance(e, StreamTextDelta))} text deltas"
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_text_delta_consistency():
|
||||
"""Test that text deltas have consistent IDs and build coherent text."""
|
||||
text_events = []
|
||||
|
||||
async for event in stream_chat_completion_dummy(
|
||||
session_id="test-consistency",
|
||||
message="Test consistency",
|
||||
is_user_message=True,
|
||||
user_id="test-user",
|
||||
):
|
||||
if isinstance(event, StreamTextDelta):
|
||||
text_events.append(event)
|
||||
|
||||
# Verify all text deltas have IDs
|
||||
assert all(e.id for e in text_events), "All text deltas must have IDs"
|
||||
|
||||
# Verify all deltas have the same ID (same text block)
|
||||
if text_events:
|
||||
first_id = text_events[0].id
|
||||
assert all(
|
||||
e.id == first_id for e in text_events
|
||||
), "All text deltas should share the same block ID"
|
||||
|
||||
# Verify deltas build coherent text
|
||||
full_text = "".join(e.delta for e in text_events)
|
||||
assert len(full_text) > 0, "Deltas should build non-empty text"
|
||||
assert (
|
||||
full_text == full_text.strip()
|
||||
), "Text should not have leading/trailing whitespace artifacts"
|
||||
|
||||
print(
|
||||
f"✅ Consistency: {len(text_events)} deltas with ID '{text_events[0].id if text_events else 'N/A'}', text: '{full_text}'"
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
# Run tests directly
|
||||
|
||||
print("Running Copilot E2E tests with dummy implementations...")
|
||||
print("=" * 60)
|
||||
|
||||
asyncio.run(test_dummy_streaming_basic_flow())
|
||||
asyncio.run(test_streaming_no_timeout())
|
||||
asyncio.run(test_streaming_event_types())
|
||||
asyncio.run(test_streaming_text_content())
|
||||
asyncio.run(test_streaming_heartbeat_timing())
|
||||
asyncio.run(test_error_handling())
|
||||
asyncio.run(test_concurrent_sessions())
|
||||
asyncio.run(test_session_state_persistence())
|
||||
asyncio.run(test_message_deduplication())
|
||||
asyncio.run(test_event_ordering())
|
||||
asyncio.run(test_stream_completeness())
|
||||
asyncio.run(test_text_delta_consistency())
|
||||
|
||||
print("=" * 60)
|
||||
print("✅ All E2E tests passed!")
|
||||
@@ -10,7 +10,6 @@ from .add_understanding import AddUnderstandingTool
|
||||
from .agent_output import AgentOutputTool
|
||||
from .base import BaseTool
|
||||
from .bash_exec import BashExecTool
|
||||
from .check_operation_status import CheckOperationStatusTool
|
||||
from .create_agent import CreateAgentTool
|
||||
from .customize_agent import CustomizeAgentTool
|
||||
from .edit_agent import EditAgentTool
|
||||
@@ -47,7 +46,6 @@ TOOL_REGISTRY: dict[str, BaseTool] = {
|
||||
"run_agent": RunAgentTool(),
|
||||
"run_block": RunBlockTool(),
|
||||
"view_agent_output": AgentOutputTool(),
|
||||
"check_operation_status": CheckOperationStatusTool(),
|
||||
"search_docs": SearchDocsTool(),
|
||||
"get_doc_page": GetDocPageTool(),
|
||||
# Web fetch for safe URL retrieval
|
||||
|
||||
@@ -1,8 +1,10 @@
|
||||
import logging
|
||||
import uuid
|
||||
from datetime import UTC, datetime
|
||||
from os import getenv
|
||||
|
||||
import pytest
|
||||
import pytest_asyncio
|
||||
from prisma.types import ProfileCreateInput
|
||||
from pydantic import SecretStr
|
||||
|
||||
@@ -11,12 +13,34 @@ from backend.blocks.firecrawl.scrape import FirecrawlScrapeBlock
|
||||
from backend.blocks.io import AgentInputBlock, AgentOutputBlock
|
||||
from backend.blocks.llm import AITextGeneratorBlock
|
||||
from backend.copilot.model import ChatSession
|
||||
from backend.data import db as db_module
|
||||
from backend.data.db import prisma
|
||||
from backend.data.graph import Graph, Link, Node, create_graph
|
||||
from backend.data.model import APIKeyCredentials
|
||||
from backend.data.user import get_or_create_user
|
||||
from backend.integrations.credentials_store import IntegrationCredentialsStore
|
||||
|
||||
_logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
async def _ensure_db_connected() -> None:
|
||||
"""Ensure the Prisma connection is alive on the current event loop.
|
||||
|
||||
On Python 3.11, the httpx transport inside Prisma can reference a stale
|
||||
(closed) event loop when session-scoped async fixtures are evaluated long
|
||||
after the initial ``server`` fixture connected Prisma. A cheap health-check
|
||||
followed by a reconnect fixes this without affecting other fixtures.
|
||||
"""
|
||||
try:
|
||||
await prisma.query_raw("SELECT 1")
|
||||
except Exception:
|
||||
_logger.info("Prisma connection stale – reconnecting")
|
||||
try:
|
||||
await db_module.disconnect()
|
||||
except Exception:
|
||||
pass
|
||||
await db_module.connect()
|
||||
|
||||
|
||||
def make_session(user_id: str):
|
||||
return ChatSession(
|
||||
@@ -31,15 +55,19 @@ def make_session(user_id: str):
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture(scope="session")
|
||||
async def setup_test_data():
|
||||
@pytest_asyncio.fixture(scope="session", loop_scope="session")
|
||||
async def setup_test_data(server):
|
||||
"""
|
||||
Set up test data for run_agent tests:
|
||||
1. Create a test user
|
||||
2. Create a test graph (agent input -> agent output)
|
||||
3. Create a store listing and store listing version
|
||||
4. Approve the store listing version
|
||||
|
||||
Depends on ``server`` to ensure Prisma is connected.
|
||||
"""
|
||||
await _ensure_db_connected()
|
||||
|
||||
# 1. Create a test user
|
||||
user_data = {
|
||||
"sub": f"test-user-{uuid.uuid4()}",
|
||||
@@ -150,15 +178,19 @@ async def setup_test_data():
|
||||
}
|
||||
|
||||
|
||||
@pytest.fixture(scope="session")
|
||||
async def setup_llm_test_data():
|
||||
@pytest_asyncio.fixture(scope="session", loop_scope="session")
|
||||
async def setup_llm_test_data(server):
|
||||
"""
|
||||
Set up test data for LLM agent tests:
|
||||
1. Create a test user
|
||||
2. Create test OpenAI credentials for the user
|
||||
3. Create a test graph with input -> LLM block -> output
|
||||
4. Create and approve a store listing
|
||||
|
||||
Depends on ``server`` to ensure Prisma is connected.
|
||||
"""
|
||||
await _ensure_db_connected()
|
||||
|
||||
key = getenv("OPENAI_API_KEY")
|
||||
if not key:
|
||||
return pytest.skip("OPENAI_API_KEY is not set")
|
||||
@@ -315,14 +347,18 @@ async def setup_llm_test_data():
|
||||
}
|
||||
|
||||
|
||||
@pytest.fixture(scope="session")
|
||||
async def setup_firecrawl_test_data():
|
||||
@pytest_asyncio.fixture(scope="session", loop_scope="session")
|
||||
async def setup_firecrawl_test_data(server):
|
||||
"""
|
||||
Set up test data for Firecrawl agent tests (missing credentials scenario):
|
||||
1. Create a test user (WITHOUT Firecrawl credentials)
|
||||
2. Create a test graph with input -> Firecrawl block -> output
|
||||
3. Create and approve a store listing
|
||||
|
||||
Depends on ``server`` to ensure Prisma is connected.
|
||||
"""
|
||||
await _ensure_db_connected()
|
||||
|
||||
# 1. Create a test user
|
||||
user_data = {
|
||||
"sub": f"test-user-{uuid.uuid4()}",
|
||||
|
||||
@@ -19,6 +19,7 @@ from .core import (
|
||||
get_all_relevant_agents_for_generation,
|
||||
get_library_agent_by_graph_id,
|
||||
get_library_agent_by_id,
|
||||
get_library_agents_by_ids,
|
||||
get_library_agents_for_generation,
|
||||
graph_to_json,
|
||||
json_to_graph,
|
||||
@@ -49,6 +50,7 @@ __all__ = [
|
||||
"get_all_relevant_agents_for_generation",
|
||||
"get_library_agent_by_graph_id",
|
||||
"get_library_agent_by_id",
|
||||
"get_library_agents_by_ids",
|
||||
"get_library_agents_for_generation",
|
||||
"get_user_message_for_error",
|
||||
"graph_to_json",
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
import logging
|
||||
import re
|
||||
import uuid
|
||||
from collections.abc import Sequence
|
||||
from typing import Any, NotRequired, TypedDict
|
||||
|
||||
from backend.data.db_accessors import graph_db, library_db, store_db
|
||||
@@ -78,7 +79,7 @@ AgentSummary = LibraryAgentSummary | MarketplaceAgentSummary | dict[str, Any]
|
||||
|
||||
|
||||
def _to_dict_list(
|
||||
agents: list[AgentSummary] | list[dict[str, Any]] | None,
|
||||
agents: Sequence[AgentSummary] | Sequence[dict[str, Any]] | None,
|
||||
) -> list[dict[str, Any]] | None:
|
||||
"""Convert typed agent summaries to plain dicts for external service calls."""
|
||||
if agents is None:
|
||||
@@ -190,6 +191,36 @@ async def get_library_agent_by_id(
|
||||
get_library_agent_by_graph_id = get_library_agent_by_id
|
||||
|
||||
|
||||
async def get_library_agents_by_ids(
|
||||
user_id: str,
|
||||
agent_ids: list[str],
|
||||
) -> list[LibraryAgentSummary]:
|
||||
"""Fetch multiple library agents by their IDs.
|
||||
|
||||
Args:
|
||||
user_id: The user ID
|
||||
agent_ids: List of agent IDs (can be graph_ids or library agent IDs)
|
||||
|
||||
Returns:
|
||||
List of LibraryAgentSummary for found agents (silently skips not found)
|
||||
"""
|
||||
agents: list[LibraryAgentSummary] = []
|
||||
for agent_id in agent_ids:
|
||||
try:
|
||||
agent = await get_library_agent_by_id(user_id, agent_id)
|
||||
if agent:
|
||||
agents.append(agent)
|
||||
logger.debug(f"Fetched library agent by ID: {agent['name']}")
|
||||
else:
|
||||
logger.warning(f"Library agent not found for ID: {agent_id}")
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to fetch library agent {agent_id}: {e}")
|
||||
continue
|
||||
|
||||
logger.info(f"Fetched {len(agents)}/{len(agent_ids)} library agents by ID")
|
||||
return agents
|
||||
|
||||
|
||||
async def get_library_agents_for_generation(
|
||||
user_id: str,
|
||||
search_query: str | None = None,
|
||||
@@ -214,10 +245,17 @@ async def get_library_agents_for_generation(
|
||||
Returns:
|
||||
List of LibraryAgentSummary with schemas and recent executions for sub-agent composition
|
||||
"""
|
||||
search_term = search_query.strip() if search_query else None
|
||||
if search_term and len(search_term) > 100:
|
||||
raise ValueError(
|
||||
f"Search query is too long ({len(search_term)} chars, max 100). "
|
||||
f"Please use a shorter, more specific search term."
|
||||
)
|
||||
|
||||
try:
|
||||
response = await library_db().list_library_agents(
|
||||
user_id=user_id,
|
||||
search_term=search_query,
|
||||
search_term=search_term,
|
||||
page=1,
|
||||
page_size=max_results,
|
||||
include_executions=True,
|
||||
@@ -271,9 +309,16 @@ async def search_marketplace_agents_for_generation(
|
||||
Returns:
|
||||
List of LibraryAgentSummary with full input/output schemas
|
||||
"""
|
||||
search_term = search_query.strip()
|
||||
if len(search_term) > 100:
|
||||
raise ValueError(
|
||||
f"Search query is too long ({len(search_term)} chars, max 100). "
|
||||
f"Please use a shorter, more specific search term."
|
||||
)
|
||||
|
||||
try:
|
||||
response = await store_db().get_store_agents(
|
||||
search_query=search_query,
|
||||
search_query=search_term,
|
||||
page=1,
|
||||
page_size=max_results,
|
||||
)
|
||||
@@ -424,7 +469,7 @@ def extract_search_terms_from_steps(
|
||||
async def enrich_library_agents_from_steps(
|
||||
user_id: str,
|
||||
decomposition_result: DecompositionResult | dict[str, Any],
|
||||
existing_agents: list[AgentSummary] | list[dict[str, Any]],
|
||||
existing_agents: Sequence[AgentSummary] | Sequence[dict[str, Any]],
|
||||
exclude_graph_id: str | None = None,
|
||||
include_marketplace: bool = True,
|
||||
max_additional_results: int = 10,
|
||||
@@ -448,7 +493,7 @@ async def enrich_library_agents_from_steps(
|
||||
search_terms = extract_search_terms_from_steps(decomposition_result)
|
||||
|
||||
if not search_terms:
|
||||
return existing_agents
|
||||
return list(existing_agents)
|
||||
|
||||
existing_ids: set[str] = set()
|
||||
existing_names: set[str] = set()
|
||||
@@ -511,7 +556,7 @@ async def enrich_library_agents_from_steps(
|
||||
async def decompose_goal(
|
||||
description: str,
|
||||
context: str = "",
|
||||
library_agents: list[AgentSummary] | None = None,
|
||||
library_agents: Sequence[AgentSummary] | None = None,
|
||||
) -> DecompositionResult | None:
|
||||
"""Break down a goal into steps or return clarifying questions.
|
||||
|
||||
@@ -539,7 +584,7 @@ async def decompose_goal(
|
||||
|
||||
async def generate_agent(
|
||||
instructions: DecompositionResult | dict[str, Any],
|
||||
library_agents: list[AgentSummary] | list[dict[str, Any]] | None = None,
|
||||
library_agents: Sequence[AgentSummary] | Sequence[dict[str, Any]] | None = None,
|
||||
) -> dict[str, Any] | None:
|
||||
"""Generate agent JSON from instructions.
|
||||
|
||||
@@ -748,7 +793,7 @@ async def get_agent_as_json(
|
||||
async def generate_agent_patch(
|
||||
update_request: str,
|
||||
current_agent: dict[str, Any],
|
||||
library_agents: list[AgentSummary] | None = None,
|
||||
library_agents: Sequence[AgentSummary] | None = None,
|
||||
) -> dict[str, Any] | None:
|
||||
"""Update an existing agent using natural language.
|
||||
|
||||
|
||||
@@ -101,9 +101,16 @@ async def decompose_goal_dummy(
|
||||
async def generate_agent_dummy(
|
||||
instructions: dict[str, Any],
|
||||
library_agents: list[dict[str, Any]] | None = None,
|
||||
operation_id: str | None = None,
|
||||
session_id: str | None = None,
|
||||
) -> dict[str, Any]:
|
||||
"""Return dummy agent JSON after a simulated delay."""
|
||||
logger.info("Using dummy agent generator for generate_agent (30s delay)")
|
||||
"""Return dummy agent synchronously (blocks for 30s, returns agent JSON).
|
||||
|
||||
Note: operation_id and session_id parameters are ignored - we always use synchronous mode.
|
||||
"""
|
||||
logger.info(
|
||||
"Using dummy agent generator (sync mode): returning agent JSON after 30s"
|
||||
)
|
||||
await asyncio.sleep(30)
|
||||
return _generate_dummy_agent_json()
|
||||
|
||||
@@ -112,9 +119,17 @@ async def generate_agent_patch_dummy(
|
||||
update_request: str,
|
||||
current_agent: dict[str, Any],
|
||||
library_agents: list[dict[str, Any]] | None = None,
|
||||
operation_id: str | None = None,
|
||||
session_id: str | None = None,
|
||||
) -> dict[str, Any]:
|
||||
"""Return dummy patched agent (returns the current agent with updated description)."""
|
||||
logger.info("Using dummy agent generator for generate_agent_patch")
|
||||
"""Return dummy patched agent synchronously (blocks for 30s, returns patched agent JSON).
|
||||
|
||||
Note: operation_id and session_id parameters are ignored - we always use synchronous mode.
|
||||
"""
|
||||
logger.info(
|
||||
"Using dummy agent generator patch (sync mode): returning patched agent after 30s"
|
||||
)
|
||||
await asyncio.sleep(30)
|
||||
patched = current_agent.copy()
|
||||
patched["description"] = (
|
||||
f"{current_agent.get('description', '')} (updated: {update_request})"
|
||||
|
||||
@@ -302,9 +302,11 @@ async def generate_agent_patch_external(
|
||||
update_request: Natural language description of changes
|
||||
current_agent: Current agent JSON
|
||||
library_agents: User's library agents available for sub-agent composition
|
||||
operation_id: Operation ID for async processing (enables Redis Streams callback)
|
||||
session_id: Session ID for async processing (enables Redis Streams callback)
|
||||
|
||||
Returns:
|
||||
Updated agent JSON, clarifying questions dict, or error dict on error
|
||||
Updated agent JSON, clarifying questions dict, {"status": "accepted"} for async, or error dict on error
|
||||
"""
|
||||
if _is_dummy_mode():
|
||||
return await generate_agent_patch_dummy(
|
||||
@@ -377,6 +379,8 @@ async def customize_template_external(
|
||||
template_agent: The template agent JSON to customize
|
||||
modification_request: Natural language description of customizations
|
||||
context: Additional context (e.g., answers to previous questions)
|
||||
operation_id: Operation ID for async processing (enables Redis Streams callback)
|
||||
session_id: Session ID for async processing (enables Redis Streams callback)
|
||||
|
||||
Returns:
|
||||
Customized agent JSON, clarifying questions dict, or error dict on error
|
||||
|
||||
@@ -5,7 +5,7 @@ import re
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from typing import Any
|
||||
|
||||
from pydantic import BaseModel, field_validator
|
||||
from pydantic import BaseModel, Field, field_validator
|
||||
|
||||
from backend.api.features.library.model import LibraryAgent
|
||||
from backend.copilot.model import ChatSession
|
||||
@@ -13,6 +13,7 @@ from backend.data.db_accessors import execution_db, library_db
|
||||
from backend.data.execution import ExecutionStatus, GraphExecution, GraphExecutionMeta
|
||||
|
||||
from .base import BaseTool
|
||||
from .execution_utils import TERMINAL_STATUSES, wait_for_execution
|
||||
from .models import (
|
||||
AgentOutputResponse,
|
||||
ErrorResponse,
|
||||
@@ -33,6 +34,7 @@ class AgentOutputInput(BaseModel):
|
||||
store_slug: str = ""
|
||||
execution_id: str = ""
|
||||
run_time: str = "latest"
|
||||
wait_if_running: int = Field(default=0, ge=0, le=300)
|
||||
|
||||
@field_validator(
|
||||
"agent_name",
|
||||
@@ -116,6 +118,11 @@ class AgentOutputTool(BaseTool):
|
||||
Select which run to retrieve using:
|
||||
- execution_id: Specific execution ID
|
||||
- run_time: 'latest' (default), 'yesterday', 'last week', or ISO date 'YYYY-MM-DD'
|
||||
|
||||
Wait for completion (optional):
|
||||
- wait_if_running: Max seconds to wait if execution is still running (0-300).
|
||||
If the execution is running/queued, waits up to this many seconds for completion.
|
||||
Returns current status on timeout. If already finished, returns immediately.
|
||||
"""
|
||||
|
||||
@property
|
||||
@@ -145,6 +152,13 @@ class AgentOutputTool(BaseTool):
|
||||
"Time filter: 'latest', 'yesterday', 'last week', or 'YYYY-MM-DD'"
|
||||
),
|
||||
},
|
||||
"wait_if_running": {
|
||||
"type": "integer",
|
||||
"description": (
|
||||
"Max seconds to wait if execution is still running (0-300). "
|
||||
"If running, waits for completion. Returns current state on timeout."
|
||||
),
|
||||
},
|
||||
},
|
||||
"required": [],
|
||||
}
|
||||
@@ -224,10 +238,14 @@ class AgentOutputTool(BaseTool):
|
||||
execution_id: str | None,
|
||||
time_start: datetime | None,
|
||||
time_end: datetime | None,
|
||||
include_running: bool = False,
|
||||
) -> tuple[GraphExecution | None, list[GraphExecutionMeta], str | None]:
|
||||
"""
|
||||
Fetch execution(s) based on filters.
|
||||
Returns (single_execution, available_executions_meta, error_message).
|
||||
|
||||
Args:
|
||||
include_running: If True, also look for running/queued executions (for waiting)
|
||||
"""
|
||||
exec_db = execution_db()
|
||||
|
||||
@@ -242,11 +260,25 @@ class AgentOutputTool(BaseTool):
|
||||
return None, [], f"Execution '{execution_id}' not found"
|
||||
return execution, [], None
|
||||
|
||||
# Get completed executions with time filters
|
||||
# Determine which statuses to query
|
||||
statuses = [ExecutionStatus.COMPLETED]
|
||||
if include_running:
|
||||
statuses.extend(
|
||||
[
|
||||
ExecutionStatus.RUNNING,
|
||||
ExecutionStatus.QUEUED,
|
||||
ExecutionStatus.INCOMPLETE,
|
||||
ExecutionStatus.REVIEW,
|
||||
ExecutionStatus.FAILED,
|
||||
ExecutionStatus.TERMINATED,
|
||||
]
|
||||
)
|
||||
|
||||
# Get executions with time filters
|
||||
executions = await exec_db.get_graph_executions(
|
||||
graph_id=graph_id,
|
||||
user_id=user_id,
|
||||
statuses=[ExecutionStatus.COMPLETED],
|
||||
statuses=statuses,
|
||||
created_time_gte=time_start,
|
||||
created_time_lte=time_end,
|
||||
limit=10,
|
||||
@@ -313,10 +345,33 @@ class AgentOutputTool(BaseTool):
|
||||
for e in available_executions[:5]
|
||||
]
|
||||
|
||||
message = f"Found execution outputs for agent '{agent.name}'"
|
||||
# Build appropriate message based on execution status
|
||||
if execution.status == ExecutionStatus.COMPLETED:
|
||||
message = f"Found execution outputs for agent '{agent.name}'"
|
||||
elif execution.status == ExecutionStatus.FAILED:
|
||||
message = f"Execution for agent '{agent.name}' failed"
|
||||
elif execution.status == ExecutionStatus.TERMINATED:
|
||||
message = f"Execution for agent '{agent.name}' was terminated"
|
||||
elif execution.status == ExecutionStatus.REVIEW:
|
||||
message = (
|
||||
f"Execution for agent '{agent.name}' is awaiting human review. "
|
||||
"The user needs to approve it before it can continue."
|
||||
)
|
||||
elif execution.status in (
|
||||
ExecutionStatus.RUNNING,
|
||||
ExecutionStatus.QUEUED,
|
||||
ExecutionStatus.INCOMPLETE,
|
||||
):
|
||||
message = (
|
||||
f"Execution for agent '{agent.name}' is still {execution.status.value}. "
|
||||
"Results may be incomplete. Use wait_if_running to wait for completion."
|
||||
)
|
||||
else:
|
||||
message = f"Found execution for agent '{agent.name}' (status: {execution.status.value})"
|
||||
|
||||
if len(available_executions) > 1:
|
||||
message += (
|
||||
f". Showing latest of {len(available_executions)} matching executions."
|
||||
f" Showing latest of {len(available_executions)} matching executions."
|
||||
)
|
||||
|
||||
return AgentOutputResponse(
|
||||
@@ -431,13 +486,17 @@ class AgentOutputTool(BaseTool):
|
||||
# Parse time expression
|
||||
time_start, time_end = parse_time_expression(input_data.run_time)
|
||||
|
||||
# Fetch execution(s)
|
||||
# Check if we should wait for running executions
|
||||
wait_timeout = input_data.wait_if_running
|
||||
|
||||
# Fetch execution(s) - include running if we're going to wait
|
||||
execution, available_executions, exec_error = await self._get_execution(
|
||||
user_id=user_id,
|
||||
graph_id=agent.graph_id,
|
||||
execution_id=input_data.execution_id or None,
|
||||
time_start=time_start,
|
||||
time_end=time_end,
|
||||
include_running=wait_timeout > 0,
|
||||
)
|
||||
|
||||
if exec_error:
|
||||
@@ -446,4 +505,17 @@ class AgentOutputTool(BaseTool):
|
||||
session_id=session_id,
|
||||
)
|
||||
|
||||
# If we have an execution that's still running and we should wait
|
||||
if execution and wait_timeout > 0 and execution.status not in TERMINAL_STATUSES:
|
||||
logger.info(
|
||||
f"Execution {execution.id} is {execution.status}, "
|
||||
f"waiting up to {wait_timeout}s for completion"
|
||||
)
|
||||
execution = await wait_for_execution(
|
||||
user_id=user_id,
|
||||
graph_id=agent.graph_id,
|
||||
execution_id=execution.id,
|
||||
timeout_seconds=wait_timeout,
|
||||
)
|
||||
|
||||
return self._build_response(agent, execution, available_executions, session_id)
|
||||
|
||||
@@ -1,8 +1,13 @@
|
||||
"""Shared agent search functionality for find_agent and find_library_agent tools."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import re
|
||||
from typing import Literal
|
||||
from typing import TYPE_CHECKING, Literal
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from backend.api.features.library.model import LibraryAgent
|
||||
|
||||
from backend.data.db_accessors import library_db, store_db
|
||||
from backend.util.exceptions import DatabaseError, NotFoundError
|
||||
@@ -24,94 +29,24 @@ _UUID_PATTERN = re.compile(
|
||||
re.IGNORECASE,
|
||||
)
|
||||
|
||||
|
||||
def _is_uuid(text: str) -> bool:
|
||||
"""Check if text is a valid UUID v4."""
|
||||
return bool(_UUID_PATTERN.match(text.strip()))
|
||||
|
||||
|
||||
async def _get_library_agent_by_id(user_id: str, agent_id: str) -> AgentInfo | None:
|
||||
"""Fetch a library agent by ID (library agent ID or graph_id).
|
||||
|
||||
Tries multiple lookup strategies:
|
||||
1. First by graph_id (AgentGraph primary key)
|
||||
2. Then by library agent ID (LibraryAgent primary key)
|
||||
|
||||
Args:
|
||||
user_id: The user ID
|
||||
agent_id: The ID to look up (can be graph_id or library agent ID)
|
||||
|
||||
Returns:
|
||||
AgentInfo if found, None otherwise
|
||||
"""
|
||||
lib_db = library_db()
|
||||
|
||||
try:
|
||||
agent = await lib_db.get_library_agent_by_graph_id(user_id, agent_id)
|
||||
if agent:
|
||||
logger.debug(f"Found library agent by graph_id: {agent.name}")
|
||||
return AgentInfo(
|
||||
id=agent.id,
|
||||
name=agent.name,
|
||||
description=agent.description or "",
|
||||
source="library",
|
||||
in_library=True,
|
||||
creator=agent.creator_name,
|
||||
status=agent.status.value,
|
||||
can_access_graph=agent.can_access_graph,
|
||||
has_external_trigger=agent.has_external_trigger,
|
||||
new_output=agent.new_output,
|
||||
graph_id=agent.graph_id,
|
||||
)
|
||||
except DatabaseError:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.warning(
|
||||
f"Could not fetch library agent by graph_id {agent_id}: {e}",
|
||||
exc_info=True,
|
||||
)
|
||||
|
||||
try:
|
||||
agent = await lib_db.get_library_agent(agent_id, user_id)
|
||||
if agent:
|
||||
logger.debug(f"Found library agent by library_id: {agent.name}")
|
||||
return AgentInfo(
|
||||
id=agent.id,
|
||||
name=agent.name,
|
||||
description=agent.description or "",
|
||||
source="library",
|
||||
in_library=True,
|
||||
creator=agent.creator_name,
|
||||
status=agent.status.value,
|
||||
can_access_graph=agent.can_access_graph,
|
||||
has_external_trigger=agent.has_external_trigger,
|
||||
new_output=agent.new_output,
|
||||
graph_id=agent.graph_id,
|
||||
)
|
||||
except NotFoundError:
|
||||
logger.debug(f"Library agent not found by library_id: {agent_id}")
|
||||
except DatabaseError:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.warning(
|
||||
f"Could not fetch library agent by library_id {agent_id}: {e}",
|
||||
exc_info=True,
|
||||
)
|
||||
|
||||
return None
|
||||
# Keywords that should be treated as "list all" rather than a literal search
|
||||
_LIST_ALL_KEYWORDS = frozenset({"all", "*", "everything", "any", ""})
|
||||
|
||||
|
||||
async def search_agents(
|
||||
query: str,
|
||||
source: SearchSource,
|
||||
session_id: str | None,
|
||||
session_id: str | None = None,
|
||||
user_id: str | None = None,
|
||||
) -> ToolResponseBase:
|
||||
"""
|
||||
Search for agents in marketplace or user library.
|
||||
|
||||
For library searches, keywords like "all", "*", "everything", or an empty
|
||||
query will list all agents without filtering.
|
||||
|
||||
Args:
|
||||
query: Search query string
|
||||
query: Search query string. Special keywords list all library agents.
|
||||
source: "marketplace" or "library"
|
||||
session_id: Chat session ID
|
||||
user_id: User ID (required for library search)
|
||||
@@ -119,7 +54,11 @@ async def search_agents(
|
||||
Returns:
|
||||
AgentsFoundResponse, NoResultsResponse, or ErrorResponse
|
||||
"""
|
||||
if not query:
|
||||
# Normalize list-all keywords to empty string for library searches
|
||||
if source == "library" and query.lower().strip() in _LIST_ALL_KEYWORDS:
|
||||
query = ""
|
||||
|
||||
if source == "marketplace" and not query:
|
||||
return ErrorResponse(
|
||||
message="Please provide a search query", session_id=session_id
|
||||
)
|
||||
@@ -159,28 +98,18 @@ async def search_agents(
|
||||
logger.info(f"Found agent by direct ID lookup: {agent.name}")
|
||||
|
||||
if not agents:
|
||||
logger.info(f"Searching user library for: {query}")
|
||||
search_term = query or None
|
||||
logger.info(
|
||||
f"{'Listing all agents in' if not query else 'Searching'} "
|
||||
f"user library{'' if not query else f' for: {query}'}"
|
||||
)
|
||||
results = await library_db().list_library_agents(
|
||||
user_id=user_id, # type: ignore[arg-type]
|
||||
search_term=query,
|
||||
page_size=10,
|
||||
search_term=search_term,
|
||||
page_size=50 if not query else 10,
|
||||
)
|
||||
for agent in results.agents:
|
||||
agents.append(
|
||||
AgentInfo(
|
||||
id=agent.id,
|
||||
name=agent.name,
|
||||
description=agent.description or "",
|
||||
source="library",
|
||||
in_library=True,
|
||||
creator=agent.creator_name,
|
||||
status=agent.status.value,
|
||||
can_access_graph=agent.can_access_graph,
|
||||
has_external_trigger=agent.has_external_trigger,
|
||||
new_output=agent.new_output,
|
||||
graph_id=agent.graph_id,
|
||||
)
|
||||
)
|
||||
agents.append(_library_agent_to_info(agent))
|
||||
logger.info(f"Found {len(agents)} agents in {source}")
|
||||
except NotFoundError:
|
||||
pass
|
||||
@@ -193,42 +122,62 @@ async def search_agents(
|
||||
)
|
||||
|
||||
if not agents:
|
||||
suggestions = (
|
||||
[
|
||||
if source == "marketplace":
|
||||
suggestions = [
|
||||
"Try more general terms",
|
||||
"Browse categories in the marketplace",
|
||||
"Check spelling",
|
||||
]
|
||||
if source == "marketplace"
|
||||
else [
|
||||
no_results_msg = (
|
||||
f"No agents found matching '{query}'. Let the user know they can "
|
||||
"try different keywords or browse the marketplace. Also let them "
|
||||
"know you can create a custom agent for them based on their needs."
|
||||
)
|
||||
elif not query:
|
||||
# User asked to list all but library is empty
|
||||
suggestions = [
|
||||
"Browse the marketplace to find and add agents",
|
||||
"Use find_agent to search the marketplace",
|
||||
]
|
||||
no_results_msg = (
|
||||
"Your library is empty. Let the user know they can browse the "
|
||||
"marketplace to find agents, or you can create a custom agent "
|
||||
"for them based on their needs."
|
||||
)
|
||||
else:
|
||||
suggestions = [
|
||||
"Try different keywords",
|
||||
"Use find_agent to search the marketplace",
|
||||
"Check your library at /library",
|
||||
]
|
||||
)
|
||||
no_results_msg = (
|
||||
f"No agents found matching '{query}'. Let the user know they can try different keywords or browse the marketplace. Also let them know you can create a custom agent for them based on their needs."
|
||||
if source == "marketplace"
|
||||
else f"No agents matching '{query}' found in your library. Let the user know you can create a custom agent for them based on their needs."
|
||||
)
|
||||
no_results_msg = (
|
||||
f"No agents matching '{query}' found in your library. Let the "
|
||||
"user know you can create a custom agent for them based on "
|
||||
"their needs."
|
||||
)
|
||||
return NoResultsResponse(
|
||||
message=no_results_msg, session_id=session_id, suggestions=suggestions
|
||||
)
|
||||
|
||||
title = f"Found {len(agents)} agent{'s' if len(agents) != 1 else ''} "
|
||||
title += (
|
||||
f"for '{query}'"
|
||||
if source == "marketplace"
|
||||
else f"in your library for '{query}'"
|
||||
)
|
||||
if source == "marketplace":
|
||||
title = (
|
||||
f"Found {len(agents)} agent{'s' if len(agents) != 1 else ''} for '{query}'"
|
||||
)
|
||||
elif not query:
|
||||
title = f"Found {len(agents)} agent{'s' if len(agents) != 1 else ''} in your library"
|
||||
else:
|
||||
title = f"Found {len(agents)} agent{'s' if len(agents) != 1 else ''} in your library for '{query}'"
|
||||
|
||||
message = (
|
||||
"Now you have found some options for the user to choose from. "
|
||||
"You can add a link to a recommended agent at: /marketplace/agent/agent_id "
|
||||
"Please ask the user if they would like to use any of these agents. Let the user know we can create a custom agent for them based on their needs."
|
||||
"Please ask the user if they would like to use any of these agents. "
|
||||
"Let the user know we can create a custom agent for them based on their needs."
|
||||
if source == "marketplace"
|
||||
else "Found agents in the user's library. You can provide a link to view an agent at: "
|
||||
"/library/agents/{agent_id}. Use agent_output to get execution results, or run_agent to execute. Let the user know we can create a custom agent for them based on their needs."
|
||||
else "Found agents in the user's library. You can provide a link to view "
|
||||
"an agent at: /library/agents/{agent_id}. Use agent_output to get "
|
||||
"execution results, or run_agent to execute. Let the user know we can "
|
||||
"create a custom agent for them based on their needs."
|
||||
)
|
||||
|
||||
return AgentsFoundResponse(
|
||||
@@ -238,3 +187,67 @@ async def search_agents(
|
||||
count=len(agents),
|
||||
session_id=session_id,
|
||||
)
|
||||
|
||||
|
||||
def _is_uuid(text: str) -> bool:
|
||||
"""Check if text is a valid UUID v4."""
|
||||
return bool(_UUID_PATTERN.match(text.strip()))
|
||||
|
||||
|
||||
def _library_agent_to_info(agent: LibraryAgent) -> AgentInfo:
|
||||
"""Convert a library agent model to an AgentInfo."""
|
||||
return AgentInfo(
|
||||
id=agent.id,
|
||||
name=agent.name,
|
||||
description=agent.description or "",
|
||||
source="library",
|
||||
in_library=True,
|
||||
creator=agent.creator_name,
|
||||
status=agent.status.value,
|
||||
can_access_graph=agent.can_access_graph,
|
||||
has_external_trigger=agent.has_external_trigger,
|
||||
new_output=agent.new_output,
|
||||
graph_id=agent.graph_id,
|
||||
)
|
||||
|
||||
|
||||
async def _get_library_agent_by_id(user_id: str, agent_id: str) -> AgentInfo | None:
|
||||
"""Fetch a library agent by ID (library agent ID or graph_id).
|
||||
|
||||
Tries multiple lookup strategies:
|
||||
1. First by graph_id (AgentGraph primary key)
|
||||
2. Then by library agent ID (LibraryAgent primary key)
|
||||
"""
|
||||
lib_db = library_db()
|
||||
|
||||
try:
|
||||
agent = await lib_db.get_library_agent_by_graph_id(user_id, agent_id)
|
||||
if agent:
|
||||
logger.debug(f"Found library agent by graph_id: {agent.name}")
|
||||
return _library_agent_to_info(agent)
|
||||
except NotFoundError:
|
||||
logger.debug(f"Library agent not found by graph_id: {agent_id}")
|
||||
except DatabaseError:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.warning(
|
||||
f"Could not fetch library agent by graph_id {agent_id}: {e}",
|
||||
exc_info=True,
|
||||
)
|
||||
|
||||
try:
|
||||
agent = await lib_db.get_library_agent(agent_id, user_id)
|
||||
if agent:
|
||||
logger.debug(f"Found library agent by library_id: {agent.name}")
|
||||
return _library_agent_to_info(agent)
|
||||
except NotFoundError:
|
||||
logger.debug(f"Library agent not found by library_id: {agent_id}")
|
||||
except DatabaseError:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.warning(
|
||||
f"Could not fetch library agent by library_id {agent_id}: {e}",
|
||||
exc_info=True,
|
||||
)
|
||||
|
||||
return None
|
||||
|
||||
@@ -36,11 +36,6 @@ class BaseTool:
|
||||
"""Whether this tool requires authentication."""
|
||||
return False
|
||||
|
||||
@property
|
||||
def is_long_running(self) -> bool:
|
||||
"""Whether this tool takes a long time to execute (triggers long-running UI)."""
|
||||
return False
|
||||
|
||||
def as_openai_tool(self) -> ChatCompletionToolParam:
|
||||
"""Convert to OpenAI tool format."""
|
||||
return ChatCompletionToolParam(
|
||||
|
||||
@@ -1,124 +0,0 @@
|
||||
"""CheckOperationStatusTool — query the status of a long-running operation."""
|
||||
|
||||
import logging
|
||||
from typing import Any
|
||||
|
||||
from backend.copilot.model import ChatSession
|
||||
|
||||
from .base import BaseTool
|
||||
from .models import ErrorResponse, ResponseType, ToolResponseBase
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class OperationStatusResponse(ToolResponseBase):
|
||||
"""Response for check_operation_status tool."""
|
||||
|
||||
type: ResponseType = ResponseType.OPERATION_STATUS
|
||||
task_id: str
|
||||
operation_id: str
|
||||
status: str # "running", "completed", "failed"
|
||||
tool_name: str | None = None
|
||||
message: str = ""
|
||||
|
||||
|
||||
class CheckOperationStatusTool(BaseTool):
|
||||
"""Check the status of a long-running operation (create_agent, edit_agent, etc.).
|
||||
|
||||
The CoPilot uses this tool to report back to the user whether an
|
||||
operation that was started earlier has completed, failed, or is still
|
||||
running.
|
||||
"""
|
||||
|
||||
@property
|
||||
def name(self) -> str:
|
||||
return "check_operation_status"
|
||||
|
||||
@property
|
||||
def description(self) -> str:
|
||||
return (
|
||||
"Check the current status of a long-running operation such as "
|
||||
"create_agent or edit_agent. Accepts either an operation_id or "
|
||||
"task_id from a previous operation_started response. "
|
||||
"Returns the current status: running, completed, or failed."
|
||||
)
|
||||
|
||||
@property
|
||||
def parameters(self) -> dict[str, Any]:
|
||||
return {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"operation_id": {
|
||||
"type": "string",
|
||||
"description": (
|
||||
"The operation_id from an operation_started response."
|
||||
),
|
||||
},
|
||||
"task_id": {
|
||||
"type": "string",
|
||||
"description": (
|
||||
"The task_id from an operation_started response. "
|
||||
"Used as fallback if operation_id is not provided."
|
||||
),
|
||||
},
|
||||
},
|
||||
"required": [],
|
||||
}
|
||||
|
||||
@property
|
||||
def requires_auth(self) -> bool:
|
||||
return False
|
||||
|
||||
async def _execute(
|
||||
self,
|
||||
user_id: str | None,
|
||||
session: ChatSession,
|
||||
**kwargs,
|
||||
) -> ToolResponseBase:
|
||||
from backend.copilot import stream_registry
|
||||
|
||||
operation_id = (kwargs.get("operation_id") or "").strip()
|
||||
task_id = (kwargs.get("task_id") or "").strip()
|
||||
|
||||
if not operation_id and not task_id:
|
||||
return ErrorResponse(
|
||||
message="Please provide an operation_id or task_id.",
|
||||
error="missing_parameter",
|
||||
)
|
||||
|
||||
task = None
|
||||
if operation_id:
|
||||
task = await stream_registry.find_task_by_operation_id(operation_id)
|
||||
if task is None and task_id:
|
||||
task = await stream_registry.get_task(task_id)
|
||||
|
||||
if task is None:
|
||||
# Task not in Redis — it may have already expired (TTL).
|
||||
# Check conversation history for the result instead.
|
||||
return ErrorResponse(
|
||||
message=(
|
||||
"Operation not found — it may have already completed and "
|
||||
"expired from the status tracker. Check the conversation "
|
||||
"history for the result."
|
||||
),
|
||||
error="not_found",
|
||||
)
|
||||
|
||||
status_messages = {
|
||||
"running": (
|
||||
f"The {task.tool_name or 'operation'} is still running. "
|
||||
"Please wait for it to complete."
|
||||
),
|
||||
"completed": (
|
||||
f"The {task.tool_name or 'operation'} has completed successfully."
|
||||
),
|
||||
"failed": f"The {task.tool_name or 'operation'} has failed.",
|
||||
}
|
||||
|
||||
return OperationStatusResponse(
|
||||
task_id=task.task_id,
|
||||
operation_id=task.operation_id,
|
||||
status=task.status,
|
||||
tool_name=task.tool_name,
|
||||
message=status_messages.get(task.status, f"Status: {task.status}"),
|
||||
)
|
||||
@@ -10,7 +10,6 @@ from .agent_generator import (
|
||||
decompose_goal,
|
||||
enrich_library_agents_from_steps,
|
||||
generate_agent,
|
||||
get_all_relevant_agents_for_generation,
|
||||
get_user_message_for_error,
|
||||
save_agent_to_library,
|
||||
)
|
||||
@@ -39,18 +38,16 @@ class CreateAgentTool(BaseTool):
|
||||
def description(self) -> str:
|
||||
return (
|
||||
"Create a new agent workflow from a natural language description. "
|
||||
"First generates a preview, then saves to library if save=true."
|
||||
"First generates a preview, then saves to library if save=true. "
|
||||
"\n\nIMPORTANT: Before calling this tool, search for relevant existing agents "
|
||||
"using find_library_agent that could be used as building blocks. "
|
||||
"Pass their IDs in the library_agent_ids parameter so the generator can compose them."
|
||||
)
|
||||
|
||||
@property
|
||||
def requires_auth(self) -> bool:
|
||||
return True
|
||||
|
||||
@property
|
||||
def is_long_running(self) -> bool:
|
||||
"""Agent generation takes several minutes."""
|
||||
return True
|
||||
|
||||
@property
|
||||
def parameters(self) -> dict[str, Any]:
|
||||
return {
|
||||
@@ -70,6 +67,15 @@ class CreateAgentTool(BaseTool):
|
||||
"Include any preferences or constraints mentioned by the user."
|
||||
),
|
||||
},
|
||||
"library_agent_ids": {
|
||||
"type": "array",
|
||||
"items": {"type": "string"},
|
||||
"description": (
|
||||
"List of library agent IDs to use as building blocks. "
|
||||
"Search for relevant agents using find_library_agent first, "
|
||||
"then pass their IDs here so they can be composed into the new agent."
|
||||
),
|
||||
},
|
||||
"save": {
|
||||
"type": "boolean",
|
||||
"description": (
|
||||
@@ -97,9 +103,15 @@ class CreateAgentTool(BaseTool):
|
||||
"""
|
||||
description = kwargs.get("description", "").strip()
|
||||
context = kwargs.get("context", "")
|
||||
library_agent_ids = kwargs.get("library_agent_ids", [])
|
||||
save = kwargs.get("save", True)
|
||||
session_id = session.session_id if session else None
|
||||
|
||||
logger.info(
|
||||
f"[AGENT_CREATE_DEBUG] START - description_len={len(description)}, "
|
||||
f"library_agent_ids={library_agent_ids}, save={save}, user_id={user_id}, session_id={session_id}"
|
||||
)
|
||||
|
||||
if not description:
|
||||
return ErrorResponse(
|
||||
message="Please provide a description of what the agent should do.",
|
||||
@@ -107,25 +119,34 @@ class CreateAgentTool(BaseTool):
|
||||
session_id=session_id,
|
||||
)
|
||||
|
||||
# Fetch library agents by IDs if provided
|
||||
library_agents = None
|
||||
if user_id:
|
||||
if user_id and library_agent_ids:
|
||||
try:
|
||||
library_agents = await get_all_relevant_agents_for_generation(
|
||||
from .agent_generator import get_library_agents_by_ids
|
||||
|
||||
library_agents = await get_library_agents_by_ids(
|
||||
user_id=user_id,
|
||||
search_query=description,
|
||||
include_marketplace=True,
|
||||
agent_ids=library_agent_ids,
|
||||
)
|
||||
logger.debug(
|
||||
f"Found {len(library_agents)} relevant agents for sub-agent composition"
|
||||
f"Fetched {len(library_agents)} library agents by ID for sub-agent composition"
|
||||
)
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to fetch library agents: {e}")
|
||||
logger.warning(f"Failed to fetch library agents by IDs: {e}")
|
||||
|
||||
try:
|
||||
decomposition_result = await decompose_goal(
|
||||
description, context, library_agents
|
||||
)
|
||||
logger.info(
|
||||
f"[AGENT_CREATE_DEBUG] DECOMPOSE - type={decomposition_result.get('type') if decomposition_result else None}, "
|
||||
f"session_id={session_id}"
|
||||
)
|
||||
except AgentGeneratorNotConfiguredError:
|
||||
logger.error(
|
||||
f"[AGENT_CREATE_DEBUG] ERROR - AgentGeneratorNotConfigured, session_id={session_id}"
|
||||
)
|
||||
return ErrorResponse(
|
||||
message=(
|
||||
"Agent generation is not available. "
|
||||
@@ -227,7 +248,16 @@ class CreateAgentTool(BaseTool):
|
||||
decomposition_result,
|
||||
library_agents,
|
||||
)
|
||||
logger.info(
|
||||
f"[AGENT_CREATE_DEBUG] GENERATE - "
|
||||
f"success={agent_json is not None}, "
|
||||
f"is_error={isinstance(agent_json, dict) and agent_json.get('type') == 'error'}, "
|
||||
f"session_id={session_id}"
|
||||
)
|
||||
except AgentGeneratorNotConfiguredError:
|
||||
logger.error(
|
||||
f"[AGENT_CREATE_DEBUG] ERROR - AgentGeneratorNotConfigured during generation, session_id={session_id}"
|
||||
)
|
||||
return ErrorResponse(
|
||||
message=(
|
||||
"Agent generation is not available. "
|
||||
@@ -275,7 +305,15 @@ class CreateAgentTool(BaseTool):
|
||||
node_count = len(agent_json.get("nodes", []))
|
||||
link_count = len(agent_json.get("links", []))
|
||||
|
||||
logger.info(
|
||||
f"[AGENT_CREATE_DEBUG] AGENT_JSON - name={agent_name}, "
|
||||
f"nodes={node_count}, links={link_count}, save={save}, session_id={session_id}"
|
||||
)
|
||||
|
||||
if not save:
|
||||
logger.info(
|
||||
f"[AGENT_CREATE_DEBUG] RETURN - AgentPreviewResponse, session_id={session_id}"
|
||||
)
|
||||
return AgentPreviewResponse(
|
||||
message=(
|
||||
f"I've generated an agent called '{agent_name}' with {node_count} blocks. "
|
||||
@@ -301,6 +339,13 @@ class CreateAgentTool(BaseTool):
|
||||
agent_json, user_id
|
||||
)
|
||||
|
||||
logger.info(
|
||||
f"[AGENT_CREATE_DEBUG] SAVED - graph_id={created_graph.id}, "
|
||||
f"library_agent_id={library_agent.id}, session_id={session_id}"
|
||||
)
|
||||
logger.info(
|
||||
f"[AGENT_CREATE_DEBUG] RETURN - AgentSavedResponse, session_id={session_id}"
|
||||
)
|
||||
return AgentSavedResponse(
|
||||
message=f"Agent '{created_graph.name}' has been saved to your library!",
|
||||
agent_id=created_graph.id,
|
||||
@@ -311,6 +356,12 @@ class CreateAgentTool(BaseTool):
|
||||
session_id=session_id,
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"[AGENT_CREATE_DEBUG] ERROR - save_failed: {str(e)}, session_id={session_id}"
|
||||
)
|
||||
logger.info(
|
||||
f"[AGENT_CREATE_DEBUG] RETURN - ErrorResponse (save_failed), session_id={session_id}"
|
||||
)
|
||||
return ErrorResponse(
|
||||
message=f"Failed to save the agent: {str(e)}",
|
||||
error="save_failed",
|
||||
|
||||
@@ -43,11 +43,6 @@ async def test_vague_goal_returns_suggested_goal_response(tool, session):
|
||||
}
|
||||
|
||||
with (
|
||||
patch(
|
||||
"backend.copilot.tools.create_agent.get_all_relevant_agents_for_generation",
|
||||
new_callable=AsyncMock,
|
||||
return_value=[],
|
||||
),
|
||||
patch(
|
||||
"backend.copilot.tools.create_agent.decompose_goal",
|
||||
new_callable=AsyncMock,
|
||||
@@ -78,11 +73,6 @@ async def test_unachievable_goal_returns_suggested_goal_response(tool, session):
|
||||
}
|
||||
|
||||
with (
|
||||
patch(
|
||||
"backend.copilot.tools.create_agent.get_all_relevant_agents_for_generation",
|
||||
new_callable=AsyncMock,
|
||||
return_value=[],
|
||||
),
|
||||
patch(
|
||||
"backend.copilot.tools.create_agent.decompose_goal",
|
||||
new_callable=AsyncMock,
|
||||
@@ -120,11 +110,6 @@ async def test_clarifying_questions_returns_clarification_needed_response(
|
||||
}
|
||||
|
||||
with (
|
||||
patch(
|
||||
"backend.copilot.tools.create_agent.get_all_relevant_agents_for_generation",
|
||||
new_callable=AsyncMock,
|
||||
return_value=[],
|
||||
),
|
||||
patch(
|
||||
"backend.copilot.tools.create_agent.decompose_goal",
|
||||
new_callable=AsyncMock,
|
||||
|
||||
@@ -46,11 +46,6 @@ class CustomizeAgentTool(BaseTool):
|
||||
def requires_auth(self) -> bool:
|
||||
return True
|
||||
|
||||
@property
|
||||
def is_long_running(self) -> bool:
|
||||
"""Agent customization takes several minutes."""
|
||||
return True
|
||||
|
||||
@property
|
||||
def parameters(self) -> dict[str, Any]:
|
||||
return {
|
||||
|
||||
@@ -9,7 +9,6 @@ from .agent_generator import (
|
||||
AgentGeneratorNotConfiguredError,
|
||||
generate_agent_patch,
|
||||
get_agent_as_json,
|
||||
get_all_relevant_agents_for_generation,
|
||||
get_user_message_for_error,
|
||||
save_agent_to_library,
|
||||
)
|
||||
@@ -37,18 +36,16 @@ class EditAgentTool(BaseTool):
|
||||
def description(self) -> str:
|
||||
return (
|
||||
"Edit an existing agent from the user's library using natural language. "
|
||||
"Generates updates to the agent while preserving unchanged parts."
|
||||
"Generates updates to the agent while preserving unchanged parts. "
|
||||
"\n\nIMPORTANT: Before calling this tool, if the changes involve adding new "
|
||||
"functionality, search for relevant existing agents using find_library_agent "
|
||||
"that could be used as building blocks. Pass their IDs in library_agent_ids."
|
||||
)
|
||||
|
||||
@property
|
||||
def requires_auth(self) -> bool:
|
||||
return True
|
||||
|
||||
@property
|
||||
def is_long_running(self) -> bool:
|
||||
"""Agent editing takes several minutes."""
|
||||
return True
|
||||
|
||||
@property
|
||||
def parameters(self) -> dict[str, Any]:
|
||||
return {
|
||||
@@ -74,6 +71,15 @@ class EditAgentTool(BaseTool):
|
||||
"Additional context or answers to previous clarifying questions."
|
||||
),
|
||||
},
|
||||
"library_agent_ids": {
|
||||
"type": "array",
|
||||
"items": {"type": "string"},
|
||||
"description": (
|
||||
"List of library agent IDs to use as building blocks for the changes. "
|
||||
"If adding new functionality, search for relevant agents using "
|
||||
"find_library_agent first, then pass their IDs here."
|
||||
),
|
||||
},
|
||||
"save": {
|
||||
"type": "boolean",
|
||||
"description": (
|
||||
@@ -102,6 +108,7 @@ class EditAgentTool(BaseTool):
|
||||
agent_id = kwargs.get("agent_id", "").strip()
|
||||
changes = kwargs.get("changes", "").strip()
|
||||
context = kwargs.get("context", "")
|
||||
library_agent_ids = kwargs.get("library_agent_ids", [])
|
||||
save = kwargs.get("save", True)
|
||||
session_id = session.session_id if session else None
|
||||
|
||||
@@ -128,21 +135,25 @@ class EditAgentTool(BaseTool):
|
||||
session_id=session_id,
|
||||
)
|
||||
|
||||
# Fetch library agents by IDs if provided
|
||||
library_agents = None
|
||||
if user_id:
|
||||
if user_id and library_agent_ids:
|
||||
try:
|
||||
from .agent_generator import get_library_agents_by_ids
|
||||
|
||||
graph_id = current_agent.get("id")
|
||||
library_agents = await get_all_relevant_agents_for_generation(
|
||||
# Filter out the current agent being edited
|
||||
filtered_ids = [id for id in library_agent_ids if id != graph_id]
|
||||
|
||||
library_agents = await get_library_agents_by_ids(
|
||||
user_id=user_id,
|
||||
search_query=changes,
|
||||
exclude_graph_id=graph_id,
|
||||
include_marketplace=True,
|
||||
agent_ids=filtered_ids,
|
||||
)
|
||||
logger.debug(
|
||||
f"Found {len(library_agents)} relevant agents for sub-agent composition"
|
||||
f"Fetched {len(library_agents)} library agents by ID for sub-agent composition"
|
||||
)
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to fetch library agents: {e}")
|
||||
logger.warning(f"Failed to fetch library agents by IDs: {e}")
|
||||
|
||||
update_request = changes
|
||||
if context:
|
||||
|
||||
@@ -0,0 +1,186 @@
|
||||
"""Shared utilities for execution waiting and status handling."""
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
from typing import Any
|
||||
|
||||
from backend.data.db_accessors import execution_db
|
||||
from backend.data.execution import (
|
||||
AsyncRedisExecutionEventBus,
|
||||
ExecutionStatus,
|
||||
GraphExecution,
|
||||
GraphExecutionEvent,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Terminal statuses that indicate execution is complete
|
||||
TERMINAL_STATUSES = frozenset(
|
||||
{
|
||||
ExecutionStatus.COMPLETED,
|
||||
ExecutionStatus.FAILED,
|
||||
ExecutionStatus.TERMINATED,
|
||||
}
|
||||
)
|
||||
|
||||
# Statuses where execution is paused but not finished (e.g. human-in-the-loop)
|
||||
PAUSED_STATUSES = frozenset(
|
||||
{
|
||||
ExecutionStatus.REVIEW,
|
||||
}
|
||||
)
|
||||
|
||||
# Statuses that mean "stop waiting" (terminal or paused)
|
||||
STOP_WAITING_STATUSES = TERMINAL_STATUSES | PAUSED_STATUSES
|
||||
|
||||
_POST_SUBSCRIBE_RECHECK_DELAY = 0.1 # seconds to wait for subscription to establish
|
||||
|
||||
|
||||
async def wait_for_execution(
|
||||
user_id: str,
|
||||
graph_id: str,
|
||||
execution_id: str,
|
||||
timeout_seconds: int,
|
||||
) -> GraphExecution | None:
|
||||
"""
|
||||
Wait for an execution to reach a terminal or paused status using Redis pubsub.
|
||||
|
||||
Handles the race condition between checking status and subscribing by
|
||||
re-checking the DB after the subscription is established.
|
||||
|
||||
Args:
|
||||
user_id: User ID
|
||||
graph_id: Graph ID
|
||||
execution_id: Execution ID to wait for
|
||||
timeout_seconds: Max seconds to wait
|
||||
|
||||
Returns:
|
||||
The execution with current status, or None if not found
|
||||
"""
|
||||
exec_db = execution_db()
|
||||
|
||||
# Quick check — maybe it's already done
|
||||
execution = await exec_db.get_graph_execution(
|
||||
user_id=user_id,
|
||||
execution_id=execution_id,
|
||||
include_node_executions=False,
|
||||
)
|
||||
if not execution:
|
||||
return None
|
||||
|
||||
if execution.status in STOP_WAITING_STATUSES:
|
||||
logger.debug(
|
||||
f"Execution {execution_id} already in stop-waiting state: "
|
||||
f"{execution.status}"
|
||||
)
|
||||
return execution
|
||||
|
||||
logger.info(
|
||||
f"Waiting up to {timeout_seconds}s for execution {execution_id} "
|
||||
f"(current status: {execution.status})"
|
||||
)
|
||||
|
||||
event_bus = AsyncRedisExecutionEventBus()
|
||||
channel_key = f"{user_id}/{graph_id}/{execution_id}"
|
||||
|
||||
# Mutable container so _subscribe_and_wait can surface the task even if
|
||||
# asyncio.wait_for cancels the coroutine before it returns.
|
||||
task_holder: list[asyncio.Task] = []
|
||||
|
||||
try:
|
||||
result = await asyncio.wait_for(
|
||||
_subscribe_and_wait(
|
||||
event_bus, channel_key, user_id, execution_id, exec_db, task_holder
|
||||
),
|
||||
timeout=timeout_seconds,
|
||||
)
|
||||
return result
|
||||
except asyncio.TimeoutError:
|
||||
logger.info(f"Timeout waiting for execution {execution_id}")
|
||||
except Exception as e:
|
||||
logger.error(f"Error waiting for execution: {e}", exc_info=True)
|
||||
finally:
|
||||
for task in task_holder:
|
||||
if not task.done():
|
||||
task.cancel()
|
||||
try:
|
||||
await task
|
||||
except asyncio.CancelledError:
|
||||
pass
|
||||
await event_bus.close()
|
||||
|
||||
# Return current state on timeout/error
|
||||
return await exec_db.get_graph_execution(
|
||||
user_id=user_id,
|
||||
execution_id=execution_id,
|
||||
include_node_executions=False,
|
||||
)
|
||||
|
||||
|
||||
async def _subscribe_and_wait(
|
||||
event_bus: AsyncRedisExecutionEventBus,
|
||||
channel_key: str,
|
||||
user_id: str,
|
||||
execution_id: str,
|
||||
exec_db: Any,
|
||||
task_holder: list[asyncio.Task],
|
||||
) -> GraphExecution | None:
|
||||
"""
|
||||
Subscribe to execution events and wait for a terminal/paused status.
|
||||
|
||||
Appends the consumer task to ``task_holder`` so the caller can clean it up
|
||||
even if this coroutine is cancelled by ``asyncio.wait_for``.
|
||||
|
||||
To avoid the race condition where the execution completes between the
|
||||
initial DB check and the Redis subscription, we:
|
||||
1. Start listening (which subscribes internally)
|
||||
2. Re-check the DB after subscription is active
|
||||
3. If still running, wait for pubsub events
|
||||
"""
|
||||
listen_iter = event_bus.listen_events(channel_key).__aiter__()
|
||||
|
||||
done = asyncio.Event()
|
||||
result_execution: GraphExecution | None = None
|
||||
|
||||
async def _consume() -> None:
|
||||
nonlocal result_execution
|
||||
try:
|
||||
async for event in listen_iter:
|
||||
if isinstance(event, GraphExecutionEvent):
|
||||
logger.debug(f"Received execution update: {event.status}")
|
||||
if event.status in STOP_WAITING_STATUSES:
|
||||
result_execution = await exec_db.get_graph_execution(
|
||||
user_id=user_id,
|
||||
execution_id=execution_id,
|
||||
include_node_executions=False,
|
||||
)
|
||||
done.set()
|
||||
return
|
||||
except Exception as e:
|
||||
logger.error(f"Error in execution consumer: {e}", exc_info=True)
|
||||
done.set()
|
||||
|
||||
consume_task = asyncio.create_task(_consume())
|
||||
task_holder.append(consume_task)
|
||||
|
||||
# Give the subscription a moment to establish, then re-check DB
|
||||
await asyncio.sleep(_POST_SUBSCRIBE_RECHECK_DELAY)
|
||||
|
||||
execution = await exec_db.get_graph_execution(
|
||||
user_id=user_id,
|
||||
execution_id=execution_id,
|
||||
include_node_executions=False,
|
||||
)
|
||||
if execution and execution.status in STOP_WAITING_STATUSES:
|
||||
return execution
|
||||
|
||||
# Wait for the pubsub consumer to find a terminal event
|
||||
await done.wait()
|
||||
return result_execution
|
||||
|
||||
|
||||
def get_execution_outputs(execution: GraphExecution | None) -> dict[str, Any] | None:
|
||||
"""Extract outputs from an execution, or return None."""
|
||||
if execution is None:
|
||||
return None
|
||||
return execution.outputs
|
||||
@@ -366,12 +366,15 @@ class TestFindBlockFiltering:
|
||||
return_value=(search_results, len(search_results))
|
||||
)
|
||||
|
||||
with patch(
|
||||
"backend.copilot.tools.find_block.search",
|
||||
return_value=mock_search_db,
|
||||
), patch(
|
||||
"backend.copilot.tools.find_block.get_block",
|
||||
side_effect=lambda bid: mock_blocks.get(bid),
|
||||
with (
|
||||
patch(
|
||||
"backend.copilot.tools.find_block.search",
|
||||
return_value=mock_search_db,
|
||||
),
|
||||
patch(
|
||||
"backend.copilot.tools.find_block.get_block",
|
||||
side_effect=lambda bid: mock_blocks.get(bid),
|
||||
),
|
||||
):
|
||||
tool = FindBlockTool()
|
||||
response = await tool._execute(
|
||||
|
||||
@@ -19,9 +19,10 @@ class FindLibraryAgentTool(BaseTool):
|
||||
@property
|
||||
def description(self) -> str:
|
||||
return (
|
||||
"Search for agents in the user's library. Use this to find agents "
|
||||
"the user has already added to their library, including agents they "
|
||||
"created or added from the marketplace."
|
||||
"Search for or list agents in the user's library. Use this to find "
|
||||
"agents the user has already added to their library, including agents "
|
||||
"they created or added from the marketplace. "
|
||||
"Omit the query to list all agents."
|
||||
)
|
||||
|
||||
@property
|
||||
@@ -31,10 +32,13 @@ class FindLibraryAgentTool(BaseTool):
|
||||
"properties": {
|
||||
"query": {
|
||||
"type": "string",
|
||||
"description": "Search query to find agents by name or description.",
|
||||
"description": (
|
||||
"Search query to find agents by name or description. "
|
||||
"Omit to list all agents in the library."
|
||||
),
|
||||
},
|
||||
},
|
||||
"required": ["query"],
|
||||
"required": [],
|
||||
}
|
||||
|
||||
@property
|
||||
@@ -45,7 +49,7 @@ class FindLibraryAgentTool(BaseTool):
|
||||
self, user_id: str | None, session: ChatSession, **kwargs
|
||||
) -> ToolResponseBase:
|
||||
return await search_agents(
|
||||
query=kwargs.get("query", "").strip(),
|
||||
query=(kwargs.get("query") or "").strip(),
|
||||
source="library",
|
||||
session_id=session.session_id,
|
||||
user_id=user_id,
|
||||
|
||||
@@ -36,8 +36,6 @@ class ResponseType(str, Enum):
|
||||
WORKSPACE_FILE_WRITTEN = "workspace_file_written"
|
||||
WORKSPACE_FILE_DELETED = "workspace_file_deleted"
|
||||
# Long-running operation types
|
||||
OPERATION_STARTED = "operation_started"
|
||||
OPERATION_PENDING = "operation_pending"
|
||||
OPERATION_IN_PROGRESS = "operation_in_progress"
|
||||
# Input validation
|
||||
INPUT_VALIDATION_ERROR = "input_validation_error"
|
||||
@@ -45,8 +43,6 @@ class ResponseType(str, Enum):
|
||||
WEB_FETCH = "web_fetch"
|
||||
# Code execution
|
||||
BASH_EXEC = "bash_exec"
|
||||
# Operation status check
|
||||
OPERATION_STATUS = "operation_status"
|
||||
# Feature request types
|
||||
FEATURE_REQUEST_SEARCH = "feature_request_search"
|
||||
FEATURE_REQUEST_CREATED = "feature_request_created"
|
||||
@@ -420,34 +416,6 @@ class BlockOutputResponse(ToolResponseBase):
|
||||
|
||||
|
||||
# Long-running operation models
|
||||
class OperationStartedResponse(ToolResponseBase):
|
||||
"""Response when a long-running operation has been started in the background.
|
||||
|
||||
This is returned immediately to the client while the operation continues
|
||||
to execute. The user can close the tab and check back later.
|
||||
|
||||
The task_id can be used to reconnect to the SSE stream via
|
||||
GET /chat/tasks/{task_id}/stream?last_idx=0
|
||||
"""
|
||||
|
||||
type: ResponseType = ResponseType.OPERATION_STARTED
|
||||
operation_id: str
|
||||
tool_name: str
|
||||
task_id: str | None = None # For SSE reconnection
|
||||
|
||||
|
||||
class OperationPendingResponse(ToolResponseBase):
|
||||
"""Response stored in chat history while a long-running operation is executing.
|
||||
|
||||
This is persisted to the database so users see a pending state when they
|
||||
refresh before the operation completes.
|
||||
"""
|
||||
|
||||
type: ResponseType = ResponseType.OPERATION_PENDING
|
||||
operation_id: str
|
||||
tool_name: str
|
||||
|
||||
|
||||
class OperationInProgressResponse(ToolResponseBase):
|
||||
"""Response when an operation is already in progress.
|
||||
|
||||
|
||||
@@ -9,6 +9,7 @@ from backend.copilot.config import ChatConfig
|
||||
from backend.copilot.model import ChatSession
|
||||
from backend.copilot.tracking import track_agent_run_success, track_agent_scheduled
|
||||
from backend.data.db_accessors import graph_db, library_db, user_db
|
||||
from backend.data.execution import ExecutionStatus
|
||||
from backend.data.graph import GraphModel
|
||||
from backend.data.model import CredentialsMetaInput
|
||||
from backend.executor import utils as execution_utils
|
||||
@@ -20,12 +21,15 @@ from backend.util.timezone_utils import (
|
||||
)
|
||||
|
||||
from .base import BaseTool
|
||||
from .execution_utils import get_execution_outputs, wait_for_execution
|
||||
from .helpers import get_inputs_from_schema
|
||||
from .models import (
|
||||
AgentDetails,
|
||||
AgentDetailsResponse,
|
||||
AgentOutputResponse,
|
||||
ErrorResponse,
|
||||
ExecutionOptions,
|
||||
ExecutionOutputInfo,
|
||||
ExecutionStartedResponse,
|
||||
InputValidationErrorResponse,
|
||||
SetupInfo,
|
||||
@@ -66,6 +70,7 @@ class RunAgentInput(BaseModel):
|
||||
schedule_name: str = ""
|
||||
cron: str = ""
|
||||
timezone: str = "UTC"
|
||||
wait_for_result: int = Field(default=0, ge=0, le=300)
|
||||
|
||||
@field_validator(
|
||||
"username_agent_slug",
|
||||
@@ -147,6 +152,14 @@ class RunAgentTool(BaseTool):
|
||||
"type": "string",
|
||||
"description": "IANA timezone for schedule (default: UTC)",
|
||||
},
|
||||
"wait_for_result": {
|
||||
"type": "integer",
|
||||
"description": (
|
||||
"Max seconds to wait for execution to complete (0-300). "
|
||||
"If >0, blocks until the execution finishes or times out. "
|
||||
"Returns execution outputs when complete."
|
||||
),
|
||||
},
|
||||
},
|
||||
"required": [],
|
||||
}
|
||||
@@ -341,6 +354,7 @@ class RunAgentTool(BaseTool):
|
||||
graph=graph,
|
||||
graph_credentials=graph_credentials,
|
||||
inputs=params.inputs,
|
||||
wait_for_result=params.wait_for_result,
|
||||
)
|
||||
|
||||
except NotFoundError as e:
|
||||
@@ -424,8 +438,9 @@ class RunAgentTool(BaseTool):
|
||||
graph: GraphModel,
|
||||
graph_credentials: dict[str, CredentialsMetaInput],
|
||||
inputs: dict[str, Any],
|
||||
wait_for_result: int = 0,
|
||||
) -> ToolResponseBase:
|
||||
"""Execute an agent immediately."""
|
||||
"""Execute an agent immediately, optionally waiting for completion."""
|
||||
session_id = session.session_id
|
||||
|
||||
# Check rate limits
|
||||
@@ -462,6 +477,91 @@ class RunAgentTool(BaseTool):
|
||||
)
|
||||
|
||||
library_agent_link = f"/library/agents/{library_agent.id}"
|
||||
|
||||
# If wait_for_result is requested, wait for execution to complete
|
||||
if wait_for_result > 0:
|
||||
logger.info(
|
||||
f"Waiting up to {wait_for_result}s for execution {execution.id}"
|
||||
)
|
||||
completed = await wait_for_execution(
|
||||
user_id=user_id,
|
||||
graph_id=library_agent.graph_id,
|
||||
execution_id=execution.id,
|
||||
timeout_seconds=wait_for_result,
|
||||
)
|
||||
|
||||
if completed and completed.status == ExecutionStatus.COMPLETED:
|
||||
outputs = get_execution_outputs(completed)
|
||||
return AgentOutputResponse(
|
||||
message=(
|
||||
f"Agent '{library_agent.name}' completed successfully. "
|
||||
f"View at {library_agent_link}."
|
||||
),
|
||||
session_id=session_id,
|
||||
agent_name=library_agent.name,
|
||||
agent_id=library_agent.graph_id,
|
||||
library_agent_id=library_agent.id,
|
||||
library_agent_link=library_agent_link,
|
||||
execution=ExecutionOutputInfo(
|
||||
execution_id=execution.id,
|
||||
status=completed.status.value,
|
||||
started_at=completed.started_at,
|
||||
ended_at=completed.ended_at,
|
||||
outputs=outputs or {},
|
||||
),
|
||||
)
|
||||
elif completed and completed.status == ExecutionStatus.FAILED:
|
||||
error_detail = completed.stats.error if completed.stats else None
|
||||
return ErrorResponse(
|
||||
message=(
|
||||
f"Agent '{library_agent.name}' execution failed. "
|
||||
f"View details at {library_agent_link}."
|
||||
),
|
||||
session_id=session_id,
|
||||
error=error_detail,
|
||||
)
|
||||
elif completed and completed.status == ExecutionStatus.TERMINATED:
|
||||
error_detail = completed.stats.error if completed.stats else None
|
||||
return ErrorResponse(
|
||||
message=(
|
||||
f"Agent '{library_agent.name}' execution was terminated. "
|
||||
f"View details at {library_agent_link}."
|
||||
),
|
||||
session_id=session_id,
|
||||
error=error_detail,
|
||||
)
|
||||
elif completed and completed.status == ExecutionStatus.REVIEW:
|
||||
return ExecutionStartedResponse(
|
||||
message=(
|
||||
f"Agent '{library_agent.name}' is awaiting human review. "
|
||||
f"Check at {library_agent_link}."
|
||||
),
|
||||
session_id=session_id,
|
||||
execution_id=execution.id,
|
||||
graph_id=library_agent.graph_id,
|
||||
graph_name=library_agent.name,
|
||||
library_agent_id=library_agent.id,
|
||||
library_agent_link=library_agent_link,
|
||||
status=ExecutionStatus.REVIEW.value,
|
||||
)
|
||||
else:
|
||||
status = completed.status.value if completed else "unknown"
|
||||
return ExecutionStartedResponse(
|
||||
message=(
|
||||
f"Agent '{library_agent.name}' is still {status} after "
|
||||
f"{wait_for_result}s. Check results later at "
|
||||
f"{library_agent_link}. "
|
||||
f"Use view_agent_output with wait_if_running to check again."
|
||||
),
|
||||
session_id=session_id,
|
||||
execution_id=execution.id,
|
||||
graph_id=library_agent.graph_id,
|
||||
graph_name=library_agent.name,
|
||||
library_agent_id=library_agent.id,
|
||||
library_agent_link=library_agent_link,
|
||||
status=status,
|
||||
)
|
||||
|
||||
return ExecutionStartedResponse(
|
||||
message=(
|
||||
f"Agent '{library_agent.name}' execution started successfully. "
|
||||
|
||||
@@ -160,9 +160,10 @@ class RunBlockTool(BaseTool):
|
||||
logger.info(f"Executing block {block.name} ({block_id}) for user {user_id}")
|
||||
|
||||
creds_manager = IntegrationCredentialsManager()
|
||||
matched_credentials, missing_credentials = (
|
||||
await self._resolve_block_credentials(user_id, block, input_data)
|
||||
)
|
||||
(
|
||||
matched_credentials,
|
||||
missing_credentials,
|
||||
) = await self._resolve_block_credentials(user_id, block, input_data)
|
||||
|
||||
# Get block schemas for details/validation
|
||||
try:
|
||||
|
||||
@@ -214,7 +214,11 @@ class WorkspaceWriteResponse(ToolResponseBase):
|
||||
file_id: str
|
||||
name: str
|
||||
path: str
|
||||
mime_type: str
|
||||
size_bytes: int
|
||||
# workspace:// URL the agent can embed directly in chat to give the user a link.
|
||||
# Format: workspace://<file_id>#<mime_type> (frontend resolves to download URL)
|
||||
download_url: str
|
||||
source: str | None = None # "content", "base64", or "copied from <path>"
|
||||
content_preview: str | None = None # First 200 chars for text files
|
||||
|
||||
@@ -680,11 +684,21 @@ class WriteWorkspaceFileTool(BaseTool):
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Strip MIME parameters (e.g. "text/html; charset=utf-8" → "text/html")
|
||||
# and normalise to lowercase so the fragment is URL-safe.
|
||||
normalized_mime = (rec.mime_type or "").split(";", 1)[0].strip().lower()
|
||||
download_url = (
|
||||
f"workspace://{rec.id}#{normalized_mime}"
|
||||
if normalized_mime
|
||||
else f"workspace://{rec.id}"
|
||||
)
|
||||
return WorkspaceWriteResponse(
|
||||
file_id=rec.id,
|
||||
name=rec.name,
|
||||
path=rec.path,
|
||||
mime_type=normalized_mime,
|
||||
size_bytes=rec.size_bytes,
|
||||
download_url=download_url,
|
||||
source=source,
|
||||
content_preview=preview,
|
||||
message=msg,
|
||||
|
||||
@@ -178,9 +178,13 @@ async def test_block_credit_reset(server: SpinTestServer):
|
||||
assert month2_balance == 1100 # Balance persists, no reset
|
||||
|
||||
# Now test the refill behavior when balance is low
|
||||
# Set balance below refill threshold
|
||||
# Set balance below refill threshold and backdate updatedAt to month2 so
|
||||
# the month3 refill check sees a different (month2 → month3) transition.
|
||||
# Without the explicit updatedAt, Prisma sets it to real-world NOW which
|
||||
# may share the same calendar month as the mocked month3, suppressing refill.
|
||||
await UserBalance.prisma().update(
|
||||
where={"userId": DEFAULT_USER_ID}, data={"balance": 400}
|
||||
where={"userId": DEFAULT_USER_ID},
|
||||
data={"balance": 400, "updatedAt": month2},
|
||||
)
|
||||
|
||||
# Create a month 2 transaction to update the last transaction time
|
||||
|
||||
@@ -303,7 +303,7 @@ class DatabaseManager(AppService):
|
||||
get_user_chat_sessions = _(chat_db.get_user_chat_sessions)
|
||||
get_user_session_count = _(chat_db.get_user_session_count)
|
||||
delete_chat_session = _(chat_db.delete_chat_session)
|
||||
get_chat_session_message_count = _(chat_db.get_chat_session_message_count)
|
||||
get_next_sequence = _(chat_db.get_next_sequence)
|
||||
update_tool_message_content = _(chat_db.update_tool_message_content)
|
||||
|
||||
|
||||
@@ -473,5 +473,5 @@ class DatabaseManagerAsyncClient(AppServiceClient):
|
||||
get_user_chat_sessions = d.get_user_chat_sessions
|
||||
get_user_session_count = d.get_user_session_count
|
||||
delete_chat_session = d.delete_chat_session
|
||||
get_chat_session_message_count = d.get_chat_session_message_count
|
||||
get_next_sequence = d.get_next_sequence
|
||||
update_tool_message_content = d.update_tool_message_content
|
||||
|
||||
@@ -79,6 +79,12 @@ INTEGRATION_WEBHOOK_INCLUDE: prisma.types.IntegrationWebhookInclude = {
|
||||
}
|
||||
|
||||
|
||||
LIBRARY_FOLDER_INCLUDE: prisma.types.LibraryFolderInclude = {
|
||||
"LibraryAgents": {"where": {"isDeleted": False}},
|
||||
"Children": {"where": {"isDeleted": False}},
|
||||
}
|
||||
|
||||
|
||||
def library_agent_include(
|
||||
user_id: str,
|
||||
include_nodes: bool = True,
|
||||
@@ -105,6 +111,7 @@ def library_agent_include(
|
||||
"""
|
||||
result: prisma.types.LibraryAgentInclude = {
|
||||
"Creator": True, # Always needed for creator info
|
||||
"Folder": True, # Always needed for folder info
|
||||
}
|
||||
|
||||
# Build AgentGraph include based on requested options
|
||||
|
||||
@@ -184,7 +184,7 @@ async def find_webhook_by_credentials_and_props(
|
||||
credentials_id: str,
|
||||
webhook_type: str,
|
||||
resource: str,
|
||||
events: list[str],
|
||||
events: Optional[list[str]],
|
||||
) -> Webhook | None:
|
||||
webhook = await IntegrationWebhook.prisma().find_first(
|
||||
where={
|
||||
@@ -192,7 +192,7 @@ async def find_webhook_by_credentials_and_props(
|
||||
"credentialsId": credentials_id,
|
||||
"webhookType": webhook_type,
|
||||
"resource": resource,
|
||||
"events": {"has_every": events},
|
||||
**({"events": {"has_every": events}} if events else {}),
|
||||
},
|
||||
)
|
||||
return Webhook.from_db(webhook) if webhook else None
|
||||
|
||||
426
autogpt_platform/backend/backend/data/tally.py
Normal file
426
autogpt_platform/backend/backend/data/tally.py
Normal file
@@ -0,0 +1,426 @@
|
||||
"""Tally form integration: cache submissions, match by email, extract business understanding."""
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
import logging
|
||||
from datetime import datetime, timezone
|
||||
from typing import Optional
|
||||
|
||||
from openai import AsyncOpenAI
|
||||
|
||||
from backend.data.redis_client import get_redis_async
|
||||
from backend.data.understanding import (
|
||||
BusinessUnderstandingInput,
|
||||
get_business_understanding,
|
||||
upsert_business_understanding,
|
||||
)
|
||||
from backend.util.request import Requests
|
||||
from backend.util.settings import Settings
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
TALLY_API_BASE = "https://api.tally.so"
|
||||
_settings = Settings()
|
||||
TALLY_FORM_ID = _settings.secrets.tally_form_id
|
||||
|
||||
# Redis key templates
|
||||
_EMAIL_INDEX_KEY = "tally:form:{form_id}:email_index"
|
||||
_QUESTIONS_KEY = "tally:form:{form_id}:questions"
|
||||
_LAST_FETCH_KEY = "tally:form:{form_id}:last_fetch"
|
||||
|
||||
# TTLs — keep aligned so last_fetch never outlives the index
|
||||
_INDEX_TTL = 3600 # 1 hour
|
||||
_LAST_FETCH_TTL = 3600 # 1 hour (same as index)
|
||||
|
||||
# Pagination
|
||||
_PAGE_LIMIT = 500
|
||||
_MAX_PAGES = 100
|
||||
|
||||
# LLM extraction timeout (seconds)
|
||||
_LLM_TIMEOUT = 30
|
||||
|
||||
|
||||
def _mask_email(email: str) -> str:
|
||||
"""Mask an email for safe logging: 'alice@example.com' -> 'a***e@example.com'."""
|
||||
try:
|
||||
local, domain = email.rsplit("@", 1)
|
||||
if len(local) <= 2:
|
||||
masked_local = local[0] + "***"
|
||||
else:
|
||||
masked_local = local[0] + "***" + local[-1]
|
||||
return f"{masked_local}@{domain}"
|
||||
except (ValueError, IndexError):
|
||||
return "***"
|
||||
|
||||
|
||||
async def _fetch_tally_page(
|
||||
client: Requests,
|
||||
form_id: str,
|
||||
page: int,
|
||||
limit: int = _PAGE_LIMIT,
|
||||
start_date: Optional[str] = None,
|
||||
) -> dict:
|
||||
"""Fetch a single page of submissions from the Tally API."""
|
||||
url = f"{TALLY_API_BASE}/forms/{form_id}/submissions?page={page}&limit={limit}"
|
||||
if start_date:
|
||||
url += f"&startDate={start_date}"
|
||||
|
||||
response = await client.get(url)
|
||||
return response.json()
|
||||
|
||||
|
||||
def _make_tally_client(api_key: str) -> Requests:
|
||||
"""Create a Requests client configured for the Tally API."""
|
||||
return Requests(
|
||||
trusted_origins=[TALLY_API_BASE],
|
||||
raise_for_status=True,
|
||||
extra_headers={
|
||||
"Authorization": f"Bearer {api_key}",
|
||||
"Accept": "application/json",
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
async def _fetch_all_submissions(
|
||||
client: Requests,
|
||||
form_id: str,
|
||||
start_date: Optional[str] = None,
|
||||
max_pages: int = _MAX_PAGES,
|
||||
) -> tuple[list[dict], list[dict]]:
|
||||
"""Paginate through all Tally submissions. Returns (questions, submissions)."""
|
||||
|
||||
questions: list[dict] = []
|
||||
all_submissions: list[dict] = []
|
||||
page = 1
|
||||
|
||||
while True:
|
||||
data = await _fetch_tally_page(client, form_id, page, start_date=start_date)
|
||||
|
||||
if page == 1:
|
||||
questions = data.get("questions", [])
|
||||
|
||||
submissions = data.get("submissions", [])
|
||||
all_submissions.extend(submissions)
|
||||
|
||||
# Tally API uses `hasMore` for pagination
|
||||
has_more = data.get("hasMore", False)
|
||||
if not has_more:
|
||||
break
|
||||
if page >= max_pages:
|
||||
total = data.get("totalNumberOfSubmissionsPerFilter", {}).get("all", "?")
|
||||
logger.warning(
|
||||
f"Tally: hit max page cap ({max_pages}) for form {form_id}, "
|
||||
f"fetched {len(all_submissions)} of {total} total submissions"
|
||||
)
|
||||
break
|
||||
page += 1
|
||||
|
||||
return questions, all_submissions
|
||||
|
||||
|
||||
def _build_email_index(
|
||||
submissions: list[dict], questions: list[dict]
|
||||
) -> dict[str, dict]:
|
||||
"""Build an {email -> submission_data} index from submissions.
|
||||
|
||||
Scans question titles for email/contact fields to find the email answer.
|
||||
"""
|
||||
# Find question IDs that are likely email fields
|
||||
email_question_ids: list[str] = []
|
||||
for q in questions:
|
||||
label = (q.get("label") or q.get("title") or q.get("name") or "").lower()
|
||||
q_type = (q.get("type") or "").lower()
|
||||
if q_type in ("input_email", "email"):
|
||||
email_question_ids.append(q["id"])
|
||||
elif any(kw in label for kw in ("email", "e-mail", "contact")):
|
||||
email_question_ids.append(q["id"])
|
||||
|
||||
index: dict[str, dict] = {}
|
||||
for sub in submissions:
|
||||
email = _extract_email_from_submission(sub, email_question_ids)
|
||||
if email:
|
||||
index[email.lower()] = {
|
||||
"responses": sub.get("responses", sub.get("fields", [])),
|
||||
"submitted_at": sub.get("submittedAt", sub.get("createdAt", "")),
|
||||
"questions": sub.get("questions", []),
|
||||
}
|
||||
return index
|
||||
|
||||
|
||||
def _extract_email_from_submission(
|
||||
submission: dict, email_question_ids: list[str]
|
||||
) -> Optional[str]:
|
||||
"""Extract email address from a submission by checking respondentEmail, then field responses."""
|
||||
# Try respondent email first (Tally often includes this)
|
||||
respondent_email = submission.get("respondentEmail")
|
||||
if respondent_email:
|
||||
return respondent_email
|
||||
|
||||
# Search through responses/fields for matching question IDs
|
||||
responses = submission.get("responses", submission.get("fields", []))
|
||||
if isinstance(responses, list):
|
||||
for resp in responses:
|
||||
q_id = resp.get("questionId") or resp.get("key") or resp.get("id")
|
||||
if q_id in email_question_ids:
|
||||
value = resp.get("value") or resp.get("answer")
|
||||
if isinstance(value, str) and "@" in value:
|
||||
return value
|
||||
elif isinstance(responses, dict):
|
||||
for q_id in email_question_ids:
|
||||
value = responses.get(q_id)
|
||||
if isinstance(value, str) and "@" in value:
|
||||
return value
|
||||
|
||||
return None
|
||||
|
||||
|
||||
async def _get_cached_index(
|
||||
form_id: str,
|
||||
) -> tuple[Optional[dict], Optional[list]]:
|
||||
"""Return (email_index, questions) from Redis, or (None, None) on cache miss."""
|
||||
redis = await get_redis_async()
|
||||
index_key = _EMAIL_INDEX_KEY.format(form_id=form_id)
|
||||
questions_key = _QUESTIONS_KEY.format(form_id=form_id)
|
||||
|
||||
raw_index = await redis.get(index_key)
|
||||
raw_questions = await redis.get(questions_key)
|
||||
|
||||
if raw_index and raw_questions:
|
||||
return json.loads(raw_index), json.loads(raw_questions)
|
||||
return None, None
|
||||
|
||||
|
||||
async def _refresh_cache(form_id: str) -> tuple[dict, list]:
|
||||
"""Refresh the Tally submission cache. Uses incremental fetch when possible.
|
||||
|
||||
Returns (email_index, questions).
|
||||
"""
|
||||
settings = Settings()
|
||||
client = _make_tally_client(settings.secrets.tally_api_key)
|
||||
|
||||
redis = await get_redis_async()
|
||||
last_fetch_key = _LAST_FETCH_KEY.format(form_id=form_id)
|
||||
index_key = _EMAIL_INDEX_KEY.format(form_id=form_id)
|
||||
questions_key = _QUESTIONS_KEY.format(form_id=form_id)
|
||||
|
||||
last_fetch = await redis.get(last_fetch_key)
|
||||
|
||||
if last_fetch:
|
||||
# Try to load existing index for incremental merge
|
||||
raw_existing = await redis.get(index_key)
|
||||
|
||||
if raw_existing is None:
|
||||
# Index expired but last_fetch still present — fall back to full fetch
|
||||
logger.info("Tally: last_fetch present but index missing, doing full fetch")
|
||||
questions, submissions = await _fetch_all_submissions(client, form_id)
|
||||
email_index = _build_email_index(submissions, questions)
|
||||
else:
|
||||
# Incremental fetch: only get new submissions since last fetch
|
||||
logger.info(f"Tally incremental fetch since {last_fetch}")
|
||||
questions, new_submissions = await _fetch_all_submissions(
|
||||
client, form_id, start_date=last_fetch
|
||||
)
|
||||
|
||||
existing_index: dict[str, dict] = json.loads(raw_existing)
|
||||
|
||||
if not questions:
|
||||
raw_q = await redis.get(questions_key)
|
||||
if raw_q:
|
||||
questions = json.loads(raw_q)
|
||||
|
||||
new_index = _build_email_index(new_submissions, questions)
|
||||
existing_index.update(new_index)
|
||||
email_index = existing_index
|
||||
else:
|
||||
# Full initial fetch
|
||||
logger.info("Tally full initial fetch")
|
||||
questions, submissions = await _fetch_all_submissions(client, form_id)
|
||||
email_index = _build_email_index(submissions, questions)
|
||||
|
||||
# Store in Redis
|
||||
now = datetime.now(timezone.utc).isoformat()
|
||||
await redis.setex(index_key, _INDEX_TTL, json.dumps(email_index))
|
||||
await redis.setex(questions_key, _INDEX_TTL, json.dumps(questions))
|
||||
await redis.setex(last_fetch_key, _LAST_FETCH_TTL, now)
|
||||
|
||||
logger.info(f"Tally cache refreshed: {len(email_index)} emails indexed")
|
||||
return email_index, questions
|
||||
|
||||
|
||||
async def find_submission_by_email(
|
||||
form_id: str, email: str
|
||||
) -> Optional[tuple[dict, list]]:
|
||||
"""Look up a Tally submission by email. Uses cache when available.
|
||||
|
||||
Returns (submission_data, questions) or None.
|
||||
"""
|
||||
email_lower = email.lower()
|
||||
|
||||
# Try cache first
|
||||
email_index, questions = await _get_cached_index(form_id)
|
||||
if email_index is not None and questions is not None:
|
||||
sub = email_index.get(email_lower)
|
||||
if sub is not None:
|
||||
return sub, questions
|
||||
return None
|
||||
|
||||
# Cache miss - refresh
|
||||
email_index, questions = await _refresh_cache(form_id)
|
||||
sub = email_index.get(email_lower)
|
||||
if sub is not None:
|
||||
return sub, questions
|
||||
return None
|
||||
|
||||
|
||||
def format_submission_for_llm(submission: dict, questions: list[dict]) -> str:
|
||||
"""Format a submission as readable Q&A text for LLM consumption."""
|
||||
# Build question ID -> title lookup
|
||||
q_titles: dict[str, str] = {}
|
||||
for q in questions:
|
||||
q_id = q.get("id", "")
|
||||
title = q.get("label") or q.get("title") or q.get("name") or f"Question {q_id}"
|
||||
q_titles[q_id] = title
|
||||
|
||||
lines: list[str] = []
|
||||
responses = submission.get("responses", [])
|
||||
|
||||
if isinstance(responses, list):
|
||||
for resp in responses:
|
||||
q_id = resp.get("questionId") or resp.get("key") or resp.get("id") or ""
|
||||
title = q_titles.get(q_id, f"Question {q_id}")
|
||||
value = resp.get("value") or resp.get("answer") or ""
|
||||
lines.append(f"Q: {title}\nA: {_format_answer(value)}")
|
||||
elif isinstance(responses, dict):
|
||||
for q_id, value in responses.items():
|
||||
title = q_titles.get(q_id, f"Question {q_id}")
|
||||
lines.append(f"Q: {title}\nA: {_format_answer(value)}")
|
||||
|
||||
return "\n\n".join(lines)
|
||||
|
||||
|
||||
def _format_answer(value: object) -> str:
|
||||
"""Convert an answer value (str, list, dict, None) to a human-readable string."""
|
||||
if value is None:
|
||||
return "(no answer)"
|
||||
if isinstance(value, list):
|
||||
return ", ".join(str(v) for v in value)
|
||||
if isinstance(value, dict):
|
||||
parts = [f"{k}: {v}" for k, v in value.items() if v]
|
||||
return "; ".join(parts) if parts else "(no answer)"
|
||||
return str(value)
|
||||
|
||||
|
||||
_EXTRACTION_PROMPT = """\
|
||||
You are a business analyst. Given the following form submission data, extract structured business understanding information.
|
||||
|
||||
Return a JSON object with ONLY the fields that can be confidently extracted. Use null for fields that cannot be determined.
|
||||
|
||||
Fields:
|
||||
- user_name (string): the person's name
|
||||
- job_title (string): their job title
|
||||
- business_name (string): company/business name
|
||||
- industry (string): industry or sector
|
||||
- business_size (string): company size e.g. "1-10", "11-50", "51-200"
|
||||
- user_role (string): their role context e.g. "decision maker", "implementer"
|
||||
- key_workflows (list of strings): key business workflows
|
||||
- daily_activities (list of strings): daily activities performed
|
||||
- pain_points (list of strings): current pain points
|
||||
- bottlenecks (list of strings): process bottlenecks
|
||||
- manual_tasks (list of strings): manual/repetitive tasks
|
||||
- automation_goals (list of strings): desired automation goals
|
||||
- current_software (list of strings): software/tools currently used
|
||||
- existing_automation (list of strings): existing automations
|
||||
- additional_notes (string): any additional context
|
||||
|
||||
Form data:
|
||||
"""
|
||||
|
||||
_EXTRACTION_SUFFIX = "\n\nReturn ONLY valid JSON."
|
||||
|
||||
|
||||
async def extract_business_understanding(
|
||||
formatted_text: str,
|
||||
) -> BusinessUnderstandingInput:
|
||||
"""Use an LLM to extract structured business understanding from form text.
|
||||
|
||||
Raises on timeout or unparseable response so the caller can handle it.
|
||||
"""
|
||||
settings = Settings()
|
||||
api_key = settings.secrets.open_router_api_key
|
||||
client = AsyncOpenAI(api_key=api_key, base_url="https://openrouter.ai/api/v1")
|
||||
|
||||
try:
|
||||
response = await asyncio.wait_for(
|
||||
client.chat.completions.create(
|
||||
model="openai/gpt-4o-mini",
|
||||
messages=[
|
||||
{
|
||||
"role": "user",
|
||||
"content": f"{_EXTRACTION_PROMPT}{formatted_text}{_EXTRACTION_SUFFIX}",
|
||||
}
|
||||
],
|
||||
response_format={"type": "json_object"},
|
||||
temperature=0.0,
|
||||
),
|
||||
timeout=_LLM_TIMEOUT,
|
||||
)
|
||||
except asyncio.TimeoutError:
|
||||
logger.warning("Tally: LLM extraction timed out")
|
||||
raise
|
||||
|
||||
raw = response.choices[0].message.content or "{}"
|
||||
try:
|
||||
data = json.loads(raw)
|
||||
except json.JSONDecodeError:
|
||||
logger.warning("Tally: LLM returned invalid JSON, skipping extraction")
|
||||
raise
|
||||
|
||||
# Filter out null values before constructing
|
||||
cleaned = {k: v for k, v in data.items() if v is not None}
|
||||
return BusinessUnderstandingInput(**cleaned)
|
||||
|
||||
|
||||
async def populate_understanding_from_tally(user_id: str, email: str) -> None:
|
||||
"""Main orchestrator: check Tally for a matching submission and populate understanding.
|
||||
|
||||
Fire-and-forget safe — all exceptions are caught and logged.
|
||||
"""
|
||||
try:
|
||||
# Check if understanding already exists (idempotency)
|
||||
existing = await get_business_understanding(user_id)
|
||||
if existing is not None:
|
||||
logger.debug(
|
||||
f"Tally: user {user_id} already has business understanding, skipping"
|
||||
)
|
||||
return
|
||||
|
||||
# Check API key is configured
|
||||
settings = Settings()
|
||||
if not settings.secrets.tally_api_key:
|
||||
logger.debug("Tally: no API key configured, skipping")
|
||||
return
|
||||
|
||||
# Look up submission by email
|
||||
masked = _mask_email(email)
|
||||
result = await find_submission_by_email(TALLY_FORM_ID, email)
|
||||
if result is None:
|
||||
logger.debug(f"Tally: no submission found for {masked}")
|
||||
return
|
||||
|
||||
submission, questions = result
|
||||
logger.info(f"Tally: found submission for {masked}, extracting understanding")
|
||||
|
||||
# Format and extract
|
||||
formatted = format_submission_for_llm(submission, questions)
|
||||
if not formatted.strip():
|
||||
logger.warning("Tally: formatted submission was empty, skipping")
|
||||
return
|
||||
|
||||
understanding_input = await extract_business_understanding(formatted)
|
||||
|
||||
# Upsert into database
|
||||
await upsert_business_understanding(user_id, understanding_input)
|
||||
logger.info(f"Tally: successfully populated understanding for user {user_id}")
|
||||
|
||||
except Exception:
|
||||
logger.exception(f"Tally: error populating understanding for user {user_id}")
|
||||
589
autogpt_platform/backend/backend/data/tally_test.py
Normal file
589
autogpt_platform/backend/backend/data/tally_test.py
Normal file
@@ -0,0 +1,589 @@
|
||||
"""Tests for backend.data.tally module."""
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
from unittest.mock import AsyncMock, MagicMock, patch
|
||||
|
||||
import pytest
|
||||
|
||||
from backend.data.tally import (
|
||||
_EXTRACTION_PROMPT,
|
||||
_EXTRACTION_SUFFIX,
|
||||
_build_email_index,
|
||||
_format_answer,
|
||||
_make_tally_client,
|
||||
_mask_email,
|
||||
_refresh_cache,
|
||||
extract_business_understanding,
|
||||
find_submission_by_email,
|
||||
format_submission_for_llm,
|
||||
populate_understanding_from_tally,
|
||||
)
|
||||
|
||||
# ── Fixtures ──────────────────────────────────────────────────────────────────
|
||||
|
||||
SAMPLE_QUESTIONS = [
|
||||
{"id": "q1", "label": "What is your name?", "type": "INPUT_TEXT"},
|
||||
{"id": "q2", "label": "Email address", "type": "INPUT_EMAIL"},
|
||||
{"id": "q3", "label": "Company name", "type": "INPUT_TEXT"},
|
||||
{"id": "q4", "label": "Industry", "type": "INPUT_TEXT"},
|
||||
]
|
||||
|
||||
SAMPLE_SUBMISSIONS = [
|
||||
{
|
||||
"respondentEmail": None,
|
||||
"responses": [
|
||||
{"questionId": "q1", "value": "Alice Smith"},
|
||||
{"questionId": "q2", "value": "alice@example.com"},
|
||||
{"questionId": "q3", "value": "Acme Corp"},
|
||||
{"questionId": "q4", "value": "Technology"},
|
||||
],
|
||||
"submittedAt": "2025-01-15T10:00:00Z",
|
||||
},
|
||||
{
|
||||
"respondentEmail": "bob@example.com",
|
||||
"responses": [
|
||||
{"questionId": "q1", "value": "Bob Jones"},
|
||||
{"questionId": "q2", "value": "bob@example.com"},
|
||||
{"questionId": "q3", "value": "Bob's Burgers"},
|
||||
{"questionId": "q4", "value": "Food"},
|
||||
],
|
||||
"submittedAt": "2025-01-16T10:00:00Z",
|
||||
},
|
||||
]
|
||||
|
||||
|
||||
# ── _build_email_index ────────────────────────────────────────────────────────
|
||||
|
||||
|
||||
def test_build_email_index():
|
||||
index = _build_email_index(SAMPLE_SUBMISSIONS, SAMPLE_QUESTIONS)
|
||||
assert "alice@example.com" in index
|
||||
assert "bob@example.com" in index
|
||||
assert len(index) == 2
|
||||
|
||||
|
||||
def test_build_email_index_case_insensitive():
|
||||
submissions = [
|
||||
{
|
||||
"respondentEmail": None,
|
||||
"responses": [
|
||||
{"questionId": "q2", "value": "Alice@Example.COM"},
|
||||
],
|
||||
"submittedAt": "2025-01-15T10:00:00Z",
|
||||
},
|
||||
]
|
||||
index = _build_email_index(submissions, SAMPLE_QUESTIONS)
|
||||
assert "alice@example.com" in index
|
||||
assert "Alice@Example.COM" not in index
|
||||
|
||||
|
||||
def test_build_email_index_empty():
|
||||
index = _build_email_index([], SAMPLE_QUESTIONS)
|
||||
assert index == {}
|
||||
|
||||
|
||||
def test_build_email_index_no_email_field():
|
||||
questions = [{"id": "q1", "label": "Name", "type": "INPUT_TEXT"}]
|
||||
submissions = [
|
||||
{
|
||||
"responses": [{"questionId": "q1", "value": "Alice"}],
|
||||
"submittedAt": "2025-01-15T10:00:00Z",
|
||||
}
|
||||
]
|
||||
index = _build_email_index(submissions, questions)
|
||||
assert index == {}
|
||||
|
||||
|
||||
def test_build_email_index_respondent_email():
|
||||
"""respondentEmail takes precedence over field scanning."""
|
||||
submissions = [
|
||||
{
|
||||
"respondentEmail": "direct@example.com",
|
||||
"responses": [
|
||||
{"questionId": "q2", "value": "field@example.com"},
|
||||
],
|
||||
"submittedAt": "2025-01-15T10:00:00Z",
|
||||
}
|
||||
]
|
||||
index = _build_email_index(submissions, SAMPLE_QUESTIONS)
|
||||
assert "direct@example.com" in index
|
||||
assert "field@example.com" not in index
|
||||
|
||||
|
||||
# ── format_submission_for_llm ─────────────────────────────────────────────────
|
||||
|
||||
|
||||
def test_format_submission_for_llm():
|
||||
submission = {
|
||||
"responses": [
|
||||
{"questionId": "q1", "value": "Alice Smith"},
|
||||
{"questionId": "q3", "value": "Acme Corp"},
|
||||
],
|
||||
}
|
||||
result = format_submission_for_llm(submission, SAMPLE_QUESTIONS)
|
||||
assert "Q: What is your name?" in result
|
||||
assert "A: Alice Smith" in result
|
||||
assert "Q: Company name" in result
|
||||
assert "A: Acme Corp" in result
|
||||
|
||||
|
||||
def test_format_submission_for_llm_dict_responses():
|
||||
submission = {
|
||||
"responses": {
|
||||
"q1": "Alice Smith",
|
||||
"q3": "Acme Corp",
|
||||
},
|
||||
}
|
||||
result = format_submission_for_llm(submission, SAMPLE_QUESTIONS)
|
||||
assert "A: Alice Smith" in result
|
||||
assert "A: Acme Corp" in result
|
||||
|
||||
|
||||
def test_format_answer_types():
|
||||
assert _format_answer(None) == "(no answer)"
|
||||
assert _format_answer("hello") == "hello"
|
||||
assert _format_answer(["a", "b"]) == "a, b"
|
||||
assert _format_answer({"key": "val"}) == "key: val"
|
||||
assert _format_answer(42) == "42"
|
||||
|
||||
|
||||
# ── find_submission_by_email ──────────────────────────────────────────────────
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_find_submission_by_email_cache_hit():
|
||||
cached_index = {
|
||||
"alice@example.com": {"responses": [], "submitted_at": "2025-01-15"},
|
||||
}
|
||||
cached_questions = SAMPLE_QUESTIONS
|
||||
|
||||
with patch(
|
||||
"backend.data.tally._get_cached_index",
|
||||
new_callable=AsyncMock,
|
||||
return_value=(cached_index, cached_questions),
|
||||
) as mock_cache:
|
||||
result = await find_submission_by_email("form123", "alice@example.com")
|
||||
|
||||
mock_cache.assert_awaited_once_with("form123")
|
||||
assert result is not None
|
||||
sub, questions = result
|
||||
assert sub["submitted_at"] == "2025-01-15"
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_find_submission_by_email_cache_miss():
|
||||
refreshed_index = {
|
||||
"alice@example.com": {"responses": [], "submitted_at": "2025-01-15"},
|
||||
}
|
||||
|
||||
with (
|
||||
patch(
|
||||
"backend.data.tally._get_cached_index",
|
||||
new_callable=AsyncMock,
|
||||
return_value=(None, None),
|
||||
),
|
||||
patch(
|
||||
"backend.data.tally._refresh_cache",
|
||||
new_callable=AsyncMock,
|
||||
return_value=(refreshed_index, SAMPLE_QUESTIONS),
|
||||
) as mock_refresh,
|
||||
):
|
||||
result = await find_submission_by_email("form123", "alice@example.com")
|
||||
|
||||
mock_refresh.assert_awaited_once_with("form123")
|
||||
assert result is not None
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_find_submission_by_email_no_match():
|
||||
cached_index = {
|
||||
"alice@example.com": {"responses": [], "submitted_at": "2025-01-15"},
|
||||
}
|
||||
|
||||
with patch(
|
||||
"backend.data.tally._get_cached_index",
|
||||
new_callable=AsyncMock,
|
||||
return_value=(cached_index, SAMPLE_QUESTIONS),
|
||||
):
|
||||
result = await find_submission_by_email("form123", "unknown@example.com")
|
||||
|
||||
assert result is None
|
||||
|
||||
|
||||
# ── populate_understanding_from_tally ─────────────────────────────────────────
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_populate_understanding_skips_existing():
|
||||
"""If user already has understanding, skip entirely."""
|
||||
mock_understanding = MagicMock()
|
||||
|
||||
with (
|
||||
patch(
|
||||
"backend.data.tally.get_business_understanding",
|
||||
new_callable=AsyncMock,
|
||||
return_value=mock_understanding,
|
||||
) as mock_get,
|
||||
patch(
|
||||
"backend.data.tally.find_submission_by_email",
|
||||
new_callable=AsyncMock,
|
||||
) as mock_find,
|
||||
):
|
||||
await populate_understanding_from_tally("user-1", "test@example.com")
|
||||
|
||||
mock_get.assert_awaited_once_with("user-1")
|
||||
mock_find.assert_not_awaited()
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_populate_understanding_skips_no_api_key():
|
||||
"""If no Tally API key, skip gracefully."""
|
||||
mock_settings = MagicMock()
|
||||
mock_settings.secrets.tally_api_key = ""
|
||||
|
||||
with (
|
||||
patch(
|
||||
"backend.data.tally.get_business_understanding",
|
||||
new_callable=AsyncMock,
|
||||
return_value=None,
|
||||
),
|
||||
patch("backend.data.tally.Settings", return_value=mock_settings),
|
||||
patch(
|
||||
"backend.data.tally.find_submission_by_email",
|
||||
new_callable=AsyncMock,
|
||||
) as mock_find,
|
||||
):
|
||||
await populate_understanding_from_tally("user-1", "test@example.com")
|
||||
|
||||
mock_find.assert_not_awaited()
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_populate_understanding_handles_errors():
|
||||
"""Must never raise, even on unexpected errors."""
|
||||
with patch(
|
||||
"backend.data.tally.get_business_understanding",
|
||||
new_callable=AsyncMock,
|
||||
side_effect=RuntimeError("DB down"),
|
||||
):
|
||||
# Should not raise
|
||||
await populate_understanding_from_tally("user-1", "test@example.com")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_populate_understanding_full_flow():
|
||||
"""Happy path: no existing understanding, finds submission, extracts, upserts."""
|
||||
mock_settings = MagicMock()
|
||||
mock_settings.secrets.tally_api_key = "test-key"
|
||||
|
||||
submission = {
|
||||
"responses": [
|
||||
{"questionId": "q1", "value": "Alice"},
|
||||
{"questionId": "q3", "value": "Acme"},
|
||||
],
|
||||
}
|
||||
mock_input = MagicMock()
|
||||
|
||||
with (
|
||||
patch(
|
||||
"backend.data.tally.get_business_understanding",
|
||||
new_callable=AsyncMock,
|
||||
return_value=None,
|
||||
),
|
||||
patch("backend.data.tally.Settings", return_value=mock_settings),
|
||||
patch(
|
||||
"backend.data.tally.find_submission_by_email",
|
||||
new_callable=AsyncMock,
|
||||
return_value=(submission, SAMPLE_QUESTIONS),
|
||||
),
|
||||
patch(
|
||||
"backend.data.tally.extract_business_understanding",
|
||||
new_callable=AsyncMock,
|
||||
return_value=mock_input,
|
||||
) as mock_extract,
|
||||
patch(
|
||||
"backend.data.tally.upsert_business_understanding",
|
||||
new_callable=AsyncMock,
|
||||
) as mock_upsert,
|
||||
):
|
||||
await populate_understanding_from_tally("user-1", "alice@example.com")
|
||||
|
||||
mock_extract.assert_awaited_once()
|
||||
mock_upsert.assert_awaited_once_with("user-1", mock_input)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_populate_understanding_handles_llm_timeout():
|
||||
"""LLM timeout is caught and doesn't raise."""
|
||||
import asyncio
|
||||
|
||||
mock_settings = MagicMock()
|
||||
mock_settings.secrets.tally_api_key = "test-key"
|
||||
|
||||
submission = {
|
||||
"responses": [{"questionId": "q1", "value": "Alice"}],
|
||||
}
|
||||
|
||||
with (
|
||||
patch(
|
||||
"backend.data.tally.get_business_understanding",
|
||||
new_callable=AsyncMock,
|
||||
return_value=None,
|
||||
),
|
||||
patch("backend.data.tally.Settings", return_value=mock_settings),
|
||||
patch(
|
||||
"backend.data.tally.find_submission_by_email",
|
||||
new_callable=AsyncMock,
|
||||
return_value=(submission, SAMPLE_QUESTIONS),
|
||||
),
|
||||
patch(
|
||||
"backend.data.tally.extract_business_understanding",
|
||||
new_callable=AsyncMock,
|
||||
side_effect=asyncio.TimeoutError(),
|
||||
),
|
||||
patch(
|
||||
"backend.data.tally.upsert_business_understanding",
|
||||
new_callable=AsyncMock,
|
||||
) as mock_upsert,
|
||||
):
|
||||
await populate_understanding_from_tally("user-1", "alice@example.com")
|
||||
|
||||
mock_upsert.assert_not_awaited()
|
||||
|
||||
|
||||
# ── _mask_email ───────────────────────────────────────────────────────────────
|
||||
|
||||
|
||||
def test_mask_email():
|
||||
assert _mask_email("alice@example.com") == "a***e@example.com"
|
||||
assert _mask_email("ab@example.com") == "a***@example.com"
|
||||
assert _mask_email("a@example.com") == "a***@example.com"
|
||||
|
||||
|
||||
def test_mask_email_invalid():
|
||||
assert _mask_email("no-at-sign") == "***"
|
||||
|
||||
|
||||
# ── Prompt construction (curly-brace safety) ─────────────────────────────────
|
||||
|
||||
|
||||
def test_extraction_prompt_safe_with_curly_braces():
|
||||
"""User content with curly braces must not break prompt construction.
|
||||
|
||||
Previously _EXTRACTION_PROMPT.format(submission_text=...) would raise
|
||||
KeyError/ValueError if the user text contained { or }.
|
||||
"""
|
||||
text_with_braces = "Q: What tools do you use?\nA: We use {Slack} and {{Jira}}"
|
||||
# This must not raise — the old .format() call would fail here
|
||||
prompt = f"{_EXTRACTION_PROMPT}{text_with_braces}{_EXTRACTION_SUFFIX}"
|
||||
assert text_with_braces in prompt
|
||||
assert prompt.startswith("You are a business analyst.")
|
||||
assert prompt.endswith("Return ONLY valid JSON.")
|
||||
|
||||
|
||||
def test_extraction_prompt_no_format_placeholders():
|
||||
"""_EXTRACTION_PROMPT must not contain Python format placeholders."""
|
||||
assert "{submission_text}" not in _EXTRACTION_PROMPT
|
||||
# Ensure no stray single-brace placeholders
|
||||
# (double braces {{ are fine — they're literal in format strings)
|
||||
import re
|
||||
|
||||
single_braces = re.findall(r"(?<!\{)\{[^{].*?\}(?!\})", _EXTRACTION_PROMPT)
|
||||
assert single_braces == [], f"Found format placeholders: {single_braces}"
|
||||
|
||||
|
||||
# ── extract_business_understanding ────────────────────────────────────────────
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_extract_business_understanding_success():
|
||||
"""Happy path: LLM returns valid JSON that maps to BusinessUnderstandingInput."""
|
||||
mock_choice = MagicMock()
|
||||
mock_choice.message.content = json.dumps(
|
||||
{
|
||||
"user_name": "Alice",
|
||||
"business_name": "Acme Corp",
|
||||
"industry": "Technology",
|
||||
"pain_points": ["manual reporting"],
|
||||
}
|
||||
)
|
||||
mock_response = MagicMock()
|
||||
mock_response.choices = [mock_choice]
|
||||
|
||||
mock_client = AsyncMock()
|
||||
mock_client.chat.completions.create.return_value = mock_response
|
||||
|
||||
with patch("backend.data.tally.AsyncOpenAI", return_value=mock_client):
|
||||
result = await extract_business_understanding("Q: Name?\nA: Alice")
|
||||
|
||||
assert result.user_name == "Alice"
|
||||
assert result.business_name == "Acme Corp"
|
||||
assert result.industry == "Technology"
|
||||
assert result.pain_points == ["manual reporting"]
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_extract_business_understanding_filters_nulls():
|
||||
"""Null values from LLM should be excluded from the result."""
|
||||
mock_choice = MagicMock()
|
||||
mock_choice.message.content = json.dumps(
|
||||
{"user_name": "Alice", "business_name": None, "industry": None}
|
||||
)
|
||||
mock_response = MagicMock()
|
||||
mock_response.choices = [mock_choice]
|
||||
|
||||
mock_client = AsyncMock()
|
||||
mock_client.chat.completions.create.return_value = mock_response
|
||||
|
||||
with patch("backend.data.tally.AsyncOpenAI", return_value=mock_client):
|
||||
result = await extract_business_understanding("Q: Name?\nA: Alice")
|
||||
|
||||
assert result.user_name == "Alice"
|
||||
assert result.business_name is None
|
||||
assert result.industry is None
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_extract_business_understanding_invalid_json():
|
||||
"""Invalid JSON from LLM should raise JSONDecodeError."""
|
||||
mock_choice = MagicMock()
|
||||
mock_choice.message.content = "not valid json {"
|
||||
mock_response = MagicMock()
|
||||
mock_response.choices = [mock_choice]
|
||||
|
||||
mock_client = AsyncMock()
|
||||
mock_client.chat.completions.create.return_value = mock_response
|
||||
|
||||
with (
|
||||
patch("backend.data.tally.AsyncOpenAI", return_value=mock_client),
|
||||
pytest.raises(json.JSONDecodeError),
|
||||
):
|
||||
await extract_business_understanding("Q: Name?\nA: Alice")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_extract_business_understanding_timeout():
|
||||
"""LLM timeout should propagate as asyncio.TimeoutError."""
|
||||
mock_client = AsyncMock()
|
||||
mock_client.chat.completions.create.side_effect = asyncio.TimeoutError()
|
||||
|
||||
with (
|
||||
patch("backend.data.tally.AsyncOpenAI", return_value=mock_client),
|
||||
patch("backend.data.tally._LLM_TIMEOUT", 0.001),
|
||||
pytest.raises(asyncio.TimeoutError),
|
||||
):
|
||||
await extract_business_understanding("Q: Name?\nA: Alice")
|
||||
|
||||
|
||||
# ── _refresh_cache ───────────────────────────────────────────────────────────
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_refresh_cache_full_fetch():
|
||||
"""First fetch (no last_fetch in Redis) should do a full fetch and store in Redis."""
|
||||
mock_settings = MagicMock()
|
||||
mock_settings.secrets.tally_api_key = "test-key"
|
||||
|
||||
mock_redis = AsyncMock()
|
||||
mock_redis.get.return_value = None # No last_fetch, no cached index
|
||||
|
||||
questions = SAMPLE_QUESTIONS
|
||||
submissions = SAMPLE_SUBMISSIONS
|
||||
|
||||
with (
|
||||
patch("backend.data.tally.Settings", return_value=mock_settings),
|
||||
patch(
|
||||
"backend.data.tally.get_redis_async",
|
||||
new_callable=AsyncMock,
|
||||
return_value=mock_redis,
|
||||
),
|
||||
patch(
|
||||
"backend.data.tally._fetch_all_submissions",
|
||||
new_callable=AsyncMock,
|
||||
return_value=(questions, submissions),
|
||||
) as mock_fetch,
|
||||
):
|
||||
index, returned_questions = await _refresh_cache("form123")
|
||||
|
||||
mock_fetch.assert_awaited_once()
|
||||
assert "alice@example.com" in index
|
||||
assert "bob@example.com" in index
|
||||
assert returned_questions == questions
|
||||
# Verify Redis setex was called for index, questions, and last_fetch
|
||||
assert mock_redis.setex.await_count == 3
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_refresh_cache_incremental_fetch():
|
||||
"""When last_fetch and index both exist, should do incremental fetch and merge."""
|
||||
mock_settings = MagicMock()
|
||||
mock_settings.secrets.tally_api_key = "test-key"
|
||||
|
||||
existing_index = {
|
||||
"old@example.com": {"responses": [], "submitted_at": "2025-01-01"}
|
||||
}
|
||||
|
||||
mock_redis = AsyncMock()
|
||||
|
||||
def mock_get(key):
|
||||
if "last_fetch" in key:
|
||||
return "2025-01-14T00:00:00Z"
|
||||
if "email_index" in key:
|
||||
return json.dumps(existing_index)
|
||||
if "questions" in key:
|
||||
return json.dumps(SAMPLE_QUESTIONS)
|
||||
return None
|
||||
|
||||
mock_redis.get.side_effect = mock_get
|
||||
|
||||
new_submissions = [SAMPLE_SUBMISSIONS[0]] # Just Alice
|
||||
|
||||
with (
|
||||
patch("backend.data.tally.Settings", return_value=mock_settings),
|
||||
patch(
|
||||
"backend.data.tally.get_redis_async",
|
||||
new_callable=AsyncMock,
|
||||
return_value=mock_redis,
|
||||
),
|
||||
patch(
|
||||
"backend.data.tally._fetch_all_submissions",
|
||||
new_callable=AsyncMock,
|
||||
return_value=(SAMPLE_QUESTIONS, new_submissions),
|
||||
),
|
||||
):
|
||||
index, _ = await _refresh_cache("form123")
|
||||
|
||||
# Should contain both old and new entries
|
||||
assert "old@example.com" in index
|
||||
assert "alice@example.com" in index
|
||||
|
||||
|
||||
# ── _make_tally_client ───────────────────────────────────────────────────────
|
||||
|
||||
|
||||
def test_make_tally_client_returns_configured_client():
|
||||
"""_make_tally_client should create a Requests client with auth headers."""
|
||||
client = _make_tally_client("test-api-key")
|
||||
assert client.extra_headers is not None
|
||||
assert client.extra_headers.get("Authorization") == "Bearer test-api-key"
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_fetch_tally_page_uses_provided_client():
|
||||
"""_fetch_tally_page should use the passed client, not create its own."""
|
||||
from backend.data.tally import _fetch_tally_page
|
||||
|
||||
mock_response = MagicMock()
|
||||
mock_response.json.return_value = {"submissions": [], "questions": []}
|
||||
|
||||
mock_client = AsyncMock()
|
||||
mock_client.get.return_value = mock_response
|
||||
|
||||
result = await _fetch_tally_page(mock_client, "form123", page=1)
|
||||
|
||||
mock_client.get.assert_awaited_once()
|
||||
call_url = mock_client.get.call_args[0][0]
|
||||
assert "form123" in call_url
|
||||
assert "page=1" in call_url
|
||||
assert result == {"submissions": [], "questions": []}
|
||||
@@ -327,11 +327,16 @@ async def get_workspace_total_size(workspace_id: str) -> int:
|
||||
"""
|
||||
Get the total size of all files in a workspace.
|
||||
|
||||
Queries Prisma directly (skipping Pydantic model conversion) and only
|
||||
fetches the ``sizeBytes`` column to minimise data transfer.
|
||||
|
||||
Args:
|
||||
workspace_id: The workspace ID
|
||||
|
||||
Returns:
|
||||
Total size in bytes
|
||||
"""
|
||||
files = await list_workspace_files(workspace_id)
|
||||
return sum(file.size_bytes for file in files)
|
||||
files = await UserWorkspaceFile.prisma().find_many(
|
||||
where={"workspaceId": workspace_id, "isDeleted": False},
|
||||
)
|
||||
return sum(f.sizeBytes for f in files)
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
"""Redis-based distributed locking for cluster coordination."""
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
import threading
|
||||
import time
|
||||
@@ -7,6 +8,7 @@ from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from redis import Redis
|
||||
from redis.asyncio import Redis as AsyncRedis
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -126,3 +128,124 @@ class ClusterLock:
|
||||
|
||||
with self._refresh_lock:
|
||||
self._last_refresh = 0.0
|
||||
|
||||
|
||||
class AsyncClusterLock:
|
||||
"""Async Redis-based distributed lock for preventing duplicate execution."""
|
||||
|
||||
def __init__(
|
||||
self, redis: "AsyncRedis", key: str, owner_id: str, timeout: int = 300
|
||||
):
|
||||
self.redis = redis
|
||||
self.key = key
|
||||
self.owner_id = owner_id
|
||||
self.timeout = timeout
|
||||
self._last_refresh = 0.0
|
||||
self._refresh_lock = asyncio.Lock()
|
||||
|
||||
async def try_acquire(self) -> str | None:
|
||||
"""Try to acquire the lock.
|
||||
|
||||
Returns:
|
||||
- owner_id (self.owner_id) if successfully acquired
|
||||
- different owner_id if someone else holds the lock
|
||||
- None if Redis is unavailable or other error
|
||||
"""
|
||||
try:
|
||||
success = await self.redis.set(
|
||||
self.key, self.owner_id, nx=True, ex=self.timeout
|
||||
)
|
||||
if success:
|
||||
async with self._refresh_lock:
|
||||
self._last_refresh = time.time()
|
||||
return self.owner_id # Successfully acquired
|
||||
|
||||
# Failed to acquire, get current owner
|
||||
current_value = await self.redis.get(self.key)
|
||||
if current_value:
|
||||
current_owner = (
|
||||
current_value.decode("utf-8")
|
||||
if isinstance(current_value, bytes)
|
||||
else str(current_value)
|
||||
)
|
||||
return current_owner
|
||||
|
||||
# Key doesn't exist but we failed to set it - race condition or Redis issue
|
||||
return None
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"AsyncClusterLock.try_acquire failed for key {self.key}: {e}")
|
||||
return None
|
||||
|
||||
async def refresh(self) -> bool:
|
||||
"""Refresh lock TTL if we still own it.
|
||||
|
||||
Rate limited to at most once every timeout/10 seconds (minimum 1 second).
|
||||
During rate limiting, still verifies lock existence but skips TTL extension.
|
||||
Setting _last_refresh to 0 bypasses rate limiting for testing.
|
||||
|
||||
Async-safe: uses asyncio.Lock to protect _last_refresh access.
|
||||
"""
|
||||
# Calculate refresh interval: max(timeout // 10, 1)
|
||||
refresh_interval = max(self.timeout // 10, 1)
|
||||
current_time = time.time()
|
||||
|
||||
# Check if we're within the rate limit period (async-safe read)
|
||||
# _last_refresh == 0 forces a refresh (bypasses rate limiting for testing)
|
||||
async with self._refresh_lock:
|
||||
last_refresh = self._last_refresh
|
||||
is_rate_limited = (
|
||||
last_refresh > 0 and (current_time - last_refresh) < refresh_interval
|
||||
)
|
||||
|
||||
try:
|
||||
# Always verify lock existence, even during rate limiting
|
||||
current_value = await self.redis.get(self.key)
|
||||
if not current_value:
|
||||
async with self._refresh_lock:
|
||||
self._last_refresh = 0
|
||||
return False
|
||||
|
||||
stored_owner = (
|
||||
current_value.decode("utf-8")
|
||||
if isinstance(current_value, bytes)
|
||||
else str(current_value)
|
||||
)
|
||||
if stored_owner != self.owner_id:
|
||||
async with self._refresh_lock:
|
||||
self._last_refresh = 0
|
||||
return False
|
||||
|
||||
# If rate limited, return True but don't update TTL or timestamp
|
||||
if is_rate_limited:
|
||||
return True
|
||||
|
||||
# Perform actual refresh
|
||||
if await self.redis.expire(self.key, self.timeout):
|
||||
async with self._refresh_lock:
|
||||
self._last_refresh = current_time
|
||||
return True
|
||||
|
||||
async with self._refresh_lock:
|
||||
self._last_refresh = 0
|
||||
return False
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"AsyncClusterLock.refresh failed for key {self.key}: {e}")
|
||||
async with self._refresh_lock:
|
||||
self._last_refresh = 0
|
||||
return False
|
||||
|
||||
async def release(self):
|
||||
"""Release the lock."""
|
||||
async with self._refresh_lock:
|
||||
if self._last_refresh == 0:
|
||||
return
|
||||
|
||||
try:
|
||||
await self.redis.delete(self.key)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
async with self._refresh_lock:
|
||||
self._last_refresh = 0.0
|
||||
|
||||
@@ -47,6 +47,7 @@ class ProviderName(str, Enum):
|
||||
SLANT3D = "slant3d"
|
||||
SMARTLEAD = "smartlead"
|
||||
SMTP = "smtp"
|
||||
TELEGRAM = "telegram"
|
||||
TWITTER = "twitter"
|
||||
TODOIST = "todoist"
|
||||
UNREAL_SPEECH = "unreal_speech"
|
||||
|
||||
@@ -15,6 +15,7 @@ def load_webhook_managers() -> dict["ProviderName", type["BaseWebhooksManager"]]
|
||||
from .compass import CompassWebhookManager
|
||||
from .github import GithubWebhooksManager
|
||||
from .slant3d import Slant3DWebhooksManager
|
||||
from .telegram import TelegramWebhooksManager
|
||||
|
||||
webhook_managers.update(
|
||||
{
|
||||
@@ -23,6 +24,7 @@ def load_webhook_managers() -> dict["ProviderName", type["BaseWebhooksManager"]]
|
||||
CompassWebhookManager,
|
||||
GithubWebhooksManager,
|
||||
Slant3DWebhooksManager,
|
||||
TelegramWebhooksManager,
|
||||
]
|
||||
}
|
||||
)
|
||||
|
||||
@@ -0,0 +1,242 @@
|
||||
"""
|
||||
Telegram Bot API Webhooks Manager.
|
||||
|
||||
Handles webhook registration and validation for Telegram bots.
|
||||
"""
|
||||
|
||||
import hmac
|
||||
import logging
|
||||
|
||||
from fastapi import HTTPException, Request
|
||||
from strenum import StrEnum
|
||||
|
||||
from backend.data import integrations
|
||||
from backend.data.model import APIKeyCredentials, Credentials
|
||||
from backend.integrations.providers import ProviderName
|
||||
from backend.util.exceptions import MissingConfigError
|
||||
from backend.util.request import Requests
|
||||
from backend.util.settings import Config
|
||||
|
||||
from ._base import BaseWebhooksManager
|
||||
from .utils import webhook_ingress_url
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class TelegramWebhookType(StrEnum):
|
||||
BOT = "bot"
|
||||
|
||||
|
||||
class TelegramWebhooksManager(BaseWebhooksManager):
|
||||
"""
|
||||
Manages Telegram bot webhooks.
|
||||
|
||||
Telegram webhooks are registered via the setWebhook API method.
|
||||
Incoming requests are validated using the secret_token header.
|
||||
"""
|
||||
|
||||
PROVIDER_NAME = ProviderName.TELEGRAM
|
||||
WebhookType = TelegramWebhookType
|
||||
|
||||
TELEGRAM_API_BASE = "https://api.telegram.org"
|
||||
|
||||
async def get_suitable_auto_webhook(
|
||||
self,
|
||||
user_id: str,
|
||||
credentials: Credentials,
|
||||
webhook_type: TelegramWebhookType,
|
||||
resource: str,
|
||||
events: list[str],
|
||||
) -> integrations.Webhook:
|
||||
"""
|
||||
Telegram only supports one webhook per bot. Instead of creating a new
|
||||
webhook object when events change (which causes the old one to be pruned
|
||||
and deregistered — removing the ONLY webhook for the bot), we find the
|
||||
existing webhook and update its events in place.
|
||||
"""
|
||||
app_config = Config()
|
||||
if not app_config.platform_base_url:
|
||||
raise MissingConfigError(
|
||||
"PLATFORM_BASE_URL must be set to use Webhook functionality"
|
||||
)
|
||||
|
||||
# Exact match — no re-registration needed
|
||||
if webhook := await integrations.find_webhook_by_credentials_and_props(
|
||||
user_id=user_id,
|
||||
credentials_id=credentials.id,
|
||||
webhook_type=webhook_type,
|
||||
resource=resource,
|
||||
events=events,
|
||||
):
|
||||
return webhook
|
||||
|
||||
# Find any existing webhook for the same bot, regardless of events
|
||||
if existing := await integrations.find_webhook_by_credentials_and_props(
|
||||
user_id=user_id,
|
||||
credentials_id=credentials.id,
|
||||
webhook_type=webhook_type,
|
||||
resource=resource,
|
||||
events=None, # Ignore events for this lookup
|
||||
):
|
||||
# Re-register with Telegram using the same URL but new allowed_updates
|
||||
ingress_url = webhook_ingress_url(self.PROVIDER_NAME, existing.id)
|
||||
_, config = await self._register_webhook(
|
||||
credentials,
|
||||
webhook_type,
|
||||
resource,
|
||||
events,
|
||||
ingress_url,
|
||||
existing.secret,
|
||||
)
|
||||
return await integrations.update_webhook(
|
||||
existing.id, events=events, config=config
|
||||
)
|
||||
|
||||
# No existing webhook at all — create a new one
|
||||
return await self._create_webhook(
|
||||
user_id=user_id,
|
||||
webhook_type=webhook_type,
|
||||
events=events,
|
||||
resource=resource,
|
||||
credentials=credentials,
|
||||
)
|
||||
|
||||
@classmethod
|
||||
async def validate_payload(
|
||||
cls,
|
||||
webhook: integrations.Webhook,
|
||||
request: Request,
|
||||
credentials: Credentials | None,
|
||||
) -> tuple[dict, str]:
|
||||
"""
|
||||
Validates incoming Telegram webhook request.
|
||||
|
||||
Telegram sends X-Telegram-Bot-Api-Secret-Token header when secret_token
|
||||
was set in setWebhook call.
|
||||
|
||||
Returns:
|
||||
tuple: (payload dict, event_type string)
|
||||
"""
|
||||
# Verify secret token header
|
||||
secret_header = request.headers.get("X-Telegram-Bot-Api-Secret-Token")
|
||||
if not secret_header or not hmac.compare_digest(secret_header, webhook.secret):
|
||||
raise HTTPException(
|
||||
status_code=403,
|
||||
detail="Invalid or missing X-Telegram-Bot-Api-Secret-Token",
|
||||
)
|
||||
|
||||
payload = await request.json()
|
||||
|
||||
# Determine event type based on update content
|
||||
if "message" in payload:
|
||||
message = payload["message"]
|
||||
if "text" in message:
|
||||
event_type = "message.text"
|
||||
elif "photo" in message:
|
||||
event_type = "message.photo"
|
||||
elif "voice" in message:
|
||||
event_type = "message.voice"
|
||||
elif "audio" in message:
|
||||
event_type = "message.audio"
|
||||
elif "document" in message:
|
||||
event_type = "message.document"
|
||||
elif "video" in message:
|
||||
event_type = "message.video"
|
||||
else:
|
||||
logger.warning(
|
||||
"Unknown Telegram webhook payload type; "
|
||||
f"message.keys() = {message.keys()}"
|
||||
)
|
||||
event_type = "message.other"
|
||||
elif "edited_message" in payload:
|
||||
event_type = "message.edited_message"
|
||||
elif "message_reaction" in payload:
|
||||
event_type = "message_reaction"
|
||||
else:
|
||||
event_type = "unknown"
|
||||
|
||||
return payload, event_type
|
||||
|
||||
async def _register_webhook(
|
||||
self,
|
||||
credentials: Credentials,
|
||||
webhook_type: TelegramWebhookType,
|
||||
resource: str,
|
||||
events: list[str],
|
||||
ingress_url: str,
|
||||
secret: str,
|
||||
) -> tuple[str, dict]:
|
||||
"""
|
||||
Register webhook with Telegram using setWebhook API.
|
||||
|
||||
Args:
|
||||
credentials: Bot token credentials
|
||||
webhook_type: Type of webhook (always BOT for Telegram)
|
||||
resource: Resource identifier (unused for Telegram, bots are global)
|
||||
events: Events to subscribe to
|
||||
ingress_url: URL to receive webhook payloads
|
||||
secret: Secret token for request validation
|
||||
|
||||
Returns:
|
||||
tuple: (provider_webhook_id, config dict)
|
||||
"""
|
||||
if not isinstance(credentials, APIKeyCredentials):
|
||||
raise ValueError("API key (bot token) is required for Telegram webhooks")
|
||||
|
||||
token = credentials.api_key.get_secret_value()
|
||||
url = f"{self.TELEGRAM_API_BASE}/bot{token}/setWebhook"
|
||||
|
||||
# Map event filter to Telegram allowed_updates
|
||||
if events:
|
||||
telegram_updates: set[str] = set()
|
||||
for event in events:
|
||||
telegram_updates.add(event.split(".")[0])
|
||||
# "message.edited_message" requires the "edited_message" update type
|
||||
if "edited_message" in event:
|
||||
telegram_updates.add("edited_message")
|
||||
sorted_updates = sorted(telegram_updates)
|
||||
else:
|
||||
sorted_updates = ["message", "message_reaction"]
|
||||
|
||||
webhook_data = {
|
||||
"url": ingress_url,
|
||||
"secret_token": secret,
|
||||
"allowed_updates": sorted_updates,
|
||||
}
|
||||
|
||||
response = await Requests().post(url, json=webhook_data)
|
||||
result = response.json()
|
||||
|
||||
if not result.get("ok"):
|
||||
error_desc = result.get("description", "Unknown error")
|
||||
raise ValueError(f"Failed to set Telegram webhook: {error_desc}")
|
||||
|
||||
# Telegram doesn't return a webhook ID, use empty string
|
||||
config = {
|
||||
"url": ingress_url,
|
||||
"allowed_updates": webhook_data["allowed_updates"],
|
||||
}
|
||||
|
||||
return "", config
|
||||
|
||||
async def _deregister_webhook(
|
||||
self, webhook: integrations.Webhook, credentials: Credentials
|
||||
) -> None:
|
||||
"""
|
||||
Deregister webhook by calling setWebhook with empty URL.
|
||||
|
||||
This removes the webhook from Telegram's servers.
|
||||
"""
|
||||
if not isinstance(credentials, APIKeyCredentials):
|
||||
raise ValueError("API key (bot token) is required for Telegram webhooks")
|
||||
|
||||
token = credentials.api_key.get_secret_value()
|
||||
url = f"{self.TELEGRAM_API_BASE}/bot{token}/setWebhook"
|
||||
|
||||
# Setting empty URL removes the webhook
|
||||
response = await Requests().post(url, json={"url": ""})
|
||||
result = response.json()
|
||||
|
||||
if not result.get("ok"):
|
||||
error_desc = result.get("description", "Unknown error")
|
||||
logger.warning(f"Failed to deregister Telegram webhook: {error_desc}")
|
||||
@@ -372,7 +372,7 @@ class Config(UpdateTrackingModel["Config"], BaseSettings):
|
||||
description="The port for the Agent Generator service",
|
||||
)
|
||||
agentgenerator_timeout: int = Field(
|
||||
default=600,
|
||||
default=1800,
|
||||
description="The timeout in seconds for Agent Generator service requests (includes retries for rate limits)",
|
||||
)
|
||||
agentgenerator_use_dummy: bool = Field(
|
||||
@@ -413,6 +413,13 @@ class Config(UpdateTrackingModel["Config"], BaseSettings):
|
||||
description="Maximum file size in MB for workspace files (1-1024 MB)",
|
||||
)
|
||||
|
||||
max_workspace_storage_mb: int = Field(
|
||||
default=500,
|
||||
ge=1,
|
||||
le=10240,
|
||||
description="Maximum total workspace storage per user in MB.",
|
||||
)
|
||||
|
||||
# AutoMod configuration
|
||||
automod_enabled: bool = Field(
|
||||
default=False,
|
||||
@@ -691,6 +698,15 @@ class Secrets(UpdateTrackingModel["Secrets"], BaseSettings):
|
||||
|
||||
screenshotone_api_key: str = Field(default="", description="ScreenshotOne API Key")
|
||||
|
||||
tally_api_key: str = Field(
|
||||
default="",
|
||||
description="Tally API key for form submission lookup on signup",
|
||||
)
|
||||
tally_form_id: str = Field(
|
||||
default="npGe0q",
|
||||
description="Tally form ID for signup business understanding form",
|
||||
)
|
||||
|
||||
apollo_api_key: str = Field(default="", description="Apollo API Key")
|
||||
smartlead_api_key: str = Field(default="", description="SmartLead API Key")
|
||||
zerobounce_api_key: str = Field(default="", description="ZeroBounce API Key")
|
||||
@@ -714,6 +730,9 @@ class Secrets(UpdateTrackingModel["Secrets"], BaseSettings):
|
||||
langfuse_host: str = Field(
|
||||
default="https://cloud.langfuse.com", description="Langfuse host URL"
|
||||
)
|
||||
langfuse_tracing_environment: str = Field(
|
||||
default="local", description="Tracing environment tag (local/dev/production)"
|
||||
)
|
||||
|
||||
# PostHog analytics
|
||||
posthog_api_key: str = Field(default="", description="PostHog API key")
|
||||
|
||||
@@ -0,0 +1,33 @@
|
||||
-- AlterTable
|
||||
ALTER TABLE "LibraryAgent" ADD COLUMN "folderId" TEXT;
|
||||
|
||||
-- CreateTable
|
||||
CREATE TABLE "LibraryFolder" (
|
||||
"id" TEXT NOT NULL,
|
||||
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
"updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
"userId" TEXT NOT NULL,
|
||||
"name" TEXT NOT NULL,
|
||||
"icon" TEXT,
|
||||
"color" TEXT,
|
||||
"parentId" TEXT,
|
||||
"isDeleted" BOOLEAN NOT NULL DEFAULT false,
|
||||
|
||||
CONSTRAINT "LibraryFolder_pkey" PRIMARY KEY ("id")
|
||||
);
|
||||
|
||||
|
||||
-- CreateIndex
|
||||
CREATE UNIQUE INDEX "LibraryFolder_userId_parentId_name_key" ON "LibraryFolder"("userId", "parentId", "name");
|
||||
|
||||
-- CreateIndex
|
||||
CREATE INDEX "LibraryAgent_folderId_idx" ON "LibraryAgent"("folderId");
|
||||
|
||||
-- AddForeignKey
|
||||
ALTER TABLE "LibraryAgent" ADD CONSTRAINT "LibraryAgent_folderId_fkey" FOREIGN KEY ("folderId") REFERENCES "LibraryFolder"("id") ON DELETE RESTRICT ON UPDATE CASCADE;
|
||||
|
||||
-- AddForeignKey
|
||||
ALTER TABLE "LibraryFolder" ADD CONSTRAINT "LibraryFolder_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE;
|
||||
|
||||
-- AddForeignKey
|
||||
ALTER TABLE "LibraryFolder" ADD CONSTRAINT "LibraryFolder_parentId_fkey" FOREIGN KEY ("parentId") REFERENCES "LibraryFolder"("id") ON DELETE CASCADE ON UPDATE CASCADE;
|
||||
@@ -0,0 +1,97 @@
|
||||
-- This migration creates a materialized view for suggested blocks based on execution counts
|
||||
-- The view aggregates execution counts per block for the last 14 days
|
||||
--
|
||||
-- IMPORTANT: For production environments, pg_cron is REQUIRED for automatic refresh
|
||||
-- Prerequisites for production:
|
||||
-- 1. pg_cron extension must be installed: CREATE EXTENSION pg_cron;
|
||||
-- 2. pg_cron must be configured in postgresql.conf:
|
||||
-- shared_preload_libraries = 'pg_cron'
|
||||
-- cron.database_name = 'your_database_name'
|
||||
--
|
||||
-- For development environments without pg_cron:
|
||||
-- The migration will succeed but you must manually refresh views with:
|
||||
-- SET search_path TO platform;
|
||||
-- SELECT refresh_suggested_blocks_view();
|
||||
|
||||
-- Check if pg_cron extension is installed
|
||||
DO $$
|
||||
DECLARE
|
||||
has_pg_cron BOOLEAN;
|
||||
BEGIN
|
||||
SELECT EXISTS (SELECT 1 FROM pg_extension WHERE extname = 'pg_cron') INTO has_pg_cron;
|
||||
|
||||
IF NOT has_pg_cron THEN
|
||||
RAISE WARNING 'pg_cron is not installed. Materialized view will be created but will NOT refresh automatically. For production, install pg_cron. For development, manually refresh with: SELECT refresh_suggested_blocks_view();';
|
||||
END IF;
|
||||
END
|
||||
$$;
|
||||
|
||||
-- Create materialized view for suggested blocks based on execution counts in last 14 days
|
||||
-- The 14-day threshold is hardcoded to ensure consistent behavior
|
||||
CREATE MATERIALIZED VIEW IF NOT EXISTS "mv_suggested_blocks" AS
|
||||
SELECT
|
||||
agent_node."agentBlockId" AS block_id,
|
||||
COUNT(execution.id) AS execution_count
|
||||
FROM "AgentNodeExecution" execution
|
||||
JOIN "AgentNode" agent_node ON execution."agentNodeId" = agent_node.id
|
||||
WHERE execution."endedTime" >= (NOW() - INTERVAL '14 days')
|
||||
GROUP BY agent_node."agentBlockId"
|
||||
ORDER BY execution_count DESC;
|
||||
|
||||
-- Create unique index for concurrent refresh support
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS "idx_mv_suggested_blocks_block_id" ON "mv_suggested_blocks"("block_id");
|
||||
|
||||
-- Create refresh function
|
||||
CREATE OR REPLACE FUNCTION refresh_suggested_blocks_view()
|
||||
RETURNS void
|
||||
LANGUAGE plpgsql
|
||||
AS $$
|
||||
DECLARE
|
||||
target_schema text := current_schema();
|
||||
BEGIN
|
||||
-- Use CONCURRENTLY for better performance during refresh
|
||||
REFRESH MATERIALIZED VIEW CONCURRENTLY "mv_suggested_blocks";
|
||||
RAISE NOTICE 'Suggested blocks materialized view refreshed in schema % at %', target_schema, NOW();
|
||||
EXCEPTION
|
||||
WHEN OTHERS THEN
|
||||
-- Fallback to non-concurrent refresh if concurrent fails
|
||||
REFRESH MATERIALIZED VIEW "mv_suggested_blocks";
|
||||
RAISE NOTICE 'Suggested blocks materialized view refreshed (non-concurrent) in schema % at %. Concurrent refresh failed due to: %', target_schema, NOW(), SQLERRM;
|
||||
END;
|
||||
$$;
|
||||
|
||||
-- Initial refresh of the materialized view
|
||||
SELECT refresh_suggested_blocks_view();
|
||||
|
||||
-- Schedule automatic refresh every hour (only if pg_cron is available)
|
||||
DO $$
|
||||
DECLARE
|
||||
has_pg_cron BOOLEAN;
|
||||
current_schema_name text := current_schema();
|
||||
job_name text;
|
||||
BEGIN
|
||||
-- Check if pg_cron extension exists
|
||||
SELECT EXISTS (SELECT 1 FROM pg_extension WHERE extname = 'pg_cron') INTO has_pg_cron;
|
||||
|
||||
IF has_pg_cron THEN
|
||||
job_name := format('refresh-suggested-blocks_%s', current_schema_name);
|
||||
|
||||
-- Try to unschedule existing job (ignore errors if it doesn't exist)
|
||||
BEGIN
|
||||
PERFORM cron.unschedule(job_name);
|
||||
EXCEPTION WHEN OTHERS THEN
|
||||
NULL;
|
||||
END;
|
||||
|
||||
-- Schedule the new job to run every hour
|
||||
PERFORM cron.schedule(
|
||||
job_name,
|
||||
'0 * * * *', -- Every hour at minute 0
|
||||
format('SET search_path TO %I; SELECT refresh_suggested_blocks_view();', current_schema_name)
|
||||
);
|
||||
RAISE NOTICE 'Scheduled job %; runs every hour for schema %', job_name, current_schema_name;
|
||||
ELSE
|
||||
RAISE WARNING 'Automatic refresh NOT configured - pg_cron is not available. Manually refresh with: SELECT refresh_suggested_blocks_view();';
|
||||
END IF;
|
||||
END;
|
||||
$$;
|
||||
@@ -0,0 +1,7 @@
|
||||
-- This migration adds more than one value to an enum.
|
||||
-- With PostgreSQL versions 11 and earlier, this is not possible
|
||||
-- in a single migration. This can be worked around by creating
|
||||
-- multiple migrations, each migration adding only one value to
|
||||
-- the enum.
|
||||
ALTER TYPE "APIKeyPermission" ADD VALUE 'WRITE_GRAPH';
|
||||
ALTER TYPE "APIKeyPermission" ADD VALUE 'WRITE_LIBRARY';
|
||||
312
autogpt_platform/backend/poetry.lock
generated
312
autogpt_platform/backend/poetry.lock
generated
@@ -1610,6 +1610,101 @@ mccabe = ">=0.7.0,<0.8.0"
|
||||
pycodestyle = ">=2.14.0,<2.15.0"
|
||||
pyflakes = ">=3.4.0,<3.5.0"
|
||||
|
||||
[[package]]
|
||||
name = "fonttools"
|
||||
version = "4.61.1"
|
||||
description = "Tools to manipulate font files"
|
||||
optional = false
|
||||
python-versions = ">=3.10"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "fonttools-4.61.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7c7db70d57e5e1089a274cbb2b1fd635c9a24de809a231b154965d415d6c6d24"},
|
||||
{file = "fonttools-4.61.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5fe9fd43882620017add5eabb781ebfbc6998ee49b35bd7f8f79af1f9f99a958"},
|
||||
{file = "fonttools-4.61.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d8db08051fc9e7d8bc622f2112511b8107d8f27cd89e2f64ec45e9825e8288da"},
|
||||
{file = "fonttools-4.61.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:a76d4cb80f41ba94a6691264be76435e5f72f2cb3cab0b092a6212855f71c2f6"},
|
||||
{file = "fonttools-4.61.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a13fc8aeb24bad755eea8f7f9d409438eb94e82cf86b08fe77a03fbc8f6a96b1"},
|
||||
{file = "fonttools-4.61.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b846a1fcf8beadeb9ea4f44ec5bdde393e2f1569e17d700bfc49cd69bde75881"},
|
||||
{file = "fonttools-4.61.1-cp310-cp310-win32.whl", hash = "sha256:78a7d3ab09dc47ac1a363a493e6112d8cabed7ba7caad5f54dbe2f08676d1b47"},
|
||||
{file = "fonttools-4.61.1-cp310-cp310-win_amd64.whl", hash = "sha256:eff1ac3cc66c2ac7cda1e64b4e2f3ffef474b7335f92fc3833fc632d595fcee6"},
|
||||
{file = "fonttools-4.61.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:c6604b735bb12fef8e0efd5578c9fb5d3d8532d5001ea13a19cddf295673ee09"},
|
||||
{file = "fonttools-4.61.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5ce02f38a754f207f2f06557523cd39a06438ba3aafc0639c477ac409fc64e37"},
|
||||
{file = "fonttools-4.61.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:77efb033d8d7ff233385f30c62c7c79271c8885d5c9657d967ede124671bbdfb"},
|
||||
{file = "fonttools-4.61.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:75c1a6dfac6abd407634420c93864a1e274ebc1c7531346d9254c0d8f6ca00f9"},
|
||||
{file = "fonttools-4.61.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0de30bfe7745c0d1ffa2b0b7048fb7123ad0d71107e10ee090fa0b16b9452e87"},
|
||||
{file = "fonttools-4.61.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:58b0ee0ab5b1fc9921eccfe11d1435added19d6494dde14e323f25ad2bc30c56"},
|
||||
{file = "fonttools-4.61.1-cp311-cp311-win32.whl", hash = "sha256:f79b168428351d11e10c5aeb61a74e1851ec221081299f4cf56036a95431c43a"},
|
||||
{file = "fonttools-4.61.1-cp311-cp311-win_amd64.whl", hash = "sha256:fe2efccb324948a11dd09d22136fe2ac8a97d6c1347cf0b58a911dcd529f66b7"},
|
||||
{file = "fonttools-4.61.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:f3cb4a569029b9f291f88aafc927dd53683757e640081ca8c412781ea144565e"},
|
||||
{file = "fonttools-4.61.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:41a7170d042e8c0024703ed13b71893519a1a6d6e18e933e3ec7507a2c26a4b2"},
|
||||
{file = "fonttools-4.61.1-cp312-cp312-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:10d88e55330e092940584774ee5e8a6971b01fc2f4d3466a1d6c158230880796"},
|
||||
{file = "fonttools-4.61.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:15acc09befd16a0fb8a8f62bc147e1a82817542d72184acca9ce6e0aeda9fa6d"},
|
||||
{file = "fonttools-4.61.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e6bcdf33aec38d16508ce61fd81838f24c83c90a1d1b8c68982857038673d6b8"},
|
||||
{file = "fonttools-4.61.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:5fade934607a523614726119164ff621e8c30e8fa1ffffbbd358662056ba69f0"},
|
||||
{file = "fonttools-4.61.1-cp312-cp312-win32.whl", hash = "sha256:75da8f28eff26defba42c52986de97b22106cb8f26515b7c22443ebc9c2d3261"},
|
||||
{file = "fonttools-4.61.1-cp312-cp312-win_amd64.whl", hash = "sha256:497c31ce314219888c0e2fce5ad9178ca83fe5230b01a5006726cdf3ac9f24d9"},
|
||||
{file = "fonttools-4.61.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8c56c488ab471628ff3bfa80964372fc13504ece601e0d97a78ee74126b2045c"},
|
||||
{file = "fonttools-4.61.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:dc492779501fa723b04d0ab1f5be046797fee17d27700476edc7ee9ae535a61e"},
|
||||
{file = "fonttools-4.61.1-cp313-cp313-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:64102ca87e84261419c3747a0d20f396eb024bdbeb04c2bfb37e2891f5fadcb5"},
|
||||
{file = "fonttools-4.61.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4c1b526c8d3f615a7b1867f38a9410849c8f4aef078535742198e942fba0e9bd"},
|
||||
{file = "fonttools-4.61.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:41ed4b5ec103bd306bb68f81dc166e77409e5209443e5773cb4ed837bcc9b0d3"},
|
||||
{file = "fonttools-4.61.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b501c862d4901792adaec7c25b1ecc749e2662543f68bb194c42ba18d6eec98d"},
|
||||
{file = "fonttools-4.61.1-cp313-cp313-win32.whl", hash = "sha256:4d7092bb38c53bbc78e9255a59158b150bcdc115a1e3b3ce0b5f267dc35dd63c"},
|
||||
{file = "fonttools-4.61.1-cp313-cp313-win_amd64.whl", hash = "sha256:21e7c8d76f62ab13c9472ccf74515ca5b9a761d1bde3265152a6dc58700d895b"},
|
||||
{file = "fonttools-4.61.1-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:fff4f534200a04b4a36e7ae3cb74493afe807b517a09e99cb4faa89a34ed6ecd"},
|
||||
{file = "fonttools-4.61.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:d9203500f7c63545b4ce3799319fe4d9feb1a1b89b28d3cb5abd11b9dd64147e"},
|
||||
{file = "fonttools-4.61.1-cp314-cp314-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:fa646ecec9528bef693415c79a86e733c70a4965dd938e9a226b0fc64c9d2e6c"},
|
||||
{file = "fonttools-4.61.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:11f35ad7805edba3aac1a3710d104592df59f4b957e30108ae0ba6c10b11dd75"},
|
||||
{file = "fonttools-4.61.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:b931ae8f62db78861b0ff1ac017851764602288575d65b8e8ff1963fed419063"},
|
||||
{file = "fonttools-4.61.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:b148b56f5de675ee16d45e769e69f87623a4944f7443850bf9a9376e628a89d2"},
|
||||
{file = "fonttools-4.61.1-cp314-cp314-win32.whl", hash = "sha256:9b666a475a65f4e839d3d10473fad6d47e0a9db14a2f4a224029c5bfde58ad2c"},
|
||||
{file = "fonttools-4.61.1-cp314-cp314-win_amd64.whl", hash = "sha256:4f5686e1fe5fce75d82d93c47a438a25bf0d1319d2843a926f741140b2b16e0c"},
|
||||
{file = "fonttools-4.61.1-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:e76ce097e3c57c4bcb67c5aa24a0ecdbd9f74ea9219997a707a4061fbe2707aa"},
|
||||
{file = "fonttools-4.61.1-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:9cfef3ab326780c04d6646f68d4b4742aae222e8b8ea1d627c74e38afcbc9d91"},
|
||||
{file = "fonttools-4.61.1-cp314-cp314t-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:a75c301f96db737e1c5ed5fd7d77d9c34466de16095a266509e13da09751bd19"},
|
||||
{file = "fonttools-4.61.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:91669ccac46bbc1d09e9273546181919064e8df73488ea087dcac3e2968df9ba"},
|
||||
{file = "fonttools-4.61.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:c33ab3ca9d3ccd581d58e989d67554e42d8d4ded94ab3ade3508455fe70e65f7"},
|
||||
{file = "fonttools-4.61.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:664c5a68ec406f6b1547946683008576ef8b38275608e1cee6c061828171c118"},
|
||||
{file = "fonttools-4.61.1-cp314-cp314t-win32.whl", hash = "sha256:aed04cabe26f30c1647ef0e8fbb207516fd40fe9472e9439695f5c6998e60ac5"},
|
||||
{file = "fonttools-4.61.1-cp314-cp314t-win_amd64.whl", hash = "sha256:2180f14c141d2f0f3da43f3a81bc8aa4684860f6b0e6f9e165a4831f24e6a23b"},
|
||||
{file = "fonttools-4.61.1-py3-none-any.whl", hash = "sha256:17d2bf5d541add43822bcf0c43d7d847b160c9bb01d15d5007d84e2217aaa371"},
|
||||
{file = "fonttools-4.61.1.tar.gz", hash = "sha256:6675329885c44657f826ef01d9e4fb33b9158e9d93c537d84ad8399539bc6f69"},
|
||||
]
|
||||
|
||||
[package.extras]
|
||||
all = ["brotli (>=1.0.1) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\"", "lxml (>=4.0)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres ; platform_python_implementation == \"PyPy\"", "pycairo", "scipy ; platform_python_implementation != \"PyPy\"", "skia-pathops (>=0.5.0)", "sympy", "uharfbuzz (>=0.45.0)", "unicodedata2 (>=17.0.0) ; python_version <= \"3.14\"", "xattr ; sys_platform == \"darwin\"", "zopfli (>=0.1.4)"]
|
||||
graphite = ["lz4 (>=1.7.4.2)"]
|
||||
interpolatable = ["munkres ; platform_python_implementation == \"PyPy\"", "pycairo", "scipy ; platform_python_implementation != \"PyPy\""]
|
||||
lxml = ["lxml (>=4.0)"]
|
||||
pathops = ["skia-pathops (>=0.5.0)"]
|
||||
plot = ["matplotlib"]
|
||||
repacker = ["uharfbuzz (>=0.45.0)"]
|
||||
symfont = ["sympy"]
|
||||
type1 = ["xattr ; sys_platform == \"darwin\""]
|
||||
unicode = ["unicodedata2 (>=17.0.0) ; python_version <= \"3.14\""]
|
||||
woff = ["brotli (>=1.0.1) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\"", "zopfli (>=0.1.4)"]
|
||||
|
||||
[[package]]
|
||||
name = "fpdf2"
|
||||
version = "2.8.6"
|
||||
description = "Simple & fast PDF generation for Python"
|
||||
optional = false
|
||||
python-versions = ">=3.10"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "fpdf2-2.8.6-py3-none-any.whl", hash = "sha256:464658b896c6b0fcbf883abb316b8f0a52d582eb959d71822ba254d6c790bfdd"},
|
||||
{file = "fpdf2-2.8.6.tar.gz", hash = "sha256:5132f26bbeee69a7ca6a292e4da1eb3241147b5aea9348b35e780ecd02bf5fc2"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
defusedxml = "*"
|
||||
fonttools = ">=4.34.0"
|
||||
Pillow = ">=8.3.2,<9.2.dev0 || >=9.3.dev0"
|
||||
|
||||
[package.extras]
|
||||
dev = ["bandit", "black", "mypy", "pre-commit", "pylint", "pyright", "semgrep", "zizmor"]
|
||||
docs = ["lxml", "mkdocs", "mkdocs-git-revision-date-localized-plugin", "mkdocs-include-markdown-plugin", "mkdocs-macros-plugin", "mkdocs-material", "mkdocs-minify-plugin", "mkdocs-redirects", "mkdocs-with-pdf", "mknotebooks", "pdoc3"]
|
||||
test = ["brotli", "camelot-py[base]", "endesive[full]", "pytest", "pytest-cov", "qrcode", "tabula-py", "typing-extensions (>=4.0) ; python_version < \"3.11\"", "uharfbuzz"]
|
||||
|
||||
[[package]]
|
||||
name = "frozenlist"
|
||||
version = "1.8.0"
|
||||
@@ -3135,6 +3230,39 @@ pydantic = ">=1.10.7,<3.0"
|
||||
requests = ">=2,<3"
|
||||
wrapt = ">=1.14,<2.0"
|
||||
|
||||
[[package]]
|
||||
name = "langsmith"
|
||||
version = "0.7.7"
|
||||
description = "Client library to connect to the LangSmith Observability and Evaluation Platform."
|
||||
optional = false
|
||||
python-versions = ">=3.10"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "langsmith-0.7.7-py3-none-any.whl", hash = "sha256:ef3d0aff77917bf3776368e90f387df5ffd7cb7cff11ece0ec4fd227e433b5de"},
|
||||
{file = "langsmith-0.7.7.tar.gz", hash = "sha256:2294d3c4a5a8205ef38880c1c412d85322e6055858ae999ef6641c815995d437"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
httpx = ">=0.23.0,<1"
|
||||
orjson = {version = ">=3.9.14", markers = "platform_python_implementation != \"PyPy\""}
|
||||
packaging = ">=23.2"
|
||||
pydantic = ">=2,<3"
|
||||
requests = ">=2.0.0"
|
||||
requests-toolbelt = ">=1.0.0"
|
||||
uuid-utils = ">=0.12.0,<1.0"
|
||||
xxhash = ">=3.0.0"
|
||||
zstandard = ">=0.23.0"
|
||||
|
||||
[package.extras]
|
||||
claude-agent-sdk = ["claude-agent-sdk (>=0.1.0) ; python_version >= \"3.10\""]
|
||||
google-adk = ["google-adk (>=1.0.0)", "wrapt (>=1.16.0)"]
|
||||
langsmith-pyo3 = ["langsmith-pyo3 (>=0.1.0rc2)"]
|
||||
openai-agents = ["openai-agents (>=0.0.3)"]
|
||||
otel = ["opentelemetry-api (>=1.30.0)", "opentelemetry-exporter-otlp-proto-http (>=1.30.0)", "opentelemetry-sdk (>=1.30.0)"]
|
||||
pytest = ["pytest (>=7.0.0)", "rich (>=13.9.4)", "vcrpy (>=7.0.0)"]
|
||||
sandbox = ["websockets (>=15.0)"]
|
||||
vcr = ["vcrpy (>=7.0.0)"]
|
||||
|
||||
[[package]]
|
||||
name = "launchdarkly-eventsource"
|
||||
version = "1.5.1"
|
||||
@@ -7652,6 +7780,38 @@ h2 = ["h2 (>=4,<5)"]
|
||||
socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"]
|
||||
zstd = ["backports-zstd (>=1.0.0) ; python_version < \"3.14\""]
|
||||
|
||||
[[package]]
|
||||
name = "uuid-utils"
|
||||
version = "0.14.1"
|
||||
description = "Fast, drop-in replacement for Python's uuid module, powered by Rust."
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "uuid_utils-0.14.1-cp39-abi3-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:93a3b5dc798a54a1feb693f2d1cb4cf08258c32ff05ae4929b5f0a2ca624a4f0"},
|
||||
{file = "uuid_utils-0.14.1-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:ccd65a4b8e83af23eae5e56d88034b2fe7264f465d3e830845f10d1591b81741"},
|
||||
{file = "uuid_utils-0.14.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b56b0cacd81583834820588378e432b0696186683b813058b707aedc1e16c4b1"},
|
||||
{file = "uuid_utils-0.14.1-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bb3cf14de789097320a3c56bfdfdd51b1225d11d67298afbedee7e84e3837c96"},
|
||||
{file = "uuid_utils-0.14.1-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:60e0854a90d67f4b0cc6e54773deb8be618f4c9bad98d3326f081423b5d14fae"},
|
||||
{file = "uuid_utils-0.14.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce6743ba194de3910b5feb1a62590cd2587e33a73ab6af8a01b642ceb5055862"},
|
||||
{file = "uuid_utils-0.14.1-cp39-abi3-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:043fb58fde6cf1620a6c066382f04f87a8e74feb0f95a585e4ed46f5d44af57b"},
|
||||
{file = "uuid_utils-0.14.1-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:c915d53f22945e55fe0d3d3b0b87fd965a57f5fd15666fd92d6593a73b1dd297"},
|
||||
{file = "uuid_utils-0.14.1-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:0972488e3f9b449e83f006ead5a0e0a33ad4a13e4462e865b7c286ab7d7566a3"},
|
||||
{file = "uuid_utils-0.14.1-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:1c238812ae0c8ffe77d8d447a32c6dfd058ea4631246b08b5a71df586ff08531"},
|
||||
{file = "uuid_utils-0.14.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:bec8f8ef627af86abf8298e7ec50926627e29b34fa907fcfbedb45aaa72bca43"},
|
||||
{file = "uuid_utils-0.14.1-cp39-abi3-win32.whl", hash = "sha256:b54d6aa6252d96bac1fdbc80d26ba71bad9f220b2724d692ad2f2310c22ef523"},
|
||||
{file = "uuid_utils-0.14.1-cp39-abi3-win_amd64.whl", hash = "sha256:fc27638c2ce267a0ce3e06828aff786f91367f093c80625ee21dad0208e0f5ba"},
|
||||
{file = "uuid_utils-0.14.1-cp39-abi3-win_arm64.whl", hash = "sha256:b04cb49b42afbc4ff8dbc60cf054930afc479d6f4dd7f1ec3bbe5dbfdde06b7a"},
|
||||
{file = "uuid_utils-0.14.1-pp311-pypy311_pp73-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:b197cd5424cf89fb019ca7f53641d05bfe34b1879614bed111c9c313b5574cd8"},
|
||||
{file = "uuid_utils-0.14.1-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:12c65020ba6cb6abe1d57fcbfc2d0ea0506c67049ee031714057f5caf0f9bc9c"},
|
||||
{file = "uuid_utils-0.14.1-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b5d2ad28063d422ccc2c28d46471d47b61a58de885d35113a8f18cb547e25bf"},
|
||||
{file = "uuid_utils-0.14.1-pp311-pypy311_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:da2234387b45fde40b0fedfee64a0ba591caeea9c48c7698ab6e2d85c7991533"},
|
||||
{file = "uuid_utils-0.14.1-pp311-pypy311_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:50fffc2827348c1e48972eed3d1c698959e63f9d030aa5dd82ba451113158a62"},
|
||||
{file = "uuid_utils-0.14.1-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c1dbe718765f70f5b7f9b7f66b6a937802941b1cc56bcf642ce0274169741e01"},
|
||||
{file = "uuid_utils-0.14.1-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:258186964039a8e36db10810c1ece879d229b01331e09e9030bc5dcabe231bd2"},
|
||||
{file = "uuid_utils-0.14.1.tar.gz", hash = "sha256:9bfc95f64af80ccf129c604fb6b8ca66c6f256451e32bc4570f760e4309c9b69"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "uvicorn"
|
||||
version = "0.40.0"
|
||||
@@ -8197,6 +8357,156 @@ cffi = ">=1.16.0"
|
||||
[package.extras]
|
||||
test = ["pytest"]
|
||||
|
||||
[[package]]
|
||||
name = "xxhash"
|
||||
version = "3.6.0"
|
||||
description = "Python binding for xxHash"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "xxhash-3.6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:87ff03d7e35c61435976554477a7f4cd1704c3596a89a8300d5ce7fc83874a71"},
|
||||
{file = "xxhash-3.6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f572dfd3d0e2eb1a57511831cf6341242f5a9f8298a45862d085f5b93394a27d"},
|
||||
{file = "xxhash-3.6.0-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:89952ea539566b9fed2bbd94e589672794b4286f342254fad28b149f9615fef8"},
|
||||
{file = "xxhash-3.6.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:48e6f2ffb07a50b52465a1032c3cf1f4a5683f944acaca8a134a2f23674c2058"},
|
||||
{file = "xxhash-3.6.0-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:b5b848ad6c16d308c3ac7ad4ba6bede80ed5df2ba8ed382f8932df63158dd4b2"},
|
||||
{file = "xxhash-3.6.0-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a034590a727b44dd8ac5914236a7b8504144447a9682586c3327e935f33ec8cc"},
|
||||
{file = "xxhash-3.6.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8a8f1972e75ebdd161d7896743122834fe87378160c20e97f8b09166213bf8cc"},
|
||||
{file = "xxhash-3.6.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:ee34327b187f002a596d7b167ebc59a1b729e963ce645964bbc050d2f1b73d07"},
|
||||
{file = "xxhash-3.6.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:339f518c3c7a850dd033ab416ea25a692759dc7478a71131fe8869010d2b75e4"},
|
||||
{file = "xxhash-3.6.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:bf48889c9630542d4709192578aebbd836177c9f7a4a2778a7d6340107c65f06"},
|
||||
{file = "xxhash-3.6.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:5576b002a56207f640636056b4160a378fe36a58db73ae5c27a7ec8db35f71d4"},
|
||||
{file = "xxhash-3.6.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:af1f3278bd02814d6dedc5dec397993b549d6f16c19379721e5a1d31e132c49b"},
|
||||
{file = "xxhash-3.6.0-cp310-cp310-win32.whl", hash = "sha256:aed058764db109dc9052720da65fafe84873b05eb8b07e5e653597951af57c3b"},
|
||||
{file = "xxhash-3.6.0-cp310-cp310-win_amd64.whl", hash = "sha256:e82da5670f2d0d98950317f82a0e4a0197150ff19a6df2ba40399c2a3b9ae5fb"},
|
||||
{file = "xxhash-3.6.0-cp310-cp310-win_arm64.whl", hash = "sha256:4a082ffff8c6ac07707fb6b671caf7c6e020c75226c561830b73d862060f281d"},
|
||||
{file = "xxhash-3.6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b47bbd8cf2d72797f3c2772eaaac0ded3d3af26481a26d7d7d41dc2d3c46b04a"},
|
||||
{file = "xxhash-3.6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2b6821e94346f96db75abaa6e255706fb06ebd530899ed76d32cd99f20dc52fa"},
|
||||
{file = "xxhash-3.6.0-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:d0a9751f71a1a65ce3584e9cae4467651c7e70c9d31017fa57574583a4540248"},
|
||||
{file = "xxhash-3.6.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b29ee68625ab37b04c0b40c3fafdf24d2f75ccd778333cfb698f65f6c463f62"},
|
||||
{file = "xxhash-3.6.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:6812c25fe0d6c36a46ccb002f40f27ac903bf18af9f6dd8f9669cb4d176ab18f"},
|
||||
{file = "xxhash-3.6.0-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:4ccbff013972390b51a18ef1255ef5ac125c92dc9143b2d1909f59abc765540e"},
|
||||
{file = "xxhash-3.6.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:297b7fbf86c82c550e12e8fb71968b3f033d27b874276ba3624ea868c11165a8"},
|
||||
{file = "xxhash-3.6.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:dea26ae1eb293db089798d3973a5fc928a18fdd97cc8801226fae705b02b14b0"},
|
||||
{file = "xxhash-3.6.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:7a0b169aafb98f4284f73635a8e93f0735f9cbde17bd5ec332480484241aaa77"},
|
||||
{file = "xxhash-3.6.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:08d45aef063a4531b785cd72de4887766d01dc8f362a515693df349fdb825e0c"},
|
||||
{file = "xxhash-3.6.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:929142361a48ee07f09121fe9e96a84950e8d4df3bb298ca5d88061969f34d7b"},
|
||||
{file = "xxhash-3.6.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:51312c768403d8540487dbbfb557454cfc55589bbde6424456951f7fcd4facb3"},
|
||||
{file = "xxhash-3.6.0-cp311-cp311-win32.whl", hash = "sha256:d1927a69feddc24c987b337ce81ac15c4720955b667fe9b588e02254b80446fd"},
|
||||
{file = "xxhash-3.6.0-cp311-cp311-win_amd64.whl", hash = "sha256:26734cdc2d4ffe449b41d186bbeac416f704a482ed835d375a5c0cb02bc63fef"},
|
||||
{file = "xxhash-3.6.0-cp311-cp311-win_arm64.whl", hash = "sha256:d72f67ef8bf36e05f5b6c65e8524f265bd61071471cd4cf1d36743ebeeeb06b7"},
|
||||
{file = "xxhash-3.6.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:01362c4331775398e7bb34e3ab403bc9ee9f7c497bc7dee6272114055277dd3c"},
|
||||
{file = "xxhash-3.6.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b7b2df81a23f8cb99656378e72501b2cb41b1827c0f5a86f87d6b06b69f9f204"},
|
||||
{file = "xxhash-3.6.0-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:dc94790144e66b14f67b10ac8ed75b39ca47536bf8800eb7c24b50271ea0c490"},
|
||||
{file = "xxhash-3.6.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:93f107c673bccf0d592cdba077dedaf52fe7f42dcd7676eba1f6d6f0c3efffd2"},
|
||||
{file = "xxhash-3.6.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2aa5ee3444c25b69813663c9f8067dcfaa2e126dc55e8dddf40f4d1c25d7effa"},
|
||||
{file = "xxhash-3.6.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f7f99123f0e1194fa59cc69ad46dbae2e07becec5df50a0509a808f90a0f03f0"},
|
||||
{file = "xxhash-3.6.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:49e03e6fe2cac4a1bc64952dd250cf0dbc5ef4ebb7b8d96bce82e2de163c82a2"},
|
||||
{file = "xxhash-3.6.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:bd17fede52a17a4f9a7bc4472a5867cb0b160deeb431795c0e4abe158bc784e9"},
|
||||
{file = "xxhash-3.6.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:6fb5f5476bef678f69db04f2bd1efbed3030d2aba305b0fc1773645f187d6a4e"},
|
||||
{file = "xxhash-3.6.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:843b52f6d88071f87eba1631b684fcb4b2068cd2180a0224122fe4ef011a9374"},
|
||||
{file = "xxhash-3.6.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:7d14a6cfaf03b1b6f5f9790f76880601ccc7896aff7ab9cd8978a939c1eb7e0d"},
|
||||
{file = "xxhash-3.6.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:418daf3db71e1413cfe211c2f9a528456936645c17f46b5204705581a45390ae"},
|
||||
{file = "xxhash-3.6.0-cp312-cp312-win32.whl", hash = "sha256:50fc255f39428a27299c20e280d6193d8b63b8ef8028995323bf834a026b4fbb"},
|
||||
{file = "xxhash-3.6.0-cp312-cp312-win_amd64.whl", hash = "sha256:c0f2ab8c715630565ab8991b536ecded9416d615538be8ecddce43ccf26cbc7c"},
|
||||
{file = "xxhash-3.6.0-cp312-cp312-win_arm64.whl", hash = "sha256:eae5c13f3bc455a3bbb68bdc513912dc7356de7e2280363ea235f71f54064829"},
|
||||
{file = "xxhash-3.6.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:599e64ba7f67472481ceb6ee80fa3bd828fd61ba59fb11475572cc5ee52b89ec"},
|
||||
{file = "xxhash-3.6.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7d8b8aaa30fca4f16f0c84a5c8d7ddee0e25250ec2796c973775373257dde8f1"},
|
||||
{file = "xxhash-3.6.0-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:d597acf8506d6e7101a4a44a5e428977a51c0fadbbfd3c39650cca9253f6e5a6"},
|
||||
{file = "xxhash-3.6.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:858dc935963a33bc33490128edc1c12b0c14d9c7ebaa4e387a7869ecc4f3e263"},
|
||||
{file = "xxhash-3.6.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:ba284920194615cb8edf73bf52236ce2e1664ccd4a38fdb543506413529cc546"},
|
||||
{file = "xxhash-3.6.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:4b54219177f6c6674d5378bd862c6aedf64725f70dd29c472eaae154df1a2e89"},
|
||||
{file = "xxhash-3.6.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:42c36dd7dbad2f5238950c377fcbf6811b1cdb1c444fab447960030cea60504d"},
|
||||
{file = "xxhash-3.6.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f22927652cba98c44639ffdc7aaf35828dccf679b10b31c4ad72a5b530a18eb7"},
|
||||
{file = "xxhash-3.6.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b45fad44d9c5c119e9c6fbf2e1c656a46dc68e280275007bbfd3d572b21426db"},
|
||||
{file = "xxhash-3.6.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:6f2580ffab1a8b68ef2b901cde7e55fa8da5e4be0977c68f78fc80f3c143de42"},
|
||||
{file = "xxhash-3.6.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:40c391dd3cd041ebc3ffe6f2c862f402e306eb571422e0aa918d8070ba31da11"},
|
||||
{file = "xxhash-3.6.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f205badabde7aafd1a31e8ca2a3e5a763107a71c397c4481d6a804eb5063d8bd"},
|
||||
{file = "xxhash-3.6.0-cp313-cp313-win32.whl", hash = "sha256:2577b276e060b73b73a53042ea5bd5203d3e6347ce0d09f98500f418a9fcf799"},
|
||||
{file = "xxhash-3.6.0-cp313-cp313-win_amd64.whl", hash = "sha256:757320d45d2fbcce8f30c42a6b2f47862967aea7bf458b9625b4bbe7ee390392"},
|
||||
{file = "xxhash-3.6.0-cp313-cp313-win_arm64.whl", hash = "sha256:457b8f85dec5825eed7b69c11ae86834a018b8e3df5e77783c999663da2f96d6"},
|
||||
{file = "xxhash-3.6.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:a42e633d75cdad6d625434e3468126c73f13f7584545a9cf34e883aa1710e702"},
|
||||
{file = "xxhash-3.6.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:568a6d743219e717b07b4e03b0a828ce593833e498c3b64752e0f5df6bfe84db"},
|
||||
{file = "xxhash-3.6.0-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:bec91b562d8012dae276af8025a55811b875baace6af510412a5e58e3121bc54"},
|
||||
{file = "xxhash-3.6.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:78e7f2f4c521c30ad5e786fdd6bae89d47a32672a80195467b5de0480aa97b1f"},
|
||||
{file = "xxhash-3.6.0-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:3ed0df1b11a79856df5ffcab572cbd6b9627034c1c748c5566fa79df9048a7c5"},
|
||||
{file = "xxhash-3.6.0-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0e4edbfc7d420925b0dd5e792478ed393d6e75ff8fc219a6546fb446b6a417b1"},
|
||||
{file = "xxhash-3.6.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fba27a198363a7ef87f8c0f6b171ec36b674fe9053742c58dd7e3201c1ab30ee"},
|
||||
{file = "xxhash-3.6.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:794fe9145fe60191c6532fa95063765529770edcdd67b3d537793e8004cabbfd"},
|
||||
{file = "xxhash-3.6.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:6105ef7e62b5ac73a837778efc331a591d8442f8ef5c7e102376506cb4ae2729"},
|
||||
{file = "xxhash-3.6.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:f01375c0e55395b814a679b3eea205db7919ac2af213f4a6682e01220e5fe292"},
|
||||
{file = "xxhash-3.6.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:d706dca2d24d834a4661619dcacf51a75c16d65985718d6a7d73c1eeeb903ddf"},
|
||||
{file = "xxhash-3.6.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:5f059d9faeacd49c0215d66f4056e1326c80503f51a1532ca336a385edadd033"},
|
||||
{file = "xxhash-3.6.0-cp313-cp313t-win32.whl", hash = "sha256:1244460adc3a9be84731d72b8e80625788e5815b68da3da8b83f78115a40a7ec"},
|
||||
{file = "xxhash-3.6.0-cp313-cp313t-win_amd64.whl", hash = "sha256:b1e420ef35c503869c4064f4a2f2b08ad6431ab7b229a05cce39d74268bca6b8"},
|
||||
{file = "xxhash-3.6.0-cp313-cp313t-win_arm64.whl", hash = "sha256:ec44b73a4220623235f67a996c862049f375df3b1052d9899f40a6382c32d746"},
|
||||
{file = "xxhash-3.6.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:a40a3d35b204b7cc7643cbcf8c9976d818cb47befcfac8bbefec8038ac363f3e"},
|
||||
{file = "xxhash-3.6.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:a54844be970d3fc22630b32d515e79a90d0a3ddb2644d8d7402e3c4c8da61405"},
|
||||
{file = "xxhash-3.6.0-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:016e9190af8f0a4e3741343777710e3d5717427f175adfdc3e72508f59e2a7f3"},
|
||||
{file = "xxhash-3.6.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4f6f72232f849eb9d0141e2ebe2677ece15adfd0fa599bc058aad83c714bb2c6"},
|
||||
{file = "xxhash-3.6.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:63275a8aba7865e44b1813d2177e0f5ea7eadad3dd063a21f7cf9afdc7054063"},
|
||||
{file = "xxhash-3.6.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:3cd01fa2aa00d8b017c97eb46b9a794fbdca53fc14f845f5a328c71254b0abb7"},
|
||||
{file = "xxhash-3.6.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0226aa89035b62b6a86d3c68df4d7c1f47a342b8683da2b60cedcddb46c4d95b"},
|
||||
{file = "xxhash-3.6.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c6e193e9f56e4ca4923c61238cdaced324f0feac782544eb4c6d55ad5cc99ddd"},
|
||||
{file = "xxhash-3.6.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:9176dcaddf4ca963d4deb93866d739a343c01c969231dbe21680e13a5d1a5bf0"},
|
||||
{file = "xxhash-3.6.0-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:c1ce4009c97a752e682b897aa99aef84191077a9433eb237774689f14f8ec152"},
|
||||
{file = "xxhash-3.6.0-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:8cb2f4f679b01513b7adbb9b1b2f0f9cdc31b70007eaf9d59d0878809f385b11"},
|
||||
{file = "xxhash-3.6.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:653a91d7c2ab54a92c19ccf43508b6a555440b9be1bc8be553376778be7f20b5"},
|
||||
{file = "xxhash-3.6.0-cp314-cp314-win32.whl", hash = "sha256:a756fe893389483ee8c394d06b5ab765d96e68fbbfe6fde7aa17e11f5720559f"},
|
||||
{file = "xxhash-3.6.0-cp314-cp314-win_amd64.whl", hash = "sha256:39be8e4e142550ef69629c9cd71b88c90e9a5db703fecbcf265546d9536ca4ad"},
|
||||
{file = "xxhash-3.6.0-cp314-cp314-win_arm64.whl", hash = "sha256:25915e6000338999236f1eb68a02a32c3275ac338628a7eaa5a269c401995679"},
|
||||
{file = "xxhash-3.6.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:c5294f596a9017ca5a3e3f8884c00b91ab2ad2933cf288f4923c3fd4346cf3d4"},
|
||||
{file = "xxhash-3.6.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:1cf9dcc4ab9cff01dfbba78544297a3a01dafd60f3bde4e2bfd016cf7e4ddc67"},
|
||||
{file = "xxhash-3.6.0-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:01262da8798422d0685f7cef03b2bd3f4f46511b02830861df548d7def4402ad"},
|
||||
{file = "xxhash-3.6.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:51a73fb7cb3a3ead9f7a8b583ffd9b8038e277cdb8cb87cf890e88b3456afa0b"},
|
||||
{file = "xxhash-3.6.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:b9c6df83594f7df8f7f708ce5ebeacfc69f72c9fbaaababf6cf4758eaada0c9b"},
|
||||
{file = "xxhash-3.6.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:627f0af069b0ea56f312fd5189001c24578868643203bca1abbc2c52d3a6f3ca"},
|
||||
{file = "xxhash-3.6.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:aa912c62f842dfd013c5f21a642c9c10cd9f4c4e943e0af83618b4a404d9091a"},
|
||||
{file = "xxhash-3.6.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:b465afd7909db30168ab62afe40b2fcf79eedc0b89a6c0ab3123515dc0df8b99"},
|
||||
{file = "xxhash-3.6.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:a881851cf38b0a70e7c4d3ce81fc7afd86fbc2a024f4cfb2a97cf49ce04b75d3"},
|
||||
{file = "xxhash-3.6.0-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:9b3222c686a919a0f3253cfc12bb118b8b103506612253b5baeaac10d8027cf6"},
|
||||
{file = "xxhash-3.6.0-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:c5aa639bc113e9286137cec8fadc20e9cd732b2cc385c0b7fa673b84fc1f2a93"},
|
||||
{file = "xxhash-3.6.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:5c1343d49ac102799905e115aee590183c3921d475356cb24b4de29a4bc56518"},
|
||||
{file = "xxhash-3.6.0-cp314-cp314t-win32.whl", hash = "sha256:5851f033c3030dd95c086b4a36a2683c2ff4a799b23af60977188b057e467119"},
|
||||
{file = "xxhash-3.6.0-cp314-cp314t-win_amd64.whl", hash = "sha256:0444e7967dac37569052d2409b00a8860c2135cff05502df4da80267d384849f"},
|
||||
{file = "xxhash-3.6.0-cp314-cp314t-win_arm64.whl", hash = "sha256:bb79b1e63f6fd84ec778a4b1916dfe0a7c3fdb986c06addd5db3a0d413819d95"},
|
||||
{file = "xxhash-3.6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7dac94fad14a3d1c92affb661021e1d5cbcf3876be5f5b4d90730775ccb7ac41"},
|
||||
{file = "xxhash-3.6.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6965e0e90f1f0e6cb78da568c13d4a348eeb7f40acfd6d43690a666a459458b8"},
|
||||
{file = "xxhash-3.6.0-cp38-cp38-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:2ab89a6b80f22214b43d98693c30da66af910c04f9858dd39c8e570749593d7e"},
|
||||
{file = "xxhash-3.6.0-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4903530e866b7a9c1eadfd3fa2fbe1b97d3aed4739a80abf506eb9318561c850"},
|
||||
{file = "xxhash-3.6.0-cp38-cp38-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:4da8168ae52c01ac64c511d6f4a709479da8b7a4a1d7621ed51652f93747dffa"},
|
||||
{file = "xxhash-3.6.0-cp38-cp38-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:97460eec202017f719e839a0d3551fbc0b2fcc9c6c6ffaa5af85bbd5de432788"},
|
||||
{file = "xxhash-3.6.0-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:45aae0c9df92e7fa46fbb738737324a563c727990755ec1965a6a339ea10a1df"},
|
||||
{file = "xxhash-3.6.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:0d50101e57aad86f4344ca9b32d091a2135a9d0a4396f19133426c88025b09f1"},
|
||||
{file = "xxhash-3.6.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:9085e798c163ce310d91f8aa6b325dda3c2944c93c6ce1edb314030d4167cc65"},
|
||||
{file = "xxhash-3.6.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:a87f271a33fad0e5bf3be282be55d78df3a45ae457950deb5241998790326f87"},
|
||||
{file = "xxhash-3.6.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:9e040d3e762f84500961791fa3709ffa4784d4dcd7690afc655c095e02fff05f"},
|
||||
{file = "xxhash-3.6.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:b0359391c3dad6de872fefb0cf5b69d55b0655c55ee78b1bb7a568979b2ce96b"},
|
||||
{file = "xxhash-3.6.0-cp38-cp38-win32.whl", hash = "sha256:e4ff728a2894e7f436b9e94c667b0f426b9c74b71f900cf37d5468c6b5da0536"},
|
||||
{file = "xxhash-3.6.0-cp38-cp38-win_amd64.whl", hash = "sha256:01be0c5b500c5362871fc9cfdf58c69b3e5c4f531a82229ddb9eb1eb14138004"},
|
||||
{file = "xxhash-3.6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:cc604dc06027dbeb8281aeac5899c35fcfe7c77b25212833709f0bff4ce74d2a"},
|
||||
{file = "xxhash-3.6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:277175a73900ad43a8caeb8b99b9604f21fe8d7c842f2f9061a364a7e220ddb7"},
|
||||
{file = "xxhash-3.6.0-cp39-cp39-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:cfbc5b91397c8c2972fdac13fb3e4ed2f7f8ccac85cd2c644887557780a9b6e2"},
|
||||
{file = "xxhash-3.6.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2762bfff264c4e73c0e507274b40634ff465e025f0eaf050897e88ec8367575d"},
|
||||
{file = "xxhash-3.6.0-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2f171a900d59d51511209f7476933c34a0c2c711078d3c80e74e0fe4f38680ec"},
|
||||
{file = "xxhash-3.6.0-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:780b90c313348f030b811efc37b0fa1431163cb8db8064cf88a7936b6ce5f222"},
|
||||
{file = "xxhash-3.6.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:18b242455eccdfcd1fa4134c431a30737d2b4f045770f8fe84356b3469d4b919"},
|
||||
{file = "xxhash-3.6.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:a75ffc1bd5def584129774c158e108e5d768e10b75813f2b32650bb041066ed6"},
|
||||
{file = "xxhash-3.6.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:1fc1ed882d1e8df932a66e2999429ba6cc4d5172914c904ab193381fba825360"},
|
||||
{file = "xxhash-3.6.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:44e342e8cc11b4e79dae5c57f2fb6360c3c20cc57d32049af8f567f5b4bcb5f4"},
|
||||
{file = "xxhash-3.6.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:c2f9ccd5c4be370939a2e17602fbc49995299203da72a3429db013d44d590e86"},
|
||||
{file = "xxhash-3.6.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:02ea4cb627c76f48cd9fb37cf7ab22bd51e57e1b519807234b473faebe526796"},
|
||||
{file = "xxhash-3.6.0-cp39-cp39-win32.whl", hash = "sha256:6551880383f0e6971dc23e512c9ccc986147ce7bfa1cd2e4b520b876c53e9f3d"},
|
||||
{file = "xxhash-3.6.0-cp39-cp39-win_amd64.whl", hash = "sha256:7c35c4cdc65f2a29f34425c446f2f5cdcd0e3c34158931e1cc927ece925ab802"},
|
||||
{file = "xxhash-3.6.0-cp39-cp39-win_arm64.whl", hash = "sha256:ffc578717a347baf25be8397cb10d2528802d24f94cfc005c0e44fef44b5cdd6"},
|
||||
{file = "xxhash-3.6.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:0f7b7e2ec26c1666ad5fc9dbfa426a6a3367ceaf79db5dd76264659d509d73b0"},
|
||||
{file = "xxhash-3.6.0-pp311-pypy311_pp73-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:5dc1e14d14fa0f5789ec29a7062004b5933964bb9b02aae6622b8f530dc40296"},
|
||||
{file = "xxhash-3.6.0-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:881b47fc47e051b37d94d13e7455131054b56749b91b508b0907eb07900d1c13"},
|
||||
{file = "xxhash-3.6.0-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c6dc31591899f5e5666f04cc2e529e69b4072827085c1ef15294d91a004bc1bd"},
|
||||
{file = "xxhash-3.6.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:15e0dac10eb9309508bfc41f7f9deaa7755c69e35af835db9cb10751adebc35d"},
|
||||
{file = "xxhash-3.6.0.tar.gz", hash = "sha256:f0162a78b13a0d7617b2845b90c763339d1f1d82bb04a4b07f4ab535cc5e05d6"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "yarl"
|
||||
version = "1.22.0"
|
||||
@@ -8530,4 +8840,4 @@ cffi = ["cffi (>=1.17,<2.0) ; platform_python_implementation != \"PyPy\" and pyt
|
||||
[metadata]
|
||||
lock-version = "2.1"
|
||||
python-versions = ">=3.10,<3.14"
|
||||
content-hash = "3ef62836d8321b9a3b8e897dade8dc6ca9022fd9468c53f384b0871b521ab343"
|
||||
content-hash = "e7863413fda5e0a8b236e39a4c37390b52ae8c2f572c77df732abbd4280312b6"
|
||||
|
||||
@@ -89,6 +89,8 @@ croniter = "^6.0.0"
|
||||
stagehand = "^0.5.1"
|
||||
gravitas-md2gdocs = "^0.1.0"
|
||||
posthog = "^7.6.0"
|
||||
fpdf2 = "^2.8.6"
|
||||
langsmith = "^0.7.7"
|
||||
|
||||
[tool.poetry.group.dev.dependencies]
|
||||
aiohappyeyeballs = "^2.6.1"
|
||||
|
||||
@@ -51,6 +51,7 @@ model User {
|
||||
ChatSessions ChatSession[]
|
||||
AgentPresets AgentPreset[]
|
||||
LibraryAgents LibraryAgent[]
|
||||
LibraryFolders LibraryFolder[]
|
||||
|
||||
Profile Profile[]
|
||||
UserOnboarding UserOnboarding?
|
||||
@@ -395,6 +396,9 @@ model LibraryAgent {
|
||||
creatorId String?
|
||||
Creator Profile? @relation(fields: [creatorId], references: [id])
|
||||
|
||||
folderId String?
|
||||
Folder LibraryFolder? @relation(fields: [folderId], references: [id], onDelete: Restrict)
|
||||
|
||||
useGraphIsActiveVersion Boolean @default(false)
|
||||
|
||||
isFavorite Boolean @default(false)
|
||||
@@ -407,6 +411,30 @@ model LibraryAgent {
|
||||
@@unique([userId, agentGraphId, agentGraphVersion])
|
||||
@@index([agentGraphId, agentGraphVersion])
|
||||
@@index([creatorId])
|
||||
@@index([folderId])
|
||||
}
|
||||
|
||||
model LibraryFolder {
|
||||
id String @id @default(uuid())
|
||||
createdAt DateTime @default(now())
|
||||
updatedAt DateTime @default(now()) @updatedAt
|
||||
|
||||
userId String
|
||||
User User @relation(fields: [userId], references: [id], onDelete: Cascade)
|
||||
|
||||
name String
|
||||
icon String?
|
||||
color String?
|
||||
|
||||
parentId String?
|
||||
Parent LibraryFolder? @relation("FolderHierarchy", fields: [parentId], references: [id], onDelete: Cascade)
|
||||
Children LibraryFolder[] @relation("FolderHierarchy")
|
||||
|
||||
isDeleted Boolean @default(false)
|
||||
|
||||
LibraryAgents LibraryAgent[]
|
||||
|
||||
@@unique([userId, parentId, name]) // Name unique per parent per user
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////
|
||||
@@ -920,6 +948,17 @@ view mv_review_stats {
|
||||
// Refresh uses CONCURRENTLY to avoid blocking reads
|
||||
}
|
||||
|
||||
// Note: This is actually a MATERIALIZED VIEW in the database
|
||||
// Refreshed automatically every hour via pg_cron (with fallback to manual refresh)
|
||||
view mv_suggested_blocks {
|
||||
block_id String @unique
|
||||
execution_count Int
|
||||
|
||||
// Pre-aggregated execution counts per block for the last 14 days
|
||||
// Used by builder suggestions for ordering blocks by popularity
|
||||
// Refresh uses CONCURRENTLY to avoid blocking reads
|
||||
}
|
||||
|
||||
model StoreListing {
|
||||
id String @id @default(uuid())
|
||||
createdAt DateTime @default(now())
|
||||
@@ -1091,9 +1130,11 @@ enum APIKeyPermission {
|
||||
IDENTITY // Info about the authenticated user
|
||||
EXECUTE_GRAPH // Can execute agent graphs
|
||||
READ_GRAPH // Can get graph versions and details
|
||||
WRITE_GRAPH // Can create and update agent graphs
|
||||
EXECUTE_BLOCK // Can execute individual blocks
|
||||
READ_BLOCK // Can get block information
|
||||
READ_STORE // Can read store agents and creators
|
||||
WRITE_LIBRARY // Can add agents to library
|
||||
USE_TOOLS // Can use chat tools via external API
|
||||
MANAGE_INTEGRATIONS // Can initiate OAuth flows and complete them
|
||||
READ_INTEGRATIONS // Can list credentials and providers
|
||||
|
||||
@@ -38,6 +38,8 @@
|
||||
"can_access_graph": true,
|
||||
"is_latest_version": true,
|
||||
"is_favorite": false,
|
||||
"folder_id": null,
|
||||
"folder_name": null,
|
||||
"recommended_schedule_cron": null,
|
||||
"settings": {
|
||||
"human_in_the_loop_safe_mode": true,
|
||||
@@ -83,6 +85,8 @@
|
||||
"can_access_graph": false,
|
||||
"is_latest_version": true,
|
||||
"is_favorite": false,
|
||||
"folder_id": null,
|
||||
"folder_name": null,
|
||||
"recommended_schedule_cron": null,
|
||||
"settings": {
|
||||
"human_in_the_loop_safe_mode": true,
|
||||
|
||||
@@ -1,349 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Integration test for the requeue fix implementation.
|
||||
Tests actual RabbitMQ behavior to verify that republishing sends messages to back of queue.
|
||||
"""
|
||||
|
||||
import json
|
||||
import time
|
||||
from threading import Event
|
||||
from typing import List
|
||||
|
||||
from backend.data.rabbitmq import SyncRabbitMQ
|
||||
from backend.executor.utils import create_execution_queue_config
|
||||
|
||||
|
||||
class QueueOrderTester:
|
||||
"""Helper class to test message ordering in RabbitMQ using a dedicated test queue."""
|
||||
|
||||
def __init__(self):
|
||||
self.received_messages: List[dict] = []
|
||||
self.stop_consuming = Event()
|
||||
self.queue_client = SyncRabbitMQ(create_execution_queue_config())
|
||||
self.queue_client.connect()
|
||||
|
||||
# Use a dedicated test queue name to avoid conflicts
|
||||
self.test_queue_name = "test_requeue_ordering"
|
||||
self.test_exchange = "test_exchange"
|
||||
self.test_routing_key = "test.requeue"
|
||||
|
||||
def setup_queue(self):
|
||||
"""Set up a dedicated test queue for testing."""
|
||||
channel = self.queue_client.get_channel()
|
||||
|
||||
# Declare test exchange
|
||||
channel.exchange_declare(
|
||||
exchange=self.test_exchange, exchange_type="direct", durable=True
|
||||
)
|
||||
|
||||
# Declare test queue
|
||||
channel.queue_declare(
|
||||
queue=self.test_queue_name, durable=True, auto_delete=False
|
||||
)
|
||||
|
||||
# Bind queue to exchange
|
||||
channel.queue_bind(
|
||||
exchange=self.test_exchange,
|
||||
queue=self.test_queue_name,
|
||||
routing_key=self.test_routing_key,
|
||||
)
|
||||
|
||||
# Purge the queue to start fresh
|
||||
channel.queue_purge(self.test_queue_name)
|
||||
print(f"✅ Test queue {self.test_queue_name} setup and purged")
|
||||
|
||||
def create_test_message(self, message_id: str, user_id: str = "test-user") -> str:
|
||||
"""Create a test graph execution message."""
|
||||
return json.dumps(
|
||||
{
|
||||
"graph_exec_id": f"exec-{message_id}",
|
||||
"graph_id": f"graph-{message_id}",
|
||||
"user_id": user_id,
|
||||
"execution_context": {"timezone": "UTC"},
|
||||
"nodes_input_masks": {},
|
||||
"starting_nodes_input": [],
|
||||
}
|
||||
)
|
||||
|
||||
def publish_message(self, message: str):
|
||||
"""Publish a message to the test queue."""
|
||||
channel = self.queue_client.get_channel()
|
||||
channel.basic_publish(
|
||||
exchange=self.test_exchange,
|
||||
routing_key=self.test_routing_key,
|
||||
body=message,
|
||||
)
|
||||
|
||||
def consume_messages(self, max_messages: int = 10, timeout: float = 5.0):
|
||||
"""Consume messages and track their order."""
|
||||
|
||||
def callback(ch, method, properties, body):
|
||||
try:
|
||||
message_data = json.loads(body.decode())
|
||||
self.received_messages.append(message_data)
|
||||
ch.basic_ack(delivery_tag=method.delivery_tag)
|
||||
|
||||
if len(self.received_messages) >= max_messages:
|
||||
self.stop_consuming.set()
|
||||
except Exception as e:
|
||||
print(f"Error processing message: {e}")
|
||||
ch.basic_nack(delivery_tag=method.delivery_tag, requeue=False)
|
||||
|
||||
# Use synchronous consumption with blocking
|
||||
channel = self.queue_client.get_channel()
|
||||
|
||||
# Check if there are messages in the queue first
|
||||
method_frame, header_frame, body = channel.basic_get(
|
||||
queue=self.test_queue_name, auto_ack=False
|
||||
)
|
||||
if method_frame:
|
||||
# There are messages, set up consumer
|
||||
channel.basic_nack(
|
||||
delivery_tag=method_frame.delivery_tag, requeue=True
|
||||
) # Put message back
|
||||
|
||||
# Set up consumer
|
||||
channel.basic_consume(
|
||||
queue=self.test_queue_name,
|
||||
on_message_callback=callback,
|
||||
)
|
||||
|
||||
# Consume with timeout
|
||||
start_time = time.time()
|
||||
while (
|
||||
not self.stop_consuming.is_set()
|
||||
and (time.time() - start_time) < timeout
|
||||
and len(self.received_messages) < max_messages
|
||||
):
|
||||
try:
|
||||
channel.connection.process_data_events(time_limit=0.1)
|
||||
except Exception as e:
|
||||
print(f"Error during consumption: {e}")
|
||||
break
|
||||
|
||||
# Cancel the consumer
|
||||
try:
|
||||
channel.cancel()
|
||||
except Exception:
|
||||
pass
|
||||
else:
|
||||
# No messages in queue - this might be expected for some tests
|
||||
pass
|
||||
|
||||
return self.received_messages
|
||||
|
||||
def cleanup(self):
    """Tear down the queue and exchange created for this test run.

    Best effort: any failure is reported but never propagated, so a broken
    teardown cannot mask the actual test outcome.
    """
    try:
        ch = self.queue_client.get_channel()
        # Delete the test-scoped queue first, then its exchange.
        ch.queue_delete(queue=self.test_queue_name)
        ch.exchange_delete(exchange=self.test_exchange)
        print(f"✅ Test queue {self.test_queue_name} cleaned up")
    except Exception as e:
        print(f"⚠️ Cleanup issue: {e}")
|
||||
|
||||
|
||||
def test_queue_ordering_behavior():
    """
    Integration test to verify that our republishing method sends messages to back of queue.
    This tests the actual fix for the rate limiting queue blocking issue.

    Requires a reachable RabbitMQ broker (exercised via QueueOrderTester);
    the queue/exchange are created in setup_queue() and removed in cleanup().

    NOTE(review): the assertions assume create_test_message(label, ...)
    produces graph_exec_id == f"exec-{label}" — confirm in the class setup.
    """
    tester = QueueOrderTester()

    try:
        tester.setup_queue()

        print("🧪 Testing actual RabbitMQ queue ordering behavior...")

        # Test 1: Normal FIFO behavior — baseline sanity check that the
        # broker delivers in publish order before we test requeue semantics.
        print("1. Testing normal FIFO queue behavior")

        # Publish messages in order: A, B, C
        msg_a = tester.create_test_message("A")
        msg_b = tester.create_test_message("B")
        msg_c = tester.create_test_message("C")

        tester.publish_message(msg_a)
        tester.publish_message(msg_b)
        tester.publish_message(msg_c)

        # Consume and verify FIFO order: A, B, C
        # (reset the shared collection state between sub-tests)
        tester.received_messages = []
        tester.stop_consuming.clear()
        messages = tester.consume_messages(max_messages=3)

        assert len(messages) == 3, f"Expected 3 messages, got {len(messages)}"
        assert (
            messages[0]["graph_exec_id"] == "exec-A"
        ), f"First message should be A, got {messages[0]['graph_exec_id']}"
        assert (
            messages[1]["graph_exec_id"] == "exec-B"
        ), f"Second message should be B, got {messages[1]['graph_exec_id']}"
        assert (
            messages[2]["graph_exec_id"] == "exec-C"
        ), f"Third message should be C, got {messages[2]['graph_exec_id']}"

        print("✅ FIFO order confirmed: A -> B -> C")

        # Test 2: Rate limiting simulation - the key test!
        print("2. Testing rate limiting fix scenario")

        # Simulate the scenario where user1 is rate limited
        user1_msg = tester.create_test_message("RATE-LIMITED", "user1")
        user2_msg1 = tester.create_test_message("USER2-1", "user2")
        user2_msg2 = tester.create_test_message("USER2-2", "user2")

        # Initially publish user1 message (gets consumed, then rate limited on retry)
        tester.publish_message(user1_msg)

        # Other users publish their messages
        tester.publish_message(user2_msg1)
        tester.publish_message(user2_msg2)

        # Now simulate: user1 message gets "requeued" using our new republishing method
        # This is what happens in manager.py when requeue_by_republishing=True
        tester.publish_message(user1_msg)  # Goes to back via our method

        # Expected order: RATE-LIMITED, USER2-1, USER2-2, RATE-LIMITED (republished to back)
        # This shows that user2 messages get processed instead of being blocked
        tester.received_messages = []
        tester.stop_consuming.clear()
        messages = tester.consume_messages(max_messages=4)

        assert len(messages) == 4, f"Expected 4 messages, got {len(messages)}"

        # The key verification: user2 messages are NOT blocked by user1's rate-limited message
        user2_messages = [msg for msg in messages if msg["user_id"] == "user2"]
        assert len(user2_messages) == 2, "Both user2 messages should be processed"
        assert user2_messages[0]["graph_exec_id"] == "exec-USER2-1"
        assert user2_messages[1]["graph_exec_id"] == "exec-USER2-2"

        print("✅ Rate limiting fix confirmed: user2 executions NOT blocked by user1")

        # Test 3: Verify our method behaves like going to back of queue
        print("3. Testing republishing sends messages to back")

        # Start with message X in queue
        msg_x = tester.create_test_message("X")
        tester.publish_message(msg_x)

        # Add message Y
        msg_y = tester.create_test_message("Y")
        tester.publish_message(msg_y)

        # Republish X (simulates requeue using our method)
        tester.publish_message(msg_x)

        # Expected: X, Y, X (X was republished to back)
        tester.received_messages = []
        tester.stop_consuming.clear()
        messages = tester.consume_messages(max_messages=3)

        assert len(messages) == 3
        # Y should come before the republished X
        y_index = next(
            i for i, msg in enumerate(messages) if msg["graph_exec_id"] == "exec-Y"
        )
        # Search from index 1 onward so the ORIGINAL X at position 0 is
        # skipped and only the republished copy is matched.
        republished_x_index = next(
            i
            for i, msg in enumerate(messages[1:], 1)
            if msg["graph_exec_id"] == "exec-X"
        )

        assert (
            y_index < republished_x_index
        ), f"Y should come before republished X, but got order: {[m['graph_exec_id'] for m in messages]}"

        print("✅ Republishing confirmed: messages go to back of queue")

        print("🎉 All integration tests passed!")
        print("🎉 Our republishing method works correctly with real RabbitMQ")
        print("🎉 Queue blocking issue is fixed!")

    finally:
        # Always remove the test queue/exchange, even on assertion failure.
        tester.cleanup()
|
||||
|
||||
|
||||
def test_traditional_requeue_behavior():
    """
    Test that traditional requeue (basic_nack with requeue=True) sends messages to FRONT of queue.
    This validates our hypothesis about why queue blocking occurs.
    """
    tester = QueueOrderTester()

    try:
        tester.setup_queue()
        print("🧪 Testing traditional requeue behavior (basic_nack with requeue=True)")

        # Publish A then B so the broker holds them in FIFO order.
        for label in ("A", "B"):
            tester.publish_message(tester.create_test_message(label))

        # Pull the head of the queue (should be A) without auto-ack so we
        # can nack it back afterwards.
        channel = tester.queue_client.get_channel()
        method_frame, header_frame, body = channel.basic_get(
            queue=tester.test_queue_name, auto_ack=False
        )

        assert method_frame is not None, "Should have received message A"
        consumed_msg = json.loads(body.decode())
        assert (
            consumed_msg["graph_exec_id"] == "exec-A"
        ), f"Should have consumed message A, got {consumed_msg['graph_exec_id']}"

        # Traditional requeue: basic_nack with requeue=True (sends to FRONT)
        channel.basic_nack(delivery_tag=method_frame.delivery_tag, requeue=True)
        print(f"🔄 Traditional requeue (to FRONT): {consumed_msg['graph_exec_id']}")

        # Drain both messages with basic_get (auto-acking this time) for a
        # reliable, consumer-free read of the resulting order.
        received_messages = []
        for _ in range(2):
            method_frame, header_frame, body = channel.basic_get(
                queue=tester.test_queue_name, auto_ack=True
            )
            if method_frame:
                received_messages.append(json.loads(body.decode()))

        # CRITICAL ASSERTION: traditional requeue should put A back at the
        # FRONT, so the expected order is A (requeued), then B.
        assert (
            len(received_messages) == 2
        ), f"Expected 2 messages, got {len(received_messages)}"

        first_msg = received_messages[0]["graph_exec_id"]
        second_msg = received_messages[1]["graph_exec_id"]

        assert (
            first_msg == "exec-A"
        ), f"Traditional requeue should put A at FRONT, but first message was: {first_msg}"
        assert (
            second_msg == "exec-B"
        ), f"B should come after requeued A, but second message was: {second_msg}"

        print(
            "✅ HYPOTHESIS CONFIRMED: Traditional requeue sends messages to FRONT of queue"
        )
        print(f" Order: {first_msg} (requeued to front) → {second_msg}")
        print(" This explains why rate-limited messages block other users!")

    finally:
        # Always remove the test queue/exchange, even on assertion failure.
        tester.cleanup()
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Run BOTH integration scenarios on manual invocation: the original
    # entry point only ran test_queue_ordering_behavior(), silently skipping
    # the traditional-requeue hypothesis test defined in this file.
    test_queue_ordering_behavior()
    test_traditional_requeue_behavior()
|
||||
@@ -6,6 +6,7 @@ const config: StorybookConfig = {
|
||||
"../src/components/tokens/**/*.stories.@(js|jsx|mjs|ts|tsx)",
|
||||
"../src/components/atoms/**/*.stories.@(js|jsx|mjs|ts|tsx)",
|
||||
"../src/components/molecules/**/*.stories.@(js|jsx|mjs|ts|tsx)",
|
||||
"../src/components/ai-elements/**/*.stories.@(js|jsx|mjs|ts|tsx)",
|
||||
],
|
||||
addons: [
|
||||
"@storybook/addon-a11y",
|
||||
|
||||
@@ -32,6 +32,7 @@
|
||||
"dependencies": {
|
||||
"@ai-sdk/react": "3.0.61",
|
||||
"@faker-js/faker": "10.0.0",
|
||||
"@ferrucc-io/emoji-picker": "0.0.48",
|
||||
"@hookform/resolvers": "5.2.2",
|
||||
"@next/third-parties": "15.4.6",
|
||||
"@phosphor-icons/react": "2.1.10",
|
||||
|
||||
Some files were not shown because too many files have changed in this diff. Show More
Reference in New Issue
Block a user