From 728c40def5f717004ceb35b53ad6e13c86759e97 Mon Sep 17 00:00:00 2001
From: Nikhil Bhagat <55572863+NikeGunn@users.noreply.github.com>
Date: Sun, 8 Feb 2026 22:13:04 +0545
Subject: [PATCH 01/18] fix(backend): replace multiprocessing queue with
 thread-safe queue in ExecutionQueue (#11618)
The `ExecutionQueue` class was using `multiprocessing.Manager().Queue()`,
which spawns a manager subprocess and routes every queue operation through
inter-process communication. However, analysis showed that `ExecutionQueue`
is only accessed from threads within the same process, never across
processes. Using the Manager-backed queue therefore caused (see the sketch
after this list):
- Unnecessary subprocess spawning per graph execution
- IPC overhead for every queue operation
- Potential resource leaks if Manager processes weren't properly cleaned
up
- Limited scalability when many graphs execute concurrently
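
For illustration only, and not part of this patch: a minimal standalone sketch contrasting the two queue types (the helper function names are ad hoc). `Manager().Queue()` starts a separate manager process and proxies every call over IPC, while `queue.Queue()` stays in-process and is protected by a lock.

```python
"""Standalone sketch (illustration only, not from this patch)."""
import multiprocessing
import queue
import threading


def manager_backed() -> None:
    manager = multiprocessing.Manager()   # starts a separate manager process
    q = manager.Queue()                   # proxy object; every call goes over IPC
    q.put("item")
    print(q.get(), "| extra processes:", multiprocessing.active_children())
    manager.shutdown()                    # the manager process must be cleaned up


def thread_backed() -> None:
    q = queue.Queue()                     # plain in-process queue, guarded by a lock
    t = threading.Thread(target=q.put, args=("item",))
    t.start()
    t.join()
    print(q.get(), "| no extra process is needed")


if __name__ == "__main__":
    manager_backed()   # prints one active child: the Manager process
    thread_backed()    # nothing is spawned
```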
### Changes
- Replaced `multiprocessing.Manager().Queue()` with `queue.Queue()` in
`ExecutionQueue` class
- Updated imports: removed `from multiprocessing import Manager` and
`from queue import Empty`, added `import queue`
- Updated exception handling from `except Empty:` to `except
queue.Empty:` (behavior unchanged; see the note below)
- Added comprehensive docstring explaining the bug and fix
**File changed:** `autogpt_platform/backend/backend/data/execution.py`
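
A small aside, not part of the diff: `queue.Empty` is the same exception class that was previously imported via `from queue import Empty`, and multiprocessing queues raise it too, so the updated `except` clause only follows the new import style. A minimal sketch:

```python
# Minimal sketch (illustration only): the "empty" exception is unchanged.
import queue
from queue import Empty

assert Empty is queue.Empty  # identical class; only the import spelling changed

q = queue.Queue()
try:
    q.get_nowait()
except queue.Empty:
    print("an empty queue raises queue.Empty, as handled in get_or_none()")
```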
### Checklist
#### For code changes:
- [x] I have clearly listed my changes in the PR description
- [x] I have made a test plan
- [x] I have tested my changes according to the test plan:
  - [x] Verified `ExecutionQueue` uses `queue.Queue` (not `multiprocessing.Manager().Queue()`)
  - [x] Tested all queue operations: `add()`, `get()`, `empty()`, `get_or_none()`
  - [x] Verified thread-safety with concurrent producer/consumer threads (100 items)
  - [x] Verified multi-producer/consumer scenario (3 producers, 2 consumers, 150 items)
  - [x] Confirmed no subprocess spawning when creating multiple queues
  - [x] Code passes Black formatting check
#### For configuration changes:
- [x] `.env.default` is updated or already compatible with my changes
- [x] `docker-compose.yml` is updated or already compatible with my
changes
- [x] I have included a list of my configuration changes in the PR
description (under **Changes**)
> No configuration changes required - this is a code-only fix with no
external API changes.
---------
Co-authored-by: Otto
Co-authored-by: Zamil Majdy
Co-authored-by: Zamil Majdy
---
.../backend/backend/data/execution.py | 15 +++--
.../backend/data/execution_queue_test.py | 60 +++++++++++++++++++
2 files changed, 69 insertions(+), 6 deletions(-)
create mode 100644 autogpt_platform/backend/backend/data/execution_queue_test.py
diff --git a/autogpt_platform/backend/backend/data/execution.py b/autogpt_platform/backend/backend/data/execution.py
index afb8c70538..def3d14fda 100644
--- a/autogpt_platform/backend/backend/data/execution.py
+++ b/autogpt_platform/backend/backend/data/execution.py
@@ -1,9 +1,8 @@
import logging
+import queue
from collections import defaultdict
from datetime import datetime, timedelta, timezone
from enum import Enum
-from multiprocessing import Manager
-from queue import Empty
from typing import (
TYPE_CHECKING,
Annotated,
@@ -1200,12 +1199,16 @@ class NodeExecutionEntry(BaseModel):
class ExecutionQueue(Generic[T]):
"""
- Queue for managing the execution of agents.
- This will be shared between different processes
+ Thread-safe queue for managing node execution within a single graph execution.
+
+ Note: Uses queue.Queue (not multiprocessing.Queue) since all access is from
+ threads within the same process. If migrating back to ProcessPoolExecutor,
+ replace with multiprocessing.Manager().Queue() for cross-process safety.
"""
def __init__(self):
- self.queue = Manager().Queue()
+ # Thread-safe queue (not multiprocessing) — see class docstring
+ self.queue: queue.Queue[T] = queue.Queue()
def add(self, execution: T) -> T:
self.queue.put(execution)
@@ -1220,7 +1223,7 @@ class ExecutionQueue(Generic[T]):
def get_or_none(self) -> T | None:
try:
return self.queue.get_nowait()
- except Empty:
+ except queue.Empty:
return None
diff --git a/autogpt_platform/backend/backend/data/execution_queue_test.py b/autogpt_platform/backend/backend/data/execution_queue_test.py
new file mode 100644
index 0000000000..ffe0fb265b
--- /dev/null
+++ b/autogpt_platform/backend/backend/data/execution_queue_test.py
@@ -0,0 +1,60 @@
+"""Tests for ExecutionQueue thread-safety."""
+
+import queue
+import threading
+
+import pytest
+
+from backend.data.execution import ExecutionQueue
+
+
+def test_execution_queue_uses_stdlib_queue():
+ """Verify ExecutionQueue uses queue.Queue (not multiprocessing)."""
+ q = ExecutionQueue()
+ assert isinstance(q.queue, queue.Queue)
+
+
+def test_basic_operations():
+ """Test add, get, empty, and get_or_none."""
+ q = ExecutionQueue()
+
+ assert q.empty() is True
+ assert q.get_or_none() is None
+
+ result = q.add("item1")
+ assert result == "item1"
+ assert q.empty() is False
+
+ item = q.get()
+ assert item == "item1"
+ assert q.empty() is True
+
+
+def test_thread_safety():
+ """Test concurrent access from multiple threads."""
+ q = ExecutionQueue()
+ results = []
+ num_items = 100
+
+ def producer():
+ for i in range(num_items):
+ q.add(f"item_{i}")
+
+ def consumer():
+ count = 0
+ while count < num_items:
+ item = q.get_or_none()
+ if item is not None:
+ results.append(item)
+ count += 1
+
+ producer_thread = threading.Thread(target=producer)
+ consumer_thread = threading.Thread(target=consumer)
+
+ producer_thread.start()
+ consumer_thread.start()
+
+ producer_thread.join(timeout=5)
+ consumer_thread.join(timeout=5)
+
+ assert len(results) == num_items
From 9c7c598c7d735145ce1f4b535737f80d388954cd Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Sun, 8 Feb 2026 23:06:40 +0000
Subject: [PATCH 02/18] chore(deps): bump peter-evans/create-pull-request from
7 to 8 (#11663)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Bumps
[peter-evans/create-pull-request](https://github.com/peter-evans/create-pull-request)
from 7 to 8.
Release notes, sourced from peter-evans/create-pull-request's releases (details truncated):

- Create Pull Request v8.0.0. Full changelog: https://github.com/peter-evans/create-pull-request/compare/v7.0.11...v8.0.0
- Create Pull Request v7.0.11. Full changelog: https://github.com/peter-evans/create-pull-request/compare/v7.0.10...v7.0.11
- Create Pull Request v7.0.10: fixes an issue where updating a pull request failed when targeting a forked repository with the same owner as its parent. Full changelog: https://github.com/peter-evans/create-pull-request/compare/v7.0.9...v7.0.10
- Create Pull Request v7.0.9: fixes an incompatibility with the recently released actions/checkout@v6.

... (truncated)
You can trigger a rebase of this PR by commenting `@dependabot rebase`.
---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits
that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after
your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge
and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating
it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all
of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop
Dependabot creating any more for this major version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop
Dependabot creating any more for this minor version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop
Dependabot creating any more for this dependency (unless you reopen the
PR or upgrade to it yourself)
> **Note**
> Automatic rebases have been disabled on this pull request as it has
been open for over 30 days.
---------
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Nick Tindle
---
.github/workflows/classic-frontend-ci.yml | 2 +-
.github/workflows/claude-dependabot.yml | 1 +
2 files changed, 2 insertions(+), 1 deletion(-)
diff --git a/.github/workflows/classic-frontend-ci.yml b/.github/workflows/classic-frontend-ci.yml
index 13d518db57..ebd9d92e3e 100644
--- a/.github/workflows/classic-frontend-ci.yml
+++ b/.github/workflows/classic-frontend-ci.yml
@@ -49,7 +49,7 @@ jobs:
- name: Create PR ${{ env.BUILD_BRANCH }} -> ${{ github.ref_name }}
if: github.event_name == 'push'
- uses: peter-evans/create-pull-request@v7
+ uses: peter-evans/create-pull-request@v8
with:
add-paths: classic/frontend/build/web
base: ${{ github.ref_name }}
diff --git a/.github/workflows/claude-dependabot.yml b/.github/workflows/claude-dependabot.yml
index ff6935dc2d..6fb052e8f3 100644
--- a/.github/workflows/claude-dependabot.yml
+++ b/.github/workflows/claude-dependabot.yml
@@ -309,6 +309,7 @@ jobs:
uses: anthropics/claude-code-action@v1
with:
claude_code_oauth_token: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }}
+ allowed_bots: "dependabot[bot]"
claude_args: |
--allowedTools "Bash(npm:*),Bash(pnpm:*),Bash(poetry:*),Bash(git:*),Edit,Replace,NotebookEditCell,mcp__github_inline_comment__create_inline_comment,Bash(gh pr comment:*), Bash(gh pr diff:*), Bash(gh pr view:*)"
prompt: |
From 98dd1a9480924762ef022160636c310e88d34dc5 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Sun, 8 Feb 2026 23:40:15 +0000
Subject: [PATCH 03/18] chore(libs/deps): Bump cryptography from 45.0.6 to
46.0.1 in /autogpt_platform/autogpt_libs (#10968)
Bumps [cryptography](https://github.com/pyca/cryptography) from 45.0.6
to 46.0.1.
Changelog, sourced from cryptography's changelog (truncated):

46.0.1 - 2025-09-16

- Fixed an issue where users installing via `pip` on Python 3.14 development versions would not properly install a dependency.
- Fixed an issue building the free-threaded macOS 3.14 wheels.

46.0.0 - 2025-09-16

- BACKWARDS INCOMPATIBLE: Support for Python 3.7 has been removed.
- Support for OpenSSL < 3.0 is deprecated and will be removed in the next release.
- Support for x86_64 macOS (including publishing wheels) is deprecated and will be removed in two releases. We will switch to publishing an arm64-only wheel for macOS.
- Support for 32-bit Windows (including publishing wheels) is deprecated and will be removed in two releases. Users should move to a 64-bit Python installation.
- Updated Windows, macOS, and Linux wheels to be compiled with OpenSSL 3.5.3.
- We now build ppc64le manylinux wheels and publish them to PyPI.
- We now build win_arm64 (Windows on Arm) wheels and publish them to PyPI.
- Added support for free-threaded Python 3.14.
- Removed the deprecated `get_attribute_for_oid` method on `cryptography.x509.CertificateSigningRequest`. Users should use `cryptography.x509.Attributes.get_attribute_for_oid` instead (see the migration sketch below).
- Removed the deprecated `CAST5`, `SEED`, `IDEA`, and `Blowfish` classes from the cipher module. These are still available in `cryptography.hazmat.decrepit`.
- In X.509, when performing a PSS signature with a SHA-3 hash, it is now encoded with the official NIST SHA3 OID.

45.0.7 - 2025-09-01

- Added a function to support an upcoming `pyOpenSSL` release.

... (truncated)
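
Not part of this dependency bump, but a hedged migration sketch for the removed `get_attribute_for_oid` method mentioned above; `csr_pem` is a placeholder for a PEM-encoded CSR you already have:

```python
# Hedged sketch: migrating off the CSR helper removed in cryptography 46.0.
from cryptography import x509
from cryptography.x509.oid import AttributeOID

csr = x509.load_pem_x509_csr(csr_pem)  # csr_pem: placeholder bytes, defined elsewhere

# Removed in 46.0:
#   value = csr.get_attribute_for_oid(AttributeOID.CHALLENGE_PASSWORD)
# Replacement:
attribute = csr.attributes.get_attribute_for_oid(AttributeOID.CHALLENGE_PASSWORD)
print(attribute.value)  # raw bytes of the attribute
```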
You can trigger a rebase of this PR by commenting `@dependabot rebase`.
---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits
that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after
your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge
and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating
it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all
of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop
Dependabot creating any more for this major version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop
Dependabot creating any more for this minor version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop
Dependabot creating any more for this dependency (unless you reopen the
PR or upgrade to it yourself)
> **Note**
> Automatic rebases have been disabled on this pull request as it has
been open for over 30 days.
---------
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Nick Tindle
---
autogpt_platform/autogpt_libs/poetry.lock | 270 ++++++++++---------
autogpt_platform/autogpt_libs/pyproject.toml | 2 +-
autogpt_platform/backend/poetry.lock | 105 ++++----
autogpt_platform/backend/pyproject.toml | 2 +-
4 files changed, 211 insertions(+), 168 deletions(-)
diff --git a/autogpt_platform/autogpt_libs/poetry.lock b/autogpt_platform/autogpt_libs/poetry.lock
index 434f08fd0a..59c5351a6a 100644
--- a/autogpt_platform/autogpt_libs/poetry.lock
+++ b/autogpt_platform/autogpt_libs/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand.
+# This file is automatically @generated by Poetry 2.2.1 and should not be changed by hand.
[[package]]
name = "annotated-doc"
@@ -67,7 +67,7 @@ description = "Backport of asyncio.Runner, a context manager that controls event
optional = false
python-versions = "<3.11,>=3.8"
groups = ["dev"]
-markers = "python_version < \"3.11\""
+markers = "python_version == \"3.10\""
files = [
{file = "backports_asyncio_runner-1.2.0-py3-none-any.whl", hash = "sha256:0da0a936a8aeb554eccb426dc55af3ba63bcdc69fa1a600b5bb305413a4477b5"},
{file = "backports_asyncio_runner-1.2.0.tar.gz", hash = "sha256:a5aa7b2b7d8f8bfcaa2b57313f70792df84e32a2a746f585213373f900b42162"},
@@ -99,84 +99,101 @@ files = [
[[package]]
name = "cffi"
-version = "1.17.1"
+version = "2.0.0"
description = "Foreign Function Interface for Python calling C code."
optional = false
-python-versions = ">=3.8"
+python-versions = ">=3.9"
groups = ["main"]
markers = "platform_python_implementation != \"PyPy\""
files = [
- {file = "cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14"},
- {file = "cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67"},
- {file = "cffi-1.17.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382"},
- {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702"},
- {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3"},
- {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6"},
- {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17"},
- {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8"},
- {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e"},
- {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be"},
- {file = "cffi-1.17.1-cp310-cp310-win32.whl", hash = "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c"},
- {file = "cffi-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15"},
- {file = "cffi-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401"},
- {file = "cffi-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf"},
- {file = "cffi-1.17.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4"},
- {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41"},
- {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1"},
- {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6"},
- {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d"},
- {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6"},
- {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f"},
- {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b"},
- {file = "cffi-1.17.1-cp311-cp311-win32.whl", hash = "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655"},
- {file = "cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0"},
- {file = "cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4"},
- {file = "cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c"},
- {file = "cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36"},
- {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5"},
- {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff"},
- {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99"},
- {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93"},
- {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3"},
- {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8"},
- {file = "cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65"},
- {file = "cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903"},
- {file = "cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e"},
- {file = "cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2"},
- {file = "cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3"},
- {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683"},
- {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5"},
- {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4"},
- {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd"},
- {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed"},
- {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9"},
- {file = "cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d"},
- {file = "cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a"},
- {file = "cffi-1.17.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b"},
- {file = "cffi-1.17.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964"},
- {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9"},
- {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc"},
- {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c"},
- {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1"},
- {file = "cffi-1.17.1-cp38-cp38-win32.whl", hash = "sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8"},
- {file = "cffi-1.17.1-cp38-cp38-win_amd64.whl", hash = "sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1"},
- {file = "cffi-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16"},
- {file = "cffi-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36"},
- {file = "cffi-1.17.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8"},
- {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576"},
- {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87"},
- {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0"},
- {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3"},
- {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595"},
- {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a"},
- {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e"},
- {file = "cffi-1.17.1-cp39-cp39-win32.whl", hash = "sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7"},
- {file = "cffi-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662"},
- {file = "cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824"},
+ {file = "cffi-2.0.0-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:0cf2d91ecc3fcc0625c2c530fe004f82c110405f101548512cce44322fa8ac44"},
+ {file = "cffi-2.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f73b96c41e3b2adedc34a7356e64c8eb96e03a3782b535e043a986276ce12a49"},
+ {file = "cffi-2.0.0-cp310-cp310-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:53f77cbe57044e88bbd5ed26ac1d0514d2acf0591dd6bb02a3ae37f76811b80c"},
+ {file = "cffi-2.0.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3e837e369566884707ddaf85fc1744b47575005c0a229de3327f8f9a20f4efeb"},
+ {file = "cffi-2.0.0-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:5eda85d6d1879e692d546a078b44251cdd08dd1cfb98dfb77b670c97cee49ea0"},
+ {file = "cffi-2.0.0-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:9332088d75dc3241c702d852d4671613136d90fa6881da7d770a483fd05248b4"},
+ {file = "cffi-2.0.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fc7de24befaeae77ba923797c7c87834c73648a05a4bde34b3b7e5588973a453"},
+ {file = "cffi-2.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:cf364028c016c03078a23b503f02058f1814320a56ad535686f90565636a9495"},
+ {file = "cffi-2.0.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e11e82b744887154b182fd3e7e8512418446501191994dbf9c9fc1f32cc8efd5"},
+ {file = "cffi-2.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8ea985900c5c95ce9db1745f7933eeef5d314f0565b27625d9a10ec9881e1bfb"},
+ {file = "cffi-2.0.0-cp310-cp310-win32.whl", hash = "sha256:1f72fb8906754ac8a2cc3f9f5aaa298070652a0ffae577e0ea9bd480dc3c931a"},
+ {file = "cffi-2.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:b18a3ed7d5b3bd8d9ef7a8cb226502c6bf8308df1525e1cc676c3680e7176739"},
+ {file = "cffi-2.0.0-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:b4c854ef3adc177950a8dfc81a86f5115d2abd545751a304c5bcf2c2c7283cfe"},
+ {file = "cffi-2.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2de9a304e27f7596cd03d16f1b7c72219bd944e99cc52b84d0145aefb07cbd3c"},
+ {file = "cffi-2.0.0-cp311-cp311-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:baf5215e0ab74c16e2dd324e8ec067ef59e41125d3eade2b863d294fd5035c92"},
+ {file = "cffi-2.0.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:730cacb21e1bdff3ce90babf007d0a0917cc3e6492f336c2f0134101e0944f93"},
+ {file = "cffi-2.0.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:6824f87845e3396029f3820c206e459ccc91760e8fa24422f8b0c3d1731cbec5"},
+ {file = "cffi-2.0.0-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:9de40a7b0323d889cf8d23d1ef214f565ab154443c42737dfe52ff82cf857664"},
+ {file = "cffi-2.0.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8941aaadaf67246224cee8c3803777eed332a19d909b47e29c9842ef1e79ac26"},
+ {file = "cffi-2.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:a05d0c237b3349096d3981b727493e22147f934b20f6f125a3eba8f994bec4a9"},
+ {file = "cffi-2.0.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:94698a9c5f91f9d138526b48fe26a199609544591f859c870d477351dc7b2414"},
+ {file = "cffi-2.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:5fed36fccc0612a53f1d4d9a816b50a36702c28a2aa880cb8a122b3466638743"},
+ {file = "cffi-2.0.0-cp311-cp311-win32.whl", hash = "sha256:c649e3a33450ec82378822b3dad03cc228b8f5963c0c12fc3b1e0ab940f768a5"},
+ {file = "cffi-2.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:66f011380d0e49ed280c789fbd08ff0d40968ee7b665575489afa95c98196ab5"},
+ {file = "cffi-2.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:c6638687455baf640e37344fe26d37c404db8b80d037c3d29f58fe8d1c3b194d"},
+ {file = "cffi-2.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6d02d6655b0e54f54c4ef0b94eb6be0607b70853c45ce98bd278dc7de718be5d"},
+ {file = "cffi-2.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8eca2a813c1cb7ad4fb74d368c2ffbbb4789d377ee5bb8df98373c2cc0dee76c"},
+ {file = "cffi-2.0.0-cp312-cp312-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:21d1152871b019407d8ac3985f6775c079416c282e431a4da6afe7aefd2bccbe"},
+ {file = "cffi-2.0.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b21e08af67b8a103c71a250401c78d5e0893beff75e28c53c98f4de42f774062"},
+ {file = "cffi-2.0.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:1e3a615586f05fc4065a8b22b8152f0c1b00cdbc60596d187c2a74f9e3036e4e"},
+ {file = "cffi-2.0.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:81afed14892743bbe14dacb9e36d9e0e504cd204e0b165062c488942b9718037"},
+ {file = "cffi-2.0.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3e17ed538242334bf70832644a32a7aae3d83b57567f9fd60a26257e992b79ba"},
+ {file = "cffi-2.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3925dd22fa2b7699ed2617149842d2e6adde22b262fcbfada50e3d195e4b3a94"},
+ {file = "cffi-2.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2c8f814d84194c9ea681642fd164267891702542f028a15fc97d4674b6206187"},
+ {file = "cffi-2.0.0-cp312-cp312-win32.whl", hash = "sha256:da902562c3e9c550df360bfa53c035b2f241fed6d9aef119048073680ace4a18"},
+ {file = "cffi-2.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:da68248800ad6320861f129cd9c1bf96ca849a2771a59e0344e88681905916f5"},
+ {file = "cffi-2.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:4671d9dd5ec934cb9a73e7ee9676f9362aba54f7f34910956b84d727b0d73fb6"},
+ {file = "cffi-2.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:00bdf7acc5f795150faa6957054fbbca2439db2f775ce831222b66f192f03beb"},
+ {file = "cffi-2.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:45d5e886156860dc35862657e1494b9bae8dfa63bf56796f2fb56e1679fc0bca"},
+ {file = "cffi-2.0.0-cp313-cp313-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:07b271772c100085dd28b74fa0cd81c8fb1a3ba18b21e03d7c27f3436a10606b"},
+ {file = "cffi-2.0.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d48a880098c96020b02d5a1f7d9251308510ce8858940e6fa99ece33f610838b"},
+ {file = "cffi-2.0.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f93fd8e5c8c0a4aa1f424d6173f14a892044054871c771f8566e4008eaa359d2"},
+ {file = "cffi-2.0.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:dd4f05f54a52fb558f1ba9f528228066954fee3ebe629fc1660d874d040ae5a3"},
+ {file = "cffi-2.0.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c8d3b5532fc71b7a77c09192b4a5a200ea992702734a2e9279a37f2478236f26"},
+ {file = "cffi-2.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d9b29c1f0ae438d5ee9acb31cadee00a58c46cc9c0b2f9038c6b0b3470877a8c"},
+ {file = "cffi-2.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6d50360be4546678fc1b79ffe7a66265e28667840010348dd69a314145807a1b"},
+ {file = "cffi-2.0.0-cp313-cp313-win32.whl", hash = "sha256:74a03b9698e198d47562765773b4a8309919089150a0bb17d829ad7b44b60d27"},
+ {file = "cffi-2.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:19f705ada2530c1167abacb171925dd886168931e0a7b78f5bffcae5c6b5be75"},
+ {file = "cffi-2.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:256f80b80ca3853f90c21b23ee78cd008713787b1b1e93eae9f3d6a7134abd91"},
+ {file = "cffi-2.0.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:fc33c5141b55ed366cfaad382df24fe7dcbc686de5be719b207bb248e3053dc5"},
+ {file = "cffi-2.0.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c654de545946e0db659b3400168c9ad31b5d29593291482c43e3564effbcee13"},
+ {file = "cffi-2.0.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:24b6f81f1983e6df8db3adc38562c83f7d4a0c36162885ec7f7b77c7dcbec97b"},
+ {file = "cffi-2.0.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:12873ca6cb9b0f0d3a0da705d6086fe911591737a59f28b7936bdfed27c0d47c"},
+ {file = "cffi-2.0.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:d9b97165e8aed9272a6bb17c01e3cc5871a594a446ebedc996e2397a1c1ea8ef"},
+ {file = "cffi-2.0.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:afb8db5439b81cf9c9d0c80404b60c3cc9c3add93e114dcae767f1477cb53775"},
+ {file = "cffi-2.0.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:737fe7d37e1a1bffe70bd5754ea763a62a066dc5913ca57e957824b72a85e205"},
+ {file = "cffi-2.0.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:38100abb9d1b1435bc4cc340bb4489635dc2f0da7456590877030c9b3d40b0c1"},
+ {file = "cffi-2.0.0-cp314-cp314-win32.whl", hash = "sha256:087067fa8953339c723661eda6b54bc98c5625757ea62e95eb4898ad5e776e9f"},
+ {file = "cffi-2.0.0-cp314-cp314-win_amd64.whl", hash = "sha256:203a48d1fb583fc7d78a4c6655692963b860a417c0528492a6bc21f1aaefab25"},
+ {file = "cffi-2.0.0-cp314-cp314-win_arm64.whl", hash = "sha256:dbd5c7a25a7cb98f5ca55d258b103a2054f859a46ae11aaf23134f9cc0d356ad"},
+ {file = "cffi-2.0.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:9a67fc9e8eb39039280526379fb3a70023d77caec1852002b4da7e8b270c4dd9"},
+ {file = "cffi-2.0.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:7a66c7204d8869299919db4d5069a82f1561581af12b11b3c9f48c584eb8743d"},
+ {file = "cffi-2.0.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7cc09976e8b56f8cebd752f7113ad07752461f48a58cbba644139015ac24954c"},
+ {file = "cffi-2.0.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:92b68146a71df78564e4ef48af17551a5ddd142e5190cdf2c5624d0c3ff5b2e8"},
+ {file = "cffi-2.0.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b1e74d11748e7e98e2f426ab176d4ed720a64412b6a15054378afdb71e0f37dc"},
+ {file = "cffi-2.0.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:28a3a209b96630bca57cce802da70c266eb08c6e97e5afd61a75611ee6c64592"},
+ {file = "cffi-2.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:7553fb2090d71822f02c629afe6042c299edf91ba1bf94951165613553984512"},
+ {file = "cffi-2.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:6c6c373cfc5c83a975506110d17457138c8c63016b563cc9ed6e056a82f13ce4"},
+ {file = "cffi-2.0.0-cp314-cp314t-win32.whl", hash = "sha256:1fc9ea04857caf665289b7a75923f2c6ed559b8298a1b8c49e59f7dd95c8481e"},
+ {file = "cffi-2.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:d68b6cef7827e8641e8ef16f4494edda8b36104d79773a334beaa1e3521430f6"},
+ {file = "cffi-2.0.0-cp314-cp314t-win_arm64.whl", hash = "sha256:0a1527a803f0a659de1af2e1fd700213caba79377e27e4693648c2923da066f9"},
+ {file = "cffi-2.0.0-cp39-cp39-macosx_10_13_x86_64.whl", hash = "sha256:fe562eb1a64e67dd297ccc4f5addea2501664954f2692b69a76449ec7913ecbf"},
+ {file = "cffi-2.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:de8dad4425a6ca6e4e5e297b27b5c824ecc7581910bf9aee86cb6835e6812aa7"},
+ {file = "cffi-2.0.0-cp39-cp39-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:4647afc2f90d1ddd33441e5b0e85b16b12ddec4fca55f0d9671fef036ecca27c"},
+ {file = "cffi-2.0.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3f4d46d8b35698056ec29bca21546e1551a205058ae1a181d871e278b0b28165"},
+ {file = "cffi-2.0.0-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:e6e73b9e02893c764e7e8d5bb5ce277f1a009cd5243f8228f75f842bf937c534"},
+ {file = "cffi-2.0.0-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:cb527a79772e5ef98fb1d700678fe031e353e765d1ca2d409c92263c6d43e09f"},
+ {file = "cffi-2.0.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:61d028e90346df14fedc3d1e5441df818d095f3b87d286825dfcbd6459b7ef63"},
+ {file = "cffi-2.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:0f6084a0ea23d05d20c3edcda20c3d006f9b6f3fefeac38f59262e10cef47ee2"},
+ {file = "cffi-2.0.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:1cd13c99ce269b3ed80b417dcd591415d3372bcac067009b6e0f59c7d4015e65"},
+ {file = "cffi-2.0.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:89472c9762729b5ae1ad974b777416bfda4ac5642423fa93bd57a09204712322"},
+ {file = "cffi-2.0.0-cp39-cp39-win32.whl", hash = "sha256:2081580ebb843f759b9f617314a24ed5738c51d2aee65d31e02f6f7a2b97707a"},
+ {file = "cffi-2.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:b882b3df248017dba09d6b16defe9b5c407fe32fc7c65a9c69798e6175601be9"},
+ {file = "cffi-2.0.0.tar.gz", hash = "sha256:44d1b5909021139fe36001ae048dbdde8214afa20200eda0f64c068cac5d5529"},
]
[package.dependencies]
-pycparser = "*"
+pycparser = {version = "*", markers = "implementation_name != \"PyPy\""}
[[package]]
name = "charset-normalizer"
@@ -413,62 +430,75 @@ toml = ["tomli ; python_full_version <= \"3.11.0a6\""]
[[package]]
name = "cryptography"
-version = "45.0.6"
+version = "46.0.4"
description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
optional = false
-python-versions = "!=3.9.0,!=3.9.1,>=3.7"
+python-versions = "!=3.9.0,!=3.9.1,>=3.8"
groups = ["main"]
files = [
- {file = "cryptography-45.0.6-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:048e7ad9e08cf4c0ab07ff7f36cc3115924e22e2266e034450a890d9e312dd74"},
- {file = "cryptography-45.0.6-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:44647c5d796f5fc042bbc6d61307d04bf29bccb74d188f18051b635f20a9c75f"},
- {file = "cryptography-45.0.6-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:e40b80ecf35ec265c452eea0ba94c9587ca763e739b8e559c128d23bff7ebbbf"},
- {file = "cryptography-45.0.6-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:00e8724bdad672d75e6f069b27970883179bd472cd24a63f6e620ca7e41cc0c5"},
- {file = "cryptography-45.0.6-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:7a3085d1b319d35296176af31c90338eeb2ddac8104661df79f80e1d9787b8b2"},
- {file = "cryptography-45.0.6-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:1b7fa6a1c1188c7ee32e47590d16a5a0646270921f8020efc9a511648e1b2e08"},
- {file = "cryptography-45.0.6-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:275ba5cc0d9e320cd70f8e7b96d9e59903c815ca579ab96c1e37278d231fc402"},
- {file = "cryptography-45.0.6-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:f4028f29a9f38a2025abedb2e409973709c660d44319c61762202206ed577c42"},
- {file = "cryptography-45.0.6-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ee411a1b977f40bd075392c80c10b58025ee5c6b47a822a33c1198598a7a5f05"},
- {file = "cryptography-45.0.6-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:e2a21a8eda2d86bb604934b6b37691585bd095c1f788530c1fcefc53a82b3453"},
- {file = "cryptography-45.0.6-cp311-abi3-win32.whl", hash = "sha256:d063341378d7ee9c91f9d23b431a3502fc8bfacd54ef0a27baa72a0843b29159"},
- {file = "cryptography-45.0.6-cp311-abi3-win_amd64.whl", hash = "sha256:833dc32dfc1e39b7376a87b9a6a4288a10aae234631268486558920029b086ec"},
- {file = "cryptography-45.0.6-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:3436128a60a5e5490603ab2adbabc8763613f638513ffa7d311c900a8349a2a0"},
- {file = "cryptography-45.0.6-cp37-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0d9ef57b6768d9fa58e92f4947cea96ade1233c0e236db22ba44748ffedca394"},
- {file = "cryptography-45.0.6-cp37-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ea3c42f2016a5bbf71825537c2ad753f2870191134933196bee408aac397b3d9"},
- {file = "cryptography-45.0.6-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:20ae4906a13716139d6d762ceb3e0e7e110f7955f3bc3876e3a07f5daadec5f3"},
- {file = "cryptography-45.0.6-cp37-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:2dac5ec199038b8e131365e2324c03d20e97fe214af051d20c49db129844e8b3"},
- {file = "cryptography-45.0.6-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:18f878a34b90d688982e43f4b700408b478102dd58b3e39de21b5ebf6509c301"},
- {file = "cryptography-45.0.6-cp37-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:5bd6020c80c5b2b2242d6c48487d7b85700f5e0038e67b29d706f98440d66eb5"},
- {file = "cryptography-45.0.6-cp37-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:eccddbd986e43014263eda489abbddfbc287af5cddfd690477993dbb31e31016"},
- {file = "cryptography-45.0.6-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:550ae02148206beb722cfe4ef0933f9352bab26b087af00e48fdfb9ade35c5b3"},
- {file = "cryptography-45.0.6-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:5b64e668fc3528e77efa51ca70fadcd6610e8ab231e3e06ae2bab3b31c2b8ed9"},
- {file = "cryptography-45.0.6-cp37-abi3-win32.whl", hash = "sha256:780c40fb751c7d2b0c6786ceee6b6f871e86e8718a8ff4bc35073ac353c7cd02"},
- {file = "cryptography-45.0.6-cp37-abi3-win_amd64.whl", hash = "sha256:20d15aed3ee522faac1a39fbfdfee25d17b1284bafd808e1640a74846d7c4d1b"},
- {file = "cryptography-45.0.6-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:705bb7c7ecc3d79a50f236adda12ca331c8e7ecfbea51edd931ce5a7a7c4f012"},
- {file = "cryptography-45.0.6-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:826b46dae41a1155a0c0e66fafba43d0ede1dc16570b95e40c4d83bfcf0a451d"},
- {file = "cryptography-45.0.6-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:cc4d66f5dc4dc37b89cfef1bd5044387f7a1f6f0abb490815628501909332d5d"},
- {file = "cryptography-45.0.6-pp310-pypy310_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:f68f833a9d445cc49f01097d95c83a850795921b3f7cc6488731e69bde3288da"},
- {file = "cryptography-45.0.6-pp310-pypy310_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:3b5bf5267e98661b9b888a9250d05b063220dfa917a8203744454573c7eb79db"},
- {file = "cryptography-45.0.6-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:2384f2ab18d9be88a6e4f8972923405e2dbb8d3e16c6b43f15ca491d7831bd18"},
- {file = "cryptography-45.0.6-pp311-pypy311_pp73-macosx_10_9_x86_64.whl", hash = "sha256:fc022c1fa5acff6def2fc6d7819bbbd31ccddfe67d075331a65d9cfb28a20983"},
- {file = "cryptography-45.0.6-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:3de77e4df42ac8d4e4d6cdb342d989803ad37707cf8f3fbf7b088c9cbdd46427"},
- {file = "cryptography-45.0.6-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:599c8d7df950aa68baa7e98f7b73f4f414c9f02d0e8104a30c0182a07732638b"},
- {file = "cryptography-45.0.6-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:31a2b9a10530a1cb04ffd6aa1cd4d3be9ed49f7d77a4dafe198f3b382f41545c"},
- {file = "cryptography-45.0.6-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:e5b3dda1b00fb41da3af4c5ef3f922a200e33ee5ba0f0bc9ecf0b0c173958385"},
- {file = "cryptography-45.0.6-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:629127cfdcdc6806dfe234734d7cb8ac54edaf572148274fa377a7d3405b0043"},
- {file = "cryptography-45.0.6.tar.gz", hash = "sha256:5c966c732cf6e4a276ce83b6e4c729edda2df6929083a952cc7da973c539c719"},
+ {file = "cryptography-46.0.4-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:281526e865ed4166009e235afadf3a4c4cba6056f99336a99efba65336fd5485"},
+ {file = "cryptography-46.0.4-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5f14fba5bf6f4390d7ff8f086c566454bff0411f6d8aa7af79c88b6f9267aecc"},
+ {file = "cryptography-46.0.4-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:47bcd19517e6389132f76e2d5303ded6cf3f78903da2158a671be8de024f4cd0"},
+ {file = "cryptography-46.0.4-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:01df4f50f314fbe7009f54046e908d1754f19d0c6d3070df1e6268c5a4af09fa"},
+ {file = "cryptography-46.0.4-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:5aa3e463596b0087b3da0dbe2b2487e9fc261d25da85754e30e3b40637d61f81"},
+ {file = "cryptography-46.0.4-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:0a9ad24359fee86f131836a9ac3bffc9329e956624a2d379b613f8f8abaf5255"},
+ {file = "cryptography-46.0.4-cp311-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:dc1272e25ef673efe72f2096e92ae39dea1a1a450dd44918b15351f72c5a168e"},
+ {file = "cryptography-46.0.4-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:de0f5f4ec8711ebc555f54735d4c673fc34b65c44283895f1a08c2b49d2fd99c"},
+ {file = "cryptography-46.0.4-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:eeeb2e33d8dbcccc34d64651f00a98cb41b2dc69cef866771a5717e6734dfa32"},
+ {file = "cryptography-46.0.4-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:3d425eacbc9aceafd2cb429e42f4e5d5633c6f873f5e567077043ef1b9bbf616"},
+ {file = "cryptography-46.0.4-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:91627ebf691d1ea3976a031b61fb7bac1ccd745afa03602275dda443e11c8de0"},
+ {file = "cryptography-46.0.4-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:2d08bc22efd73e8854b0b7caff402d735b354862f1145d7be3b9c0f740fef6a0"},
+ {file = "cryptography-46.0.4-cp311-abi3-win32.whl", hash = "sha256:82a62483daf20b8134f6e92898da70d04d0ef9a75829d732ea1018678185f4f5"},
+ {file = "cryptography-46.0.4-cp311-abi3-win_amd64.whl", hash = "sha256:6225d3ebe26a55dbc8ead5ad1265c0403552a63336499564675b29eb3184c09b"},
+ {file = "cryptography-46.0.4-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:485e2b65d25ec0d901bca7bcae0f53b00133bf3173916d8e421f6fddde103908"},
+ {file = "cryptography-46.0.4-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:078e5f06bd2fa5aea5a324f2a09f914b1484f1d0c2a4d6a8a28c74e72f65f2da"},
+ {file = "cryptography-46.0.4-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:dce1e4f068f03008da7fa51cc7abc6ddc5e5de3e3d1550334eaf8393982a5829"},
+ {file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:2067461c80271f422ee7bdbe79b9b4be54a5162e90345f86a23445a0cf3fd8a2"},
+ {file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:c92010b58a51196a5f41c3795190203ac52edfd5dc3ff99149b4659eba9d2085"},
+ {file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:829c2b12bbc5428ab02d6b7f7e9bbfd53e33efd6672d21341f2177470171ad8b"},
+ {file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:62217ba44bf81b30abaeda1488686a04a702a261e26f87db51ff61d9d3510abd"},
+ {file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:9c2da296c8d3415b93e6053f5a728649a87a48ce084a9aaf51d6e46c87c7f2d2"},
+ {file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:9b34d8ba84454641a6bf4d6762d15847ecbd85c1316c0a7984e6e4e9f748ec2e"},
+ {file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:df4a817fa7138dd0c96c8c8c20f04b8aaa1fac3bbf610913dcad8ea82e1bfd3f"},
+ {file = "cryptography-46.0.4-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:b1de0ebf7587f28f9190b9cb526e901bf448c9e6a99655d2b07fff60e8212a82"},
+ {file = "cryptography-46.0.4-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:9b4d17bc7bd7cdd98e3af40b441feaea4c68225e2eb2341026c84511ad246c0c"},
+ {file = "cryptography-46.0.4-cp314-cp314t-win32.whl", hash = "sha256:c411f16275b0dea722d76544a61d6421e2cc829ad76eec79280dbdc9ddf50061"},
+ {file = "cryptography-46.0.4-cp314-cp314t-win_amd64.whl", hash = "sha256:728fedc529efc1439eb6107b677f7f7558adab4553ef8669f0d02d42d7b959a7"},
+ {file = "cryptography-46.0.4-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:a9556ba711f7c23f77b151d5798f3ac44a13455cc68db7697a1096e6d0563cab"},
+ {file = "cryptography-46.0.4-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8bf75b0259e87fa70bddc0b8b4078b76e7fd512fd9afae6c1193bcf440a4dbef"},
+ {file = "cryptography-46.0.4-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3c268a3490df22270955966ba236d6bc4a8f9b6e4ffddb78aac535f1a5ea471d"},
+ {file = "cryptography-46.0.4-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:812815182f6a0c1d49a37893a303b44eaac827d7f0d582cecfc81b6427f22973"},
+ {file = "cryptography-46.0.4-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:a90e43e3ef65e6dcf969dfe3bb40cbf5aef0d523dff95bfa24256be172a845f4"},
+ {file = "cryptography-46.0.4-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:a05177ff6296644ef2876fce50518dffb5bcdf903c85250974fc8bc85d54c0af"},
+ {file = "cryptography-46.0.4-cp38-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:daa392191f626d50f1b136c9b4cf08af69ca8279d110ea24f5c2700054d2e263"},
+ {file = "cryptography-46.0.4-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:e07ea39c5b048e085f15923511d8121e4a9dc45cee4e3b970ca4f0d338f23095"},
+ {file = "cryptography-46.0.4-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:d5a45ddc256f492ce42a4e35879c5e5528c09cd9ad12420828c972951d8e016b"},
+ {file = "cryptography-46.0.4-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:6bb5157bf6a350e5b28aee23beb2d84ae6f5be390b2f8ee7ea179cda077e1019"},
+ {file = "cryptography-46.0.4-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:dd5aba870a2c40f87a3af043e0dee7d9eb02d4aff88a797b48f2b43eff8c3ab4"},
+ {file = "cryptography-46.0.4-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:93d8291da8d71024379ab2cb0b5c57915300155ad42e07f76bea6ad838d7e59b"},
+ {file = "cryptography-46.0.4-cp38-abi3-win32.whl", hash = "sha256:0563655cb3c6d05fb2afe693340bc050c30f9f34e15763361cf08e94749401fc"},
+ {file = "cryptography-46.0.4-cp38-abi3-win_amd64.whl", hash = "sha256:fa0900b9ef9c49728887d1576fd8d9e7e3ea872fa9b25ef9b64888adc434e976"},
+ {file = "cryptography-46.0.4-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:766330cce7416c92b5e90c3bb71b1b79521760cdcfc3a6a1a182d4c9fab23d2b"},
+ {file = "cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c236a44acfb610e70f6b3e1c3ca20ff24459659231ef2f8c48e879e2d32b73da"},
+ {file = "cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:8a15fb869670efa8f83cbffbc8753c1abf236883225aed74cd179b720ac9ec80"},
+ {file = "cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:fdc3daab53b212472f1524d070735b2f0c214239df131903bae1d598016fa822"},
+ {file = "cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:44cc0675b27cadb71bdbb96099cca1fa051cd11d2ade09e5cd3a2edb929ed947"},
+ {file = "cryptography-46.0.4-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:be8c01a7d5a55f9a47d1888162b76c8f49d62b234d88f0ff91a9fbebe32ffbc3"},
+ {file = "cryptography-46.0.4.tar.gz", hash = "sha256:bfd019f60f8abc2ed1b9be4ddc21cfef059c841d86d710bb69909a688cbb8f59"},
]
[package.dependencies]
-cffi = {version = ">=1.14", markers = "platform_python_implementation != \"PyPy\""}
+cffi = {version = ">=2.0.0", markers = "python_full_version >= \"3.9.0\" and platform_python_implementation != \"PyPy\""}
+typing-extensions = {version = ">=4.13.2", markers = "python_full_version < \"3.11.0\""}
[package.extras]
-docs = ["sphinx (>=5.3.0)", "sphinx-inline-tabs ; python_full_version >= \"3.8.0\"", "sphinx-rtd-theme (>=3.0.0) ; python_full_version >= \"3.8.0\""]
+docs = ["sphinx (>=5.3.0)", "sphinx-inline-tabs", "sphinx-rtd-theme (>=3.0.0)"]
docstest = ["pyenchant (>=3)", "readme-renderer (>=30.0)", "sphinxcontrib-spelling (>=7.3.1)"]
-nox = ["nox (>=2024.4.15)", "nox[uv] (>=2024.3.2) ; python_full_version >= \"3.8.0\""]
-pep8test = ["check-sdist ; python_full_version >= \"3.8.0\"", "click (>=8.0.1)", "mypy (>=1.4)", "ruff (>=0.3.6)"]
+nox = ["nox[uv] (>=2024.4.15)"]
+pep8test = ["check-sdist", "click (>=8.0.1)", "mypy (>=1.14)", "ruff (>=0.11.11)"]
sdist = ["build (>=1.0.0)"]
ssh = ["bcrypt (>=3.1.5)"]
-test = ["certifi (>=2024)", "cryptography-vectors (==45.0.6)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"]
+test = ["certifi (>=2024)", "cryptography-vectors (==46.0.4)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"]
test-randomorder = ["pytest-randomly"]
[[package]]
@@ -493,7 +523,7 @@ description = "Backport of PEP 654 (exception groups)"
optional = false
python-versions = ">=3.7"
groups = ["main", "dev"]
-markers = "python_version < \"3.11\""
+markers = "python_version == \"3.10\""
files = [
{file = "exceptiongroup-1.3.0-py3-none-any.whl", hash = "sha256:4d111e6e0c13d0644cad6ddaa7ed0261a0b36971f6d23e7ec9b4b9097da78a10"},
{file = "exceptiongroup-1.3.0.tar.gz", hash = "sha256:b241f5885f560bc56a59ee63ca4c6a8bfa46ae4ad651af316d4e81817bb9fd88"},
@@ -1650,7 +1680,7 @@ description = "C parser in Python"
optional = false
python-versions = ">=3.8"
groups = ["main"]
-markers = "platform_python_implementation != \"PyPy\""
+markers = "platform_python_implementation != \"PyPy\" and implementation_name != \"PyPy\""
files = [
{file = "pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"},
{file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"},
@@ -2515,7 +2545,7 @@ description = "A lil' TOML parser"
optional = false
python-versions = ">=3.8"
groups = ["dev"]
-markers = "python_version < \"3.11\""
+markers = "python_version == \"3.10\""
files = [
{file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"},
{file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"},
@@ -2863,4 +2893,4 @@ type = ["pytest-mypy"]
[metadata]
lock-version = "2.1"
python-versions = ">=3.10,<4.0"
-content-hash = "5f15a9c9381c9a374f3d18e087c23b1f1ba8cce192d6f67463a3e3a7a18fee44"
+content-hash = "cc80d3a129b84435a0f40132d073caa37858ca2427ed372fecfd810a61712d0c"
diff --git a/autogpt_platform/autogpt_libs/pyproject.toml b/autogpt_platform/autogpt_libs/pyproject.toml
index 451eaae40c..591be11d0b 100644
--- a/autogpt_platform/autogpt_libs/pyproject.toml
+++ b/autogpt_platform/autogpt_libs/pyproject.toml
@@ -9,7 +9,7 @@ packages = [{ include = "autogpt_libs" }]
[tool.poetry.dependencies]
python = ">=3.10,<4.0"
colorama = "^0.4.6"
-cryptography = "^45.0"
+cryptography = "^46.0"
expiringdict = "^1.2.2"
fastapi = "^0.128.0"
google-cloud-logging = "^3.13.0"
diff --git a/autogpt_platform/backend/poetry.lock b/autogpt_platform/backend/poetry.lock
index 000009593b..66b70be3b7 100644
--- a/autogpt_platform/backend/poetry.lock
+++ b/autogpt_platform/backend/poetry.lock
@@ -438,7 +438,7 @@ develop = true
[package.dependencies]
colorama = "^0.4.6"
-cryptography = "^45.0"
+cryptography = "^46.0"
expiringdict = "^1.2.2"
fastapi = "^0.128.0"
google-cloud-logging = "^3.13.0"
@@ -970,62 +970,75 @@ pytz = ">2021.1"
[[package]]
name = "cryptography"
-version = "45.0.7"
+version = "46.0.4"
description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
optional = false
-python-versions = "!=3.9.0,!=3.9.1,>=3.7"
+python-versions = "!=3.9.0,!=3.9.1,>=3.8"
groups = ["main"]
files = [
- {file = "cryptography-45.0.7-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:3be4f21c6245930688bd9e162829480de027f8bf962ede33d4f8ba7d67a00cee"},
- {file = "cryptography-45.0.7-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:67285f8a611b0ebc0857ced2081e30302909f571a46bfa7a3cc0ad303fe015c6"},
- {file = "cryptography-45.0.7-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:577470e39e60a6cd7780793202e63536026d9b8641de011ed9d8174da9ca5339"},
- {file = "cryptography-45.0.7-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:4bd3e5c4b9682bc112d634f2c6ccc6736ed3635fc3319ac2bb11d768cc5a00d8"},
- {file = "cryptography-45.0.7-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:465ccac9d70115cd4de7186e60cfe989de73f7bb23e8a7aa45af18f7412e75bf"},
- {file = "cryptography-45.0.7-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:16ede8a4f7929b4b7ff3642eba2bf79aa1d71f24ab6ee443935c0d269b6bc513"},
- {file = "cryptography-45.0.7-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:8978132287a9d3ad6b54fcd1e08548033cc09dc6aacacb6c004c73c3eb5d3ac3"},
- {file = "cryptography-45.0.7-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:b6a0e535baec27b528cb07a119f321ac024592388c5681a5ced167ae98e9fff3"},
- {file = "cryptography-45.0.7-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:a24ee598d10befaec178efdff6054bc4d7e883f615bfbcd08126a0f4931c83a6"},
- {file = "cryptography-45.0.7-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:fa26fa54c0a9384c27fcdc905a2fb7d60ac6e47d14bc2692145f2b3b1e2cfdbd"},
- {file = "cryptography-45.0.7-cp311-abi3-win32.whl", hash = "sha256:bef32a5e327bd8e5af915d3416ffefdbe65ed975b646b3805be81b23580b57b8"},
- {file = "cryptography-45.0.7-cp311-abi3-win_amd64.whl", hash = "sha256:3808e6b2e5f0b46d981c24d79648e5c25c35e59902ea4391a0dcb3e667bf7443"},
- {file = "cryptography-45.0.7-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:bfb4c801f65dd61cedfc61a83732327fafbac55a47282e6f26f073ca7a41c3b2"},
- {file = "cryptography-45.0.7-cp37-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:81823935e2f8d476707e85a78a405953a03ef7b7b4f55f93f7c2d9680e5e0691"},
- {file = "cryptography-45.0.7-cp37-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3994c809c17fc570c2af12c9b840d7cea85a9fd3e5c0e0491f4fa3c029216d59"},
- {file = "cryptography-45.0.7-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:dad43797959a74103cb59c5dac71409f9c27d34c8a05921341fb64ea8ccb1dd4"},
- {file = "cryptography-45.0.7-cp37-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:ce7a453385e4c4693985b4a4a3533e041558851eae061a58a5405363b098fcd3"},
- {file = "cryptography-45.0.7-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:b04f85ac3a90c227b6e5890acb0edbaf3140938dbecf07bff618bf3638578cf1"},
- {file = "cryptography-45.0.7-cp37-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:48c41a44ef8b8c2e80ca4527ee81daa4c527df3ecbc9423c41a420a9559d0e27"},
- {file = "cryptography-45.0.7-cp37-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:f3df7b3d0f91b88b2106031fd995802a2e9ae13e02c36c1fc075b43f420f3a17"},
- {file = "cryptography-45.0.7-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:dd342f085542f6eb894ca00ef70236ea46070c8a13824c6bde0dfdcd36065b9b"},
- {file = "cryptography-45.0.7-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:1993a1bb7e4eccfb922b6cd414f072e08ff5816702a0bdb8941c247a6b1b287c"},
- {file = "cryptography-45.0.7-cp37-abi3-win32.whl", hash = "sha256:18fcf70f243fe07252dcb1b268a687f2358025ce32f9f88028ca5c364b123ef5"},
- {file = "cryptography-45.0.7-cp37-abi3-win_amd64.whl", hash = "sha256:7285a89df4900ed3bfaad5679b1e668cb4b38a8de1ccbfc84b05f34512da0a90"},
- {file = "cryptography-45.0.7-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:de58755d723e86175756f463f2f0bddd45cc36fbd62601228a3f8761c9f58252"},
- {file = "cryptography-45.0.7-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:a20e442e917889d1a6b3c570c9e3fa2fdc398c20868abcea268ea33c024c4083"},
- {file = "cryptography-45.0.7-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:258e0dff86d1d891169b5af222d362468a9570e2532923088658aa866eb11130"},
- {file = "cryptography-45.0.7-pp310-pypy310_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:d97cf502abe2ab9eff8bd5e4aca274da8d06dd3ef08b759a8d6143f4ad65d4b4"},
- {file = "cryptography-45.0.7-pp310-pypy310_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:c987dad82e8c65ebc985f5dae5e74a3beda9d0a2a4daf8a1115f3772b59e5141"},
- {file = "cryptography-45.0.7-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:c13b1e3afd29a5b3b2656257f14669ca8fa8d7956d509926f0b130b600b50ab7"},
- {file = "cryptography-45.0.7-pp311-pypy311_pp73-macosx_10_9_x86_64.whl", hash = "sha256:4a862753b36620af6fc54209264f92c716367f2f0ff4624952276a6bbd18cbde"},
- {file = "cryptography-45.0.7-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:06ce84dc14df0bf6ea84666f958e6080cdb6fe1231be2a51f3fc1267d9f3fb34"},
- {file = "cryptography-45.0.7-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:d0c5c6bac22b177bf8da7435d9d27a6834ee130309749d162b26c3105c0795a9"},
- {file = "cryptography-45.0.7-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:2f641b64acc00811da98df63df7d59fd4706c0df449da71cb7ac39a0732b40ae"},
- {file = "cryptography-45.0.7-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:f5414a788ecc6ee6bc58560e85ca624258a55ca434884445440a810796ea0e0b"},
- {file = "cryptography-45.0.7-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:1f3d56f73595376f4244646dd5c5870c14c196949807be39e79e7bd9bac3da63"},
- {file = "cryptography-45.0.7.tar.gz", hash = "sha256:4b1654dfc64ea479c242508eb8c724044f1e964a47d1d1cacc5132292d851971"},
+ {file = "cryptography-46.0.4-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:281526e865ed4166009e235afadf3a4c4cba6056f99336a99efba65336fd5485"},
+ {file = "cryptography-46.0.4-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5f14fba5bf6f4390d7ff8f086c566454bff0411f6d8aa7af79c88b6f9267aecc"},
+ {file = "cryptography-46.0.4-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:47bcd19517e6389132f76e2d5303ded6cf3f78903da2158a671be8de024f4cd0"},
+ {file = "cryptography-46.0.4-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:01df4f50f314fbe7009f54046e908d1754f19d0c6d3070df1e6268c5a4af09fa"},
+ {file = "cryptography-46.0.4-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:5aa3e463596b0087b3da0dbe2b2487e9fc261d25da85754e30e3b40637d61f81"},
+ {file = "cryptography-46.0.4-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:0a9ad24359fee86f131836a9ac3bffc9329e956624a2d379b613f8f8abaf5255"},
+ {file = "cryptography-46.0.4-cp311-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:dc1272e25ef673efe72f2096e92ae39dea1a1a450dd44918b15351f72c5a168e"},
+ {file = "cryptography-46.0.4-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:de0f5f4ec8711ebc555f54735d4c673fc34b65c44283895f1a08c2b49d2fd99c"},
+ {file = "cryptography-46.0.4-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:eeeb2e33d8dbcccc34d64651f00a98cb41b2dc69cef866771a5717e6734dfa32"},
+ {file = "cryptography-46.0.4-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:3d425eacbc9aceafd2cb429e42f4e5d5633c6f873f5e567077043ef1b9bbf616"},
+ {file = "cryptography-46.0.4-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:91627ebf691d1ea3976a031b61fb7bac1ccd745afa03602275dda443e11c8de0"},
+ {file = "cryptography-46.0.4-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:2d08bc22efd73e8854b0b7caff402d735b354862f1145d7be3b9c0f740fef6a0"},
+ {file = "cryptography-46.0.4-cp311-abi3-win32.whl", hash = "sha256:82a62483daf20b8134f6e92898da70d04d0ef9a75829d732ea1018678185f4f5"},
+ {file = "cryptography-46.0.4-cp311-abi3-win_amd64.whl", hash = "sha256:6225d3ebe26a55dbc8ead5ad1265c0403552a63336499564675b29eb3184c09b"},
+ {file = "cryptography-46.0.4-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:485e2b65d25ec0d901bca7bcae0f53b00133bf3173916d8e421f6fddde103908"},
+ {file = "cryptography-46.0.4-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:078e5f06bd2fa5aea5a324f2a09f914b1484f1d0c2a4d6a8a28c74e72f65f2da"},
+ {file = "cryptography-46.0.4-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:dce1e4f068f03008da7fa51cc7abc6ddc5e5de3e3d1550334eaf8393982a5829"},
+ {file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:2067461c80271f422ee7bdbe79b9b4be54a5162e90345f86a23445a0cf3fd8a2"},
+ {file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:c92010b58a51196a5f41c3795190203ac52edfd5dc3ff99149b4659eba9d2085"},
+ {file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:829c2b12bbc5428ab02d6b7f7e9bbfd53e33efd6672d21341f2177470171ad8b"},
+ {file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:62217ba44bf81b30abaeda1488686a04a702a261e26f87db51ff61d9d3510abd"},
+ {file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:9c2da296c8d3415b93e6053f5a728649a87a48ce084a9aaf51d6e46c87c7f2d2"},
+ {file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:9b34d8ba84454641a6bf4d6762d15847ecbd85c1316c0a7984e6e4e9f748ec2e"},
+ {file = "cryptography-46.0.4-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:df4a817fa7138dd0c96c8c8c20f04b8aaa1fac3bbf610913dcad8ea82e1bfd3f"},
+ {file = "cryptography-46.0.4-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:b1de0ebf7587f28f9190b9cb526e901bf448c9e6a99655d2b07fff60e8212a82"},
+ {file = "cryptography-46.0.4-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:9b4d17bc7bd7cdd98e3af40b441feaea4c68225e2eb2341026c84511ad246c0c"},
+ {file = "cryptography-46.0.4-cp314-cp314t-win32.whl", hash = "sha256:c411f16275b0dea722d76544a61d6421e2cc829ad76eec79280dbdc9ddf50061"},
+ {file = "cryptography-46.0.4-cp314-cp314t-win_amd64.whl", hash = "sha256:728fedc529efc1439eb6107b677f7f7558adab4553ef8669f0d02d42d7b959a7"},
+ {file = "cryptography-46.0.4-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:a9556ba711f7c23f77b151d5798f3ac44a13455cc68db7697a1096e6d0563cab"},
+ {file = "cryptography-46.0.4-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8bf75b0259e87fa70bddc0b8b4078b76e7fd512fd9afae6c1193bcf440a4dbef"},
+ {file = "cryptography-46.0.4-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3c268a3490df22270955966ba236d6bc4a8f9b6e4ffddb78aac535f1a5ea471d"},
+ {file = "cryptography-46.0.4-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:812815182f6a0c1d49a37893a303b44eaac827d7f0d582cecfc81b6427f22973"},
+ {file = "cryptography-46.0.4-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:a90e43e3ef65e6dcf969dfe3bb40cbf5aef0d523dff95bfa24256be172a845f4"},
+ {file = "cryptography-46.0.4-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:a05177ff6296644ef2876fce50518dffb5bcdf903c85250974fc8bc85d54c0af"},
+ {file = "cryptography-46.0.4-cp38-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:daa392191f626d50f1b136c9b4cf08af69ca8279d110ea24f5c2700054d2e263"},
+ {file = "cryptography-46.0.4-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:e07ea39c5b048e085f15923511d8121e4a9dc45cee4e3b970ca4f0d338f23095"},
+ {file = "cryptography-46.0.4-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:d5a45ddc256f492ce42a4e35879c5e5528c09cd9ad12420828c972951d8e016b"},
+ {file = "cryptography-46.0.4-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:6bb5157bf6a350e5b28aee23beb2d84ae6f5be390b2f8ee7ea179cda077e1019"},
+ {file = "cryptography-46.0.4-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:dd5aba870a2c40f87a3af043e0dee7d9eb02d4aff88a797b48f2b43eff8c3ab4"},
+ {file = "cryptography-46.0.4-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:93d8291da8d71024379ab2cb0b5c57915300155ad42e07f76bea6ad838d7e59b"},
+ {file = "cryptography-46.0.4-cp38-abi3-win32.whl", hash = "sha256:0563655cb3c6d05fb2afe693340bc050c30f9f34e15763361cf08e94749401fc"},
+ {file = "cryptography-46.0.4-cp38-abi3-win_amd64.whl", hash = "sha256:fa0900b9ef9c49728887d1576fd8d9e7e3ea872fa9b25ef9b64888adc434e976"},
+ {file = "cryptography-46.0.4-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:766330cce7416c92b5e90c3bb71b1b79521760cdcfc3a6a1a182d4c9fab23d2b"},
+ {file = "cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c236a44acfb610e70f6b3e1c3ca20ff24459659231ef2f8c48e879e2d32b73da"},
+ {file = "cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:8a15fb869670efa8f83cbffbc8753c1abf236883225aed74cd179b720ac9ec80"},
+ {file = "cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:fdc3daab53b212472f1524d070735b2f0c214239df131903bae1d598016fa822"},
+ {file = "cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:44cc0675b27cadb71bdbb96099cca1fa051cd11d2ade09e5cd3a2edb929ed947"},
+ {file = "cryptography-46.0.4-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:be8c01a7d5a55f9a47d1888162b76c8f49d62b234d88f0ff91a9fbebe32ffbc3"},
+ {file = "cryptography-46.0.4.tar.gz", hash = "sha256:bfd019f60f8abc2ed1b9be4ddc21cfef059c841d86d710bb69909a688cbb8f59"},
]
[package.dependencies]
-cffi = {version = ">=1.14", markers = "platform_python_implementation != \"PyPy\""}
+cffi = {version = ">=2.0.0", markers = "python_full_version >= \"3.9.0\" and platform_python_implementation != \"PyPy\""}
+typing-extensions = {version = ">=4.13.2", markers = "python_full_version < \"3.11.0\""}
[package.extras]
-docs = ["sphinx (>=5.3.0)", "sphinx-inline-tabs ; python_full_version >= \"3.8.0\"", "sphinx-rtd-theme (>=3.0.0) ; python_full_version >= \"3.8.0\""]
+docs = ["sphinx (>=5.3.0)", "sphinx-inline-tabs", "sphinx-rtd-theme (>=3.0.0)"]
docstest = ["pyenchant (>=3)", "readme-renderer (>=30.0)", "sphinxcontrib-spelling (>=7.3.1)"]
-nox = ["nox (>=2024.4.15)", "nox[uv] (>=2024.3.2) ; python_full_version >= \"3.8.0\""]
-pep8test = ["check-sdist ; python_full_version >= \"3.8.0\"", "click (>=8.0.1)", "mypy (>=1.4)", "ruff (>=0.3.6)"]
+nox = ["nox[uv] (>=2024.4.15)"]
+pep8test = ["check-sdist", "click (>=8.0.1)", "mypy (>=1.14)", "ruff (>=0.11.11)"]
sdist = ["build (>=1.0.0)"]
ssh = ["bcrypt (>=3.1.5)"]
-test = ["certifi (>=2024)", "cryptography-vectors (==45.0.7)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"]
+test = ["certifi (>=2024)", "cryptography-vectors (==46.0.4)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"]
test-randomorder = ["pytest-randomly"]
[[package]]
@@ -8382,4 +8395,4 @@ cffi = ["cffi (>=1.17,<2.0) ; platform_python_implementation != \"PyPy\" and pyt
[metadata]
lock-version = "2.1"
python-versions = ">=3.10,<3.14"
-content-hash = "40b2c87c3c86bd10214bd30ad291cead75da5060ab894105025ee4c0a3b3828e"
+content-hash = "1e226d8f7a342d17a85c036bfdfdf2ccc7d9e52c96644022fa69bf6044046528"
diff --git a/autogpt_platform/backend/pyproject.toml b/autogpt_platform/backend/pyproject.toml
index 564e004811..20ddc61869 100644
--- a/autogpt_platform/backend/pyproject.toml
+++ b/autogpt_platform/backend/pyproject.toml
@@ -17,7 +17,7 @@ apscheduler = "^3.11.1"
autogpt-libs = { path = "../autogpt_libs", develop = true }
bleach = { extras = ["css"], version = "^6.2.0" }
click = "^8.2.0"
-cryptography = "^45.0"
+cryptography = "^46.0"
discord-py = "^2.5.2"
e2b-code-interpreter = "^1.5.2"
elevenlabs = "^1.50.0"
From a329831b0b2af431fd34d5f0b5f24daba1b841b9 Mon Sep 17 00:00:00 2001
From: Otto
Date: Mon, 9 Feb 2026 00:24:18 +0000
Subject: [PATCH 04/18] feat(backend): Add ClamAV scanning for local file paths
(#11988)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
## Context
From PR #11796 review discussion. Files processed by the video blocks
(downloads, uploads, generated videos) should be scanned through ClamAV
for malware detection.
## Problem
`store_media_file()` in `backend/util/file.py` already scans:
- `workspace://` references
- Cloud storage paths
- Data URIs (`data:...`)
- HTTP/HTTPS URLs
**But local file paths were NOT scanned.** The `else` branch only
verified the file exists.
This gap affected video processing blocks (e.g., `LoopVideoBlock`,
`AddAudioToVideoBlock`) that:
1. Download/receive input media
2. Process it locally (loop, add audio, etc.)
3. Write output to temp directory
4. Call `store_media_file(output_filename, ...)` with a local path →
**skipped virus scanning**
## Solution
Added virus scanning to the local file path branch:
```python
# Virus scan the local file before any further processing
local_content = target_path.read_bytes()
if len(local_content) > MAX_FILE_SIZE_BYTES:
    raise ValueError(...)
await scan_content_safe(local_content, filename=sanitized_file)
```
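For illustration, here is a self-contained sketch of how the new local-path branch behaves; it is not the actual `store_media_file()` implementation. The names `scan_content_safe`, `MAX_FILE_SIZE_BYTES`, `target_path`, and `sanitized_file` come from `backend/util/file.py`, while the stub scanner, the size-limit value, and the helper function name are illustrative assumptions:

```python
from pathlib import Path

MAX_FILE_SIZE_BYTES = 100 * 1024 * 1024  # illustrative value, not the real constant


async def scan_content_safe(content: bytes, filename: str) -> None:
    """Stand-in for the ClamAV-backed scanner; the real one raises on detection."""


async def handle_local_path_sketch(target_path: Path, sanitized_file: str) -> Path:
    # Existing behavior: only verify the local file exists
    if not target_path.is_file():
        raise ValueError(f"Local file does not exist: {target_path}")

    # New behavior: read and scan the local file, mirroring the other input branches
    local_content = target_path.read_bytes()
    if len(local_content) > MAX_FILE_SIZE_BYTES:
        raise ValueError(
            f"File too large: {len(local_content)} bytes > {MAX_FILE_SIZE_BYTES} bytes"
        )
    await scan_content_safe(local_content, filename=sanitized_file)
    return target_path
```

If the scanner raises, the file never reaches the return-format handling, which is the fail-safe behavior noted under Risk Assessment.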
## Changes
- `backend/util/file.py` - Added ~7 lines to scan local files
(consistent with other input types)
- `backend/util/file_test.py` - Added 2 test cases for local file
scanning
## Risk Assessment
- **Low risk:** Single point of change, follows existing pattern
- **Backwards compatible:** No API changes
- **Fail-safe:** If scanning fails, file is rejected (existing behavior)
Closes SECRT-1904
Co-authored-by: Nicholas Tindle
---
autogpt_platform/backend/backend/util/file.py | 8 ++
.../backend/backend/util/file_test.py | 97 +++++++++++++++++++
2 files changed, 105 insertions(+)
diff --git a/autogpt_platform/backend/backend/util/file.py b/autogpt_platform/backend/backend/util/file.py
index 1b8dbdea82..70e354a29c 100644
--- a/autogpt_platform/backend/backend/util/file.py
+++ b/autogpt_platform/backend/backend/util/file.py
@@ -342,6 +342,14 @@ async def store_media_file(
if not target_path.is_file():
raise ValueError(f"Local file does not exist: {target_path}")
+ # Virus scan the local file before any further processing
+ local_content = target_path.read_bytes()
+ if len(local_content) > MAX_FILE_SIZE_BYTES:
+ raise ValueError(
+ f"File too large: {len(local_content)} bytes > {MAX_FILE_SIZE_BYTES} bytes"
+ )
+ await scan_content_safe(local_content, filename=sanitized_file)
+
# Return based on requested format
if return_format == "for_local_processing":
# Use when processing files locally with tools like ffmpeg, MoviePy, PIL
diff --git a/autogpt_platform/backend/backend/util/file_test.py b/autogpt_platform/backend/backend/util/file_test.py
index 9fe672d155..87c53e4305 100644
--- a/autogpt_platform/backend/backend/util/file_test.py
+++ b/autogpt_platform/backend/backend/util/file_test.py
@@ -247,3 +247,100 @@ class TestFileCloudIntegration:
execution_context=make_test_context(graph_exec_id=graph_exec_id),
return_format="for_local_processing",
)
+
+ @pytest.mark.asyncio
+ async def test_store_media_file_local_path_scanned(self):
+ """Test that local file paths are scanned for viruses."""
+ graph_exec_id = "test-exec-123"
+ local_file = "test_video.mp4"
+ file_content = b"fake video content"
+
+ with patch(
+ "backend.util.file.get_cloud_storage_handler"
+ ) as mock_handler_getter, patch(
+ "backend.util.file.scan_content_safe"
+ ) as mock_scan, patch(
+ "backend.util.file.Path"
+ ) as mock_path_class:
+
+ # Mock cloud storage handler - not a cloud path
+ mock_handler = MagicMock()
+ mock_handler.is_cloud_path.return_value = False
+ mock_handler_getter.return_value = mock_handler
+
+ # Mock virus scanner
+ mock_scan.return_value = None
+
+ # Mock file system operations
+ mock_base_path = MagicMock()
+ mock_target_path = MagicMock()
+ mock_resolved_path = MagicMock()
+
+ mock_path_class.return_value = mock_base_path
+ mock_base_path.mkdir = MagicMock()
+ mock_base_path.__truediv__ = MagicMock(return_value=mock_target_path)
+ mock_target_path.resolve.return_value = mock_resolved_path
+ mock_resolved_path.is_relative_to.return_value = True
+ mock_resolved_path.is_file.return_value = True
+ mock_resolved_path.read_bytes.return_value = file_content
+ mock_resolved_path.relative_to.return_value = Path(local_file)
+ mock_resolved_path.name = local_file
+
+ result = await store_media_file(
+ file=MediaFileType(local_file),
+ execution_context=make_test_context(graph_exec_id=graph_exec_id),
+ return_format="for_local_processing",
+ )
+
+ # Verify virus scan was called for local file
+ mock_scan.assert_called_once_with(file_content, filename=local_file)
+
+ # Result should be the relative path
+ assert str(result) == local_file
+
+ @pytest.mark.asyncio
+ async def test_store_media_file_local_path_virus_detected(self):
+ """Test that infected local files raise VirusDetectedError."""
+ from backend.api.features.store.exceptions import VirusDetectedError
+
+ graph_exec_id = "test-exec-123"
+ local_file = "infected.exe"
+ file_content = b"malicious content"
+
+ with patch(
+ "backend.util.file.get_cloud_storage_handler"
+ ) as mock_handler_getter, patch(
+ "backend.util.file.scan_content_safe"
+ ) as mock_scan, patch(
+ "backend.util.file.Path"
+ ) as mock_path_class:
+
+ # Mock cloud storage handler - not a cloud path
+ mock_handler = MagicMock()
+ mock_handler.is_cloud_path.return_value = False
+ mock_handler_getter.return_value = mock_handler
+
+ # Mock virus scanner to detect virus
+ mock_scan.side_effect = VirusDetectedError(
+ "EICAR-Test-File", "File rejected due to virus detection"
+ )
+
+ # Mock file system operations
+ mock_base_path = MagicMock()
+ mock_target_path = MagicMock()
+ mock_resolved_path = MagicMock()
+
+ mock_path_class.return_value = mock_base_path
+ mock_base_path.mkdir = MagicMock()
+ mock_base_path.__truediv__ = MagicMock(return_value=mock_target_path)
+ mock_target_path.resolve.return_value = mock_resolved_path
+ mock_resolved_path.is_relative_to.return_value = True
+ mock_resolved_path.is_file.return_value = True
+ mock_resolved_path.read_bytes.return_value = file_content
+
+ with pytest.raises(VirusDetectedError):
+ await store_media_file(
+ file=MediaFileType(local_file),
+ execution_context=make_test_context(graph_exec_id=graph_exec_id),
+ return_format="for_local_processing",
+ )
From 9e38bd5b78259ec8acc68e77f2d2b485626324be Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Sun, 8 Feb 2026 21:28:22 -0600
Subject: [PATCH 05/18] chore(backend/deps): bump the production-dependencies
group across 1 directory with 8 updates (#12014)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Bumps the production-dependencies group with 8 updates in the
/autogpt_platform/backend directory:
| Package | From | To |
| --- | --- | --- |
| [anthropic](https://github.com/anthropics/anthropic-sdk-python) | `0.59.0` | `0.79.0` |
| [fastapi](https://github.com/fastapi/fastapi) | `0.128.3` | `0.128.5` |
| [ollama](https://github.com/ollama/ollama-python) | `0.5.4` | `0.6.1` |
| [prometheus-client](https://github.com/prometheus/client_python) | `0.22.1` | `0.24.1` |
| [python-multipart](https://github.com/Kludex/python-multipart) | `0.0.20` | `0.0.22` |
| [supabase](https://github.com/supabase/supabase-py) | `2.27.2` | `2.27.3` |
| [tenacity](https://github.com/jd/tenacity) | `9.1.3` | `9.1.4` |
| [tiktoken](https://github.com/openai/tiktoken) | `0.9.0` | `0.12.0` |
Updates `anthropic` from 0.59.0 to 0.79.0
Release notes
Sourced from anthropic's releases.
v0.79.0
0.79.0 (2026-02-07)
Full Changelog: v0.78.0...v0.79.0
Features
- api: enabling fast-mode in claude-opus-4-6 (5953ba7)
Bug Fixes
- pass speed parameter through in sync beta count_tokens (1dd6119)
v0.78.0
0.78.0 (2026-02-05)
Full Changelog: v0.77.1...v0.78.0
Features
- api: Release Claude Opus 4.6, adaptive thinking,
and other features (3ef1529)
v0.77.1
0.77.1 (2026-02-03)
Full Changelog: v0.77.0...v0.77.1
Bug Fixes
- structured outputs: send structured output beta
header when format is omitted (#1158)
(258494e)
Chores
v0.77.0
0.77.0 (2026-01-29)
Full Changelog: v0.76.0...v0.77.0
Features
- api: add support for Structured Outputs in the
Messages API (ad56677)
- api: migrate sending message format in
output_config rather than output_format (af405e4)
- client: add custom JSON encoder for extended type
support (7780e90)
- use output_config for structured outputs (82d669d)
... (truncated)
Changelog
Sourced from anthropic's changelog.
0.79.0 (2026-02-07)
Full Changelog: v0.78.0...v0.79.0
Features
- api: enabling fast-mode in claude-opus-4-6 (5953ba7)
Bug Fixes
- pass speed parameter through in sync beta count_tokens (1dd6119)
0.78.0 (2026-02-05)
Full Changelog: v0.77.1...v0.78.0
Features
- api: Release Claude Opus 4.6, adaptive thinking,
and other features (3ef1529)
0.77.1 (2026-02-03)
Full Changelog: v0.77.0...v0.77.1
Bug Fixes
- structured outputs: send structured output beta
header when format is omitted (#1158)
(258494e)
Chores
0.77.0 (2026-01-29)
Full Changelog: v0.76.0...v0.77.0
Features
- api: add support for Structured Outputs in the
Messages API (ad56677)
- api: migrate sending message format in
output_config rather than output_format (af405e4)
- client: add custom JSON encoder for extended type
support (7780e90)
- use output_config for structured outputs (82d669d)
Bug Fixes
... (truncated)
Commits
- cd1b39b release: 0.79.0
- fb52a6a fix: pass speed parameter through in sync beta count_tokens
- b7c2df2 feat(api): enabling fast-mode in claude-opus-4-6
- 7c42e4b Update CHANGELOG.md (#1163)
- f2b61ed release: 0.78.0
- a4a29ca feat(api): manual updates
- 3955600 release: 0.77.1
- eca8ddf fix(structured outputs): send structured output beta header when format is om...
- ee44c52 chore: remove claude-code-review workflow (#1338)
- 9c485f6 release: 0.77.0 (#1117)
- Additional commits viewable in compare view
Updates `fastapi` from 0.128.3 to 0.128.5
Release notes
Sourced from fastapi's releases.
0.128.5
Refactors
- ♻️ Refactor and simplify Pydantic v2 (and v1) compatibility internal utils. PR #14862 by @tiangolo.
Internal
- ✅ Add inline snapshot tests for OpenAPI before changes from Pydantic v2. PR #14864 by @tiangolo.
0.128.4
Refactors
- ♻️ Refactor internals, simplify Pydantic v2/v1 utils, create_model_field, better types for lenient_issubclass. PR #14860 by @tiangolo.
- ♻️ Simplify internals, remove Pydantic v1 only logic, no longer needed. PR #14857 by @tiangolo.
- ♻️ Refactor internals, cleanup unneeded Pydantic v1 specific logic. PR #14856 by @tiangolo.
Translations
Internal
Commits
- dedf140 🔖 Release version 0.128.5
- 79d4dfb 📝 Update release notes
- 9f4ecf5 ✅ Add inline snapshot tests for OpenAPI before changes from Pydantic v2 (#14864)
- c48539f 📝 Update release notes
- 2e7d375 ♻️ Refactor and simplify Pydantic v2 (and v1) compatibility internal utils (#...
- 8eac94b 🔖 Release version 0.128.4
- 58cdfc7 📝 Update release notes
- d59fbc3 ♻️ Refactor internals, simplify Pydantic v2/v1 utils, create_model_field, b...
- cc6ced6 📝 Update release notes
- cf55bad ♻️ Simplify internals, remove Pydantic v1 only logic, no longer needed (#14857)
- Additional commits viewable in compare view
Updates `ollama` from 0.5.4 to 0.6.1
Release notes
Sourced from ollama's releases.
v0.6.1
What's Changed
Full Changelog: https://github.com/ollama/ollama-python/compare/v0.6.0...v0.6.1
v0.6.0
What's Changed
New Contributors
Full Changelog: https://github.com/ollama/ollama-python/compare/v0.5.4...v0.6.0
Commits
Updates `prometheus-client` from 0.22.1 to 0.24.1
Release notes
Sourced from prometheus-client's releases.
v0.24.1
v0.24.0
What's Changed
Full Changelog: https://github.com/prometheus/client_python/compare/v0.23.1...v0.24.0
v0.23.1
What's Changed
New Contributors
Full Changelog: https://github.com/prometheus/client_python/compare/v0.23.0...v0.23.1
v0.23.0
What's Changed
New Contributors
Full Changelog: https://github.com/prometheus/client_python/compare/v0.22.1...v0.23.0
Commits
Updates `python-multipart` from 0.0.20 to 0.0.22
Release notes
Sourced from python-multipart's releases.
Version 0.0.22
What's Changed
- Drop directory path from filename in
File 9433f4b.
Full Changelog: https://github.com/Kludex/python-multipart/compare/0.0.21...0.0.22
Version 0.0.21
What's Changed
New Contributors
Full Changelog: https://github.com/Kludex/python-multipart/compare/0.0.20...0.0.21
Changelog
Sourced from python-multipart's changelog.
0.0.22 (2026-01-25)
- Drop directory path from filename in
File 9433f4b.
0.0.21 (2025-12-17)
- Add support for Python 3.14 and drop EOL 3.8 and 3.9 #216.
Commits
Updates `supabase` from 2.27.2 to 2.27.3
Release notes
Sourced from supabase's releases.
v2.27.3
2.27.3
(2026-02-03)
Bug Fixes
- deprecate python 3.9 in all packages (#1365)
(cc72ed7)
- ensure storage_url has trailing slash to prevent warning (#1367)
(4267ff1)
Changelog
Sourced from supabase's changelog.
2.27.3
(2026-02-03)
Bug Fixes
- deprecate python 3.9 in all packages (#1365)
(cc72ed7)
- ensure storage_url has trailing slash to prevent warning (#1367)
(4267ff1)
Commits
Updates `tenacity` from 9.1.3 to 9.1.4
Release notes
Sourced from tenacity's releases.
9.1.4
What's Changed
Full Changelog: https://github.com/jd/tenacity/compare/9.1.3...9.1.4
Commits
Updates `tiktoken` from 0.9.0 to 0.12.0
Changelog
Sourced from tiktoken's changelog.
[v0.12.0]
- Build wheels for Python 3.14
- Build musllinux aarch64 wheels
- Support for free-threaded Python
- Update version of
pyo3 and rustc-hash
- Avoid use of
blobfile for reading local files
- Recognise
gpt-5 model identifier
- Minor performance improvement for file reading
[v0.11.0]
- Support for
GPT-5
- Update version of
pyo3
- Use new Rust edition
- Fix special token handling in
encode_to_numpy
- Better error handling
- Improvements to private APIs
[v0.10.0]
- Support for newer models
- Improvements to private APIs
Commits
Dependabot will resolve any conflicts with this PR as long as you don't
alter it yourself. You can also trigger a rebase manually by commenting
`@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits
that have been made to it
- `@dependabot show <dependency name> ignore conditions` will show all of the ignore conditions of the specified dependency
- `@dependabot ignore <dependency name> major version` will close this group update PR and stop Dependabot creating any more for the specific dependency's major version (unless you unignore this specific dependency's major version or upgrade to it yourself)
- `@dependabot ignore <dependency name> minor version` will close this group update PR and stop Dependabot creating any more for the specific dependency's minor version (unless you unignore this specific dependency's minor version or upgrade to it yourself)
- `@dependabot ignore <dependency name>` will close this group update PR and stop Dependabot creating any more for the specific dependency (unless you unignore this specific dependency or upgrade to it yourself)
- `@dependabot unignore <dependency name>` will remove all of the ignore conditions of the specified dependency
- `@dependabot unignore <dependency name> <ignore condition>` will remove the ignore condition of the specified dependency and ignore conditions
---------
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Otto
---
.../backend/backend/blocks/llm.py | 4 +-
autogpt_platform/backend/poetry.lock | 200 +++++++++++-------
autogpt_platform/backend/pyproject.toml | 16 +-
3 files changed, 133 insertions(+), 87 deletions(-)
diff --git a/autogpt_platform/backend/backend/blocks/llm.py b/autogpt_platform/backend/backend/blocks/llm.py
index dcf007440b..7a020593d7 100644
--- a/autogpt_platform/backend/backend/blocks/llm.py
+++ b/autogpt_platform/backend/backend/blocks/llm.py
@@ -531,12 +531,12 @@ class LLMResponse(BaseModel):
def convert_openai_tool_fmt_to_anthropic(
openai_tools: list[dict] | None = None,
-) -> Iterable[ToolParam] | anthropic.NotGiven:
+) -> Iterable[ToolParam] | anthropic.Omit:
"""
Convert OpenAI tool format to Anthropic tool format.
"""
if not openai_tools or len(openai_tools) == 0:
- return anthropic.NOT_GIVEN
+ return anthropic.omit
anthropic_tools = []
for tool in openai_tools:
diff --git a/autogpt_platform/backend/poetry.lock b/autogpt_platform/backend/poetry.lock
index 66b70be3b7..d80c6da0fc 100644
--- a/autogpt_platform/backend/poetry.lock
+++ b/autogpt_platform/backend/poetry.lock
@@ -269,19 +269,20 @@ files = [
[[package]]
name = "anthropic"
-version = "0.59.0"
+version = "0.79.0"
description = "The official Python library for the anthropic API"
optional = false
-python-versions = ">=3.8"
+python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "anthropic-0.59.0-py3-none-any.whl", hash = "sha256:cbc8b3dccef66ad6435c4fa1d317e5ebb092399a4b88b33a09dc4bf3944c3183"},
- {file = "anthropic-0.59.0.tar.gz", hash = "sha256:d710d1ef0547ebbb64b03f219e44ba078e83fc83752b96a9b22e9726b523fd8f"},
+ {file = "anthropic-0.79.0-py3-none-any.whl", hash = "sha256:04cbd473b6bbda4ca2e41dd670fe2f829a911530f01697d0a1e37321eb75f3cf"},
+ {file = "anthropic-0.79.0.tar.gz", hash = "sha256:8707aafb3b1176ed6c13e2b1c9fb3efddce90d17aee5d8b83a86c70dcdcca871"},
]
[package.dependencies]
anyio = ">=3.5.0,<5"
distro = ">=1.7.0,<2"
+docstring-parser = ">=0.15,<1"
httpx = ">=0.25.0,<1"
jiter = ">=0.4.0,<1"
pydantic = ">=1.9.0,<3"
@@ -289,7 +290,7 @@ sniffio = "*"
typing-extensions = ">=4.10,<5"
[package.extras]
-aiohttp = ["aiohttp", "httpx-aiohttp (>=0.1.8)"]
+aiohttp = ["aiohttp", "httpx-aiohttp (>=0.1.9)"]
bedrock = ["boto3 (>=1.28.57)", "botocore (>=1.31.57)"]
vertex = ["google-auth[requests] (>=2,<3)"]
@@ -1148,6 +1149,23 @@ idna = ["idna (>=3.10)"]
trio = ["trio (>=0.30)"]
wmi = ["wmi (>=1.5.1) ; platform_system == \"Windows\""]
+[[package]]
+name = "docstring-parser"
+version = "0.17.0"
+description = "Parse Python docstrings in reST, Google and Numpydoc format"
+optional = false
+python-versions = ">=3.8"
+groups = ["main"]
+files = [
+ {file = "docstring_parser-0.17.0-py3-none-any.whl", hash = "sha256:cf2569abd23dce8099b300f9b4fa8191e9582dda731fd533daf54c4551658708"},
+ {file = "docstring_parser-0.17.0.tar.gz", hash = "sha256:583de4a309722b3315439bb31d64ba3eebada841f2e2cee23b99df001434c912"},
+]
+
+[package.extras]
+dev = ["pre-commit (>=2.16.0) ; python_version >= \"3.9\"", "pydoctor (>=25.4.0)", "pytest"]
+docs = ["pydoctor (>=25.4.0)"]
+test = ["pytest"]
+
[[package]]
name = "dulwich"
version = "0.22.8"
@@ -1364,14 +1382,14 @@ tzdata = "*"
[[package]]
name = "fastapi"
-version = "0.128.3"
+version = "0.128.5"
description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "fastapi-0.128.3-py3-none-any.whl", hash = "sha256:c8cdf7c2182c9a06bf9cfa3329819913c189dc86389b90d5709892053582db29"},
- {file = "fastapi-0.128.3.tar.gz", hash = "sha256:ed99383fd96063447597d5aa2a9ec3973be198e3b4fc10c55f15c62efdb21c60"},
+ {file = "fastapi-0.128.5-py3-none-any.whl", hash = "sha256:bceec0de8aa6564599c5bcc0593b0d287703562c848271fca8546fd2c87bf4dd"},
+ {file = "fastapi-0.128.5.tar.gz", hash = "sha256:a7173579fc162d6471e3c6fbd9a4b7610c7a3b367bcacf6c4f90d5d022cab711"},
]
[package.dependencies]
@@ -3945,14 +3963,14 @@ signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"]
[[package]]
name = "ollama"
-version = "0.5.4"
+version = "0.6.1"
description = "The official Python client for Ollama."
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
- {file = "ollama-0.5.4-py3-none-any.whl", hash = "sha256:6374c9bb4f2a371b3583c09786112ba85b006516745689c172a7e28af4d4d1a2"},
- {file = "ollama-0.5.4.tar.gz", hash = "sha256:75857505a5d42e5e58114a1b78cc8c24596d8866863359d8a2329946a9b6d6f3"},
+ {file = "ollama-0.6.1-py3-none-any.whl", hash = "sha256:fc4c984b345735c5486faeee67d8a265214a31cbb828167782dc642ce0a2bf8c"},
+ {file = "ollama-0.6.1.tar.gz", hash = "sha256:478c67546836430034b415ed64fa890fd3d1ff91781a9d548b3325274e69d7c6"},
]
[package.dependencies]
@@ -4710,14 +4728,14 @@ tests = ["coverage-conditional-plugin (>=0.9.0)", "portalocker[redis]", "pytest
[[package]]
name = "postgrest"
-version = "2.27.2"
+version = "2.27.3"
description = "PostgREST client for Python. This library provides an ORM interface to PostgREST."
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "postgrest-2.27.2-py3-none-any.whl", hash = "sha256:1666fef3de05ca097a314433dd5ae2f2d71c613cb7b233d0f468c4ffe37277da"},
- {file = "postgrest-2.27.2.tar.gz", hash = "sha256:55407d530b5af3d64e883a71fec1f345d369958f723ce4a8ab0b7d169e313242"},
+ {file = "postgrest-2.27.3-py3-none-any.whl", hash = "sha256:ed79123af7127edd78d538bfe8351d277e45b1a36994a4dbf57ae27dde87a7b7"},
+ {file = "postgrest-2.27.3.tar.gz", hash = "sha256:c2e2679addfc8eaab23197bad7ddaee6cbb4cbe8c483ebd2d2e5219543037cc3"},
]
[package.dependencies]
@@ -4875,17 +4893,19 @@ tqdm = "*"
[[package]]
name = "prometheus-client"
-version = "0.22.1"
+version = "0.24.1"
description = "Python client for the Prometheus monitoring system."
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "prometheus_client-0.22.1-py3-none-any.whl", hash = "sha256:cca895342e308174341b2cbf99a56bef291fbc0ef7b9e5412a0f26d653ba7094"},
- {file = "prometheus_client-0.22.1.tar.gz", hash = "sha256:190f1331e783cf21eb60bca559354e0a4d4378facecf78f5428c39b675d20d28"},
+ {file = "prometheus_client-0.24.1-py3-none-any.whl", hash = "sha256:150db128af71a5c2482b36e588fc8a6b95e498750da4b17065947c16070f4055"},
+ {file = "prometheus_client-0.24.1.tar.gz", hash = "sha256:7e0ced7fbbd40f7b84962d5d2ab6f17ef88a72504dcf7c0b40737b43b2a461f9"},
]
[package.extras]
+aiohttp = ["aiohttp"]
+django = ["django"]
twisted = ["twisted"]
[[package]]
@@ -5945,14 +5965,14 @@ cli = ["click (>=5.0)"]
[[package]]
name = "python-multipart"
-version = "0.0.20"
+version = "0.0.22"
description = "A streaming multipart parser for Python"
optional = false
-python-versions = ">=3.8"
+python-versions = ">=3.10"
groups = ["main"]
files = [
- {file = "python_multipart-0.0.20-py3-none-any.whl", hash = "sha256:8a62d3a8335e06589fe01f2a3e178cdcc632f3fbe0d492ad9ee0ec35aab1f104"},
- {file = "python_multipart-0.0.20.tar.gz", hash = "sha256:8dd0cab45b8e23064ae09147625994d090fa46f5b0d1e13af944c331a7fa9d13"},
+ {file = "python_multipart-0.0.22-py3-none-any.whl", hash = "sha256:2b2cd894c83d21bf49d702499531c7bafd057d730c201782048f7945d82de155"},
+ {file = "python_multipart-0.0.22.tar.gz", hash = "sha256:7340bef99a7e0032613f56dc36027b959fd3b30a787ed62d310e951f7c3a3a58"},
]
[[package]]
@@ -6240,14 +6260,14 @@ all = ["numpy"]
[[package]]
name = "realtime"
-version = "2.27.2"
+version = "2.27.3"
description = ""
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "realtime-2.27.2-py3-none-any.whl", hash = "sha256:34a9cbb26a274e707e8fc9e3ee0a66de944beac0fe604dc336d1e985db2c830f"},
- {file = "realtime-2.27.2.tar.gz", hash = "sha256:b960a90294d2cea1b3f1275ecb89204304728e08fff1c393cc1b3150739556b3"},
+ {file = "realtime-2.27.3-py3-none-any.whl", hash = "sha256:f571115f86988e33c41c895cb3fba2eaa1b693aeaede3617288f44274ca90f43"},
+ {file = "realtime-2.27.3.tar.gz", hash = "sha256:02b082243107656a5ef3fb63e8e2ab4c40bc199abb45adb8a42ed63f089a1041"},
]
[package.dependencies]
@@ -7005,14 +7025,14 @@ full = ["httpx (>=0.27.0,<0.29.0)", "itsdangerous", "jinja2", "python-multipart
[[package]]
name = "storage3"
-version = "2.27.2"
+version = "2.27.3"
description = "Supabase Storage client for Python."
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "storage3-2.27.2-py3-none-any.whl", hash = "sha256:e6f16e7a260729e7b1f46e9bf61746805a02e30f5e419ee1291007c432e3ec63"},
- {file = "storage3-2.27.2.tar.gz", hash = "sha256:cb4807b7f86b4bb1272ac6fdd2f3cfd8ba577297046fa5f88557425200275af5"},
+ {file = "storage3-2.27.3-py3-none-any.whl", hash = "sha256:11a05b7da84bccabeeea12d940bca3760cf63fe6ca441868677335cfe4fdfbe0"},
+ {file = "storage3-2.27.3.tar.gz", hash = "sha256:dc1a4a010cf36d5482c5cb6c1c28fc5f00e23284342b89e4ae43b5eae8501ddb"},
]
[package.dependencies]
@@ -7072,35 +7092,35 @@ typing-extensions = {version = ">=4.5.0", markers = "python_version >= \"3.7\""}
[[package]]
name = "supabase"
-version = "2.27.2"
+version = "2.27.3"
description = "Supabase client for Python."
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "supabase-2.27.2-py3-none-any.whl", hash = "sha256:d4dce00b3a418ee578017ec577c0e5be47a9a636355009c76f20ed2faa15bc54"},
- {file = "supabase-2.27.2.tar.gz", hash = "sha256:2aed40e4f3454438822442a1e94a47be6694c2c70392e7ae99b51a226d4293f7"},
+ {file = "supabase-2.27.3-py3-none-any.whl", hash = "sha256:082a74642fcf9954693f1ce8c251baf23e4bda26ffdbc8dcd4c99c82e60d69ff"},
+ {file = "supabase-2.27.3.tar.gz", hash = "sha256:5e5a348232ac4315c1032ddd687278f0b982465471f0cbb52bca7e6a66495ff3"},
]
[package.dependencies]
httpx = ">=0.26,<0.29"
-postgrest = "2.27.2"
-realtime = "2.27.2"
-storage3 = "2.27.2"
-supabase-auth = "2.27.2"
-supabase-functions = "2.27.2"
+postgrest = "2.27.3"
+realtime = "2.27.3"
+storage3 = "2.27.3"
+supabase-auth = "2.27.3"
+supabase-functions = "2.27.3"
yarl = ">=1.22.0"
[[package]]
name = "supabase-auth"
-version = "2.27.2"
+version = "2.27.3"
description = "Python Client Library for Supabase Auth"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "supabase_auth-2.27.2-py3-none-any.whl", hash = "sha256:78ec25b11314d0a9527a7205f3b1c72560dccdc11b38392f80297ef98664ee91"},
- {file = "supabase_auth-2.27.2.tar.gz", hash = "sha256:0f5bcc79b3677cb42e9d321f3c559070cfa40d6a29a67672cc8382fb7dc2fe97"},
+ {file = "supabase_auth-2.27.3-py3-none-any.whl", hash = "sha256:82a4262eaad85383319d394dab0eea11fcf3ebd774062aef8ea3874ae2f02579"},
+ {file = "supabase_auth-2.27.3.tar.gz", hash = "sha256:39894d4bc60b6f23b5cff4d0d7d4c1659e5d69563cadf014d4896f780ca8ca78"},
]
[package.dependencies]
@@ -7110,14 +7130,14 @@ pyjwt = {version = ">=2.10.1", extras = ["crypto"]}
[[package]]
name = "supabase-functions"
-version = "2.27.2"
+version = "2.27.3"
description = "Library for Supabase Functions"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "supabase_functions-2.27.2-py3-none-any.whl", hash = "sha256:db480efc669d0bca07605b9b6f167312af43121adcc842a111f79bea416ef754"},
- {file = "supabase_functions-2.27.2.tar.gz", hash = "sha256:d0c8266207a94371cb3fd35ad3c7f025b78a97cf026861e04ccd35ac1775f80b"},
+ {file = "supabase_functions-2.27.3-py3-none-any.whl", hash = "sha256:9d14a931d49ede1c6cf5fbfceb11c44061535ba1c3f310f15384964d86a83d9e"},
+ {file = "supabase_functions-2.27.3.tar.gz", hash = "sha256:e954f1646da8ca6e7e16accef58d0884a5f97b25956ee98e7d4927a210ed92f9"},
]
[package.dependencies]
@@ -7127,14 +7147,14 @@ yarl = ">=1.20.1"
[[package]]
name = "tenacity"
-version = "9.1.3"
+version = "9.1.4"
description = "Retry code until it succeeds"
optional = false
python-versions = ">=3.10"
groups = ["main"]
files = [
- {file = "tenacity-9.1.3-py3-none-any.whl", hash = "sha256:51171cfc6b8a7826551e2f029426b10a6af189c5ac6986adcd7eb36d42f17954"},
- {file = "tenacity-9.1.3.tar.gz", hash = "sha256:a6724c947aa717087e2531f883bde5c9188f603f6669a9b8d54eb998e604c12a"},
+ {file = "tenacity-9.1.4-py3-none-any.whl", hash = "sha256:6095a360c919085f28c6527de529e76a06ad89b23659fa881ae0649b867a9d55"},
+ {file = "tenacity-9.1.4.tar.gz", hash = "sha256:adb31d4c263f2bd041081ab33b498309a57c77f9acf2db65aadf0898179cf93a"},
]
[package.extras]
@@ -7143,43 +7163,69 @@ test = ["pytest", "tornado (>=4.5)", "typeguard"]
[[package]]
name = "tiktoken"
-version = "0.9.0"
+version = "0.12.0"
description = "tiktoken is a fast BPE tokeniser for use with OpenAI's models"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "tiktoken-0.9.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:586c16358138b96ea804c034b8acf3f5d3f0258bd2bc3b0227af4af5d622e382"},
- {file = "tiktoken-0.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d9c59ccc528c6c5dd51820b3474402f69d9a9e1d656226848ad68a8d5b2e5108"},
- {file = "tiktoken-0.9.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f0968d5beeafbca2a72c595e8385a1a1f8af58feaebb02b227229b69ca5357fd"},
- {file = "tiktoken-0.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:92a5fb085a6a3b7350b8fc838baf493317ca0e17bd95e8642f95fc69ecfed1de"},
- {file = "tiktoken-0.9.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:15a2752dea63d93b0332fb0ddb05dd909371ededa145fe6a3242f46724fa7990"},
- {file = "tiktoken-0.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:26113fec3bd7a352e4b33dbaf1bd8948de2507e30bd95a44e2b1156647bc01b4"},
- {file = "tiktoken-0.9.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:f32cc56168eac4851109e9b5d327637f15fd662aa30dd79f964b7c39fbadd26e"},
- {file = "tiktoken-0.9.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:45556bc41241e5294063508caf901bf92ba52d8ef9222023f83d2483a3055348"},
- {file = "tiktoken-0.9.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03935988a91d6d3216e2ec7c645afbb3d870b37bcb67ada1943ec48678e7ee33"},
- {file = "tiktoken-0.9.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b3d80aad8d2c6b9238fc1a5524542087c52b860b10cbf952429ffb714bc1136"},
- {file = "tiktoken-0.9.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b2a21133be05dc116b1d0372af051cd2c6aa1d2188250c9b553f9fa49301b336"},
- {file = "tiktoken-0.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:11a20e67fdf58b0e2dea7b8654a288e481bb4fc0289d3ad21291f8d0849915fb"},
- {file = "tiktoken-0.9.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:e88f121c1c22b726649ce67c089b90ddda8b9662545a8aeb03cfef15967ddd03"},
- {file = "tiktoken-0.9.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a6600660f2f72369acb13a57fb3e212434ed38b045fd8cc6cdd74947b4b5d210"},
- {file = "tiktoken-0.9.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:95e811743b5dfa74f4b227927ed86cbc57cad4df859cb3b643be797914e41794"},
- {file = "tiktoken-0.9.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99376e1370d59bcf6935c933cb9ba64adc29033b7e73f5f7569f3aad86552b22"},
- {file = "tiktoken-0.9.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:badb947c32739fb6ddde173e14885fb3de4d32ab9d8c591cbd013c22b4c31dd2"},
- {file = "tiktoken-0.9.0-cp312-cp312-win_amd64.whl", hash = "sha256:5a62d7a25225bafed786a524c1b9f0910a1128f4232615bf3f8257a73aaa3b16"},
- {file = "tiktoken-0.9.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:2b0e8e05a26eda1249e824156d537015480af7ae222ccb798e5234ae0285dbdb"},
- {file = "tiktoken-0.9.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:27d457f096f87685195eea0165a1807fae87b97b2161fe8c9b1df5bd74ca6f63"},
- {file = "tiktoken-0.9.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cf8ded49cddf825390e36dd1ad35cd49589e8161fdcb52aa25f0583e90a3e01"},
- {file = "tiktoken-0.9.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc156cb314119a8bb9748257a2eaebd5cc0753b6cb491d26694ed42fc7cb3139"},
- {file = "tiktoken-0.9.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:cd69372e8c9dd761f0ab873112aba55a0e3e506332dd9f7522ca466e817b1b7a"},
- {file = "tiktoken-0.9.0-cp313-cp313-win_amd64.whl", hash = "sha256:5ea0edb6f83dc56d794723286215918c1cde03712cbbafa0348b33448faf5b95"},
- {file = "tiktoken-0.9.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c6386ca815e7d96ef5b4ac61e0048cd32ca5a92d5781255e13b31381d28667dc"},
- {file = "tiktoken-0.9.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:75f6d5db5bc2c6274b674ceab1615c1778e6416b14705827d19b40e6355f03e0"},
- {file = "tiktoken-0.9.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e15b16f61e6f4625a57a36496d28dd182a8a60ec20a534c5343ba3cafa156ac7"},
- {file = "tiktoken-0.9.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ebcec91babf21297022882344c3f7d9eed855931466c3311b1ad6b64befb3df"},
- {file = "tiktoken-0.9.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:e5fd49e7799579240f03913447c0cdfa1129625ebd5ac440787afc4345990427"},
- {file = "tiktoken-0.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:26242ca9dc8b58e875ff4ca078b9a94d2f0813e6a535dcd2205df5d49d927cc7"},
- {file = "tiktoken-0.9.0.tar.gz", hash = "sha256:d02a5ca6a938e0490e1ff957bc48c8b078c88cb83977be1625b1fd8aac792c5d"},
+ {file = "tiktoken-0.12.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3de02f5a491cfd179aec916eddb70331814bd6bf764075d39e21d5862e533970"},
+ {file = "tiktoken-0.12.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b6cfb6d9b7b54d20af21a912bfe63a2727d9cfa8fbda642fd8322c70340aad16"},
+ {file = "tiktoken-0.12.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:cde24cdb1b8a08368f709124f15b36ab5524aac5fa830cc3fdce9c03d4fb8030"},
+ {file = "tiktoken-0.12.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:6de0da39f605992649b9cfa6f84071e3f9ef2cec458d08c5feb1b6f0ff62e134"},
+ {file = "tiktoken-0.12.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:6faa0534e0eefbcafaccb75927a4a380463a2eaa7e26000f0173b920e98b720a"},
+ {file = "tiktoken-0.12.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:82991e04fc860afb933efb63957affc7ad54f83e2216fe7d319007dab1ba5892"},
+ {file = "tiktoken-0.12.0-cp310-cp310-win_amd64.whl", hash = "sha256:6fb2995b487c2e31acf0a9e17647e3b242235a20832642bb7a9d1a181c0c1bb1"},
+ {file = "tiktoken-0.12.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:6e227c7f96925003487c33b1b32265fad2fbcec2b7cf4817afb76d416f40f6bb"},
+ {file = "tiktoken-0.12.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c06cf0fcc24c2cb2adb5e185c7082a82cba29c17575e828518c2f11a01f445aa"},
+ {file = "tiktoken-0.12.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:f18f249b041851954217e9fd8e5c00b024ab2315ffda5ed77665a05fa91f42dc"},
+ {file = "tiktoken-0.12.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:47a5bc270b8c3db00bb46ece01ef34ad050e364b51d406b6f9730b64ac28eded"},
+ {file = "tiktoken-0.12.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:508fa71810c0efdcd1b898fda574889ee62852989f7c1667414736bcb2b9a4bd"},
+ {file = "tiktoken-0.12.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a1af81a6c44f008cba48494089dd98cccb8b313f55e961a52f5b222d1e507967"},
+ {file = "tiktoken-0.12.0-cp311-cp311-win_amd64.whl", hash = "sha256:3e68e3e593637b53e56f7237be560f7a394451cb8c11079755e80ae64b9e6def"},
+ {file = "tiktoken-0.12.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b97f74aca0d78a1ff21b8cd9e9925714c15a9236d6ceacf5c7327c117e6e21e8"},
+ {file = "tiktoken-0.12.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2b90f5ad190a4bb7c3eb30c5fa32e1e182ca1ca79f05e49b448438c3e225a49b"},
+ {file = "tiktoken-0.12.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:65b26c7a780e2139e73acc193e5c63ac754021f160df919add909c1492c0fb37"},
+ {file = "tiktoken-0.12.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:edde1ec917dfd21c1f2f8046b86348b0f54a2c0547f68149d8600859598769ad"},
+ {file = "tiktoken-0.12.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:35a2f8ddd3824608b3d650a000c1ef71f730d0c56486845705a8248da00f9fe5"},
+ {file = "tiktoken-0.12.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:83d16643edb7fa2c99eff2ab7733508aae1eebb03d5dfc46f5565862810f24e3"},
+ {file = "tiktoken-0.12.0-cp312-cp312-win_amd64.whl", hash = "sha256:ffc5288f34a8bc02e1ea7047b8d041104791d2ddbf42d1e5fa07822cbffe16bd"},
+ {file = "tiktoken-0.12.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:775c2c55de2310cc1bc9a3ad8826761cbdc87770e586fd7b6da7d4589e13dab3"},
+ {file = "tiktoken-0.12.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a01b12f69052fbe4b080a2cfb867c4de12c704b56178edf1d1d7b273561db160"},
+ {file = "tiktoken-0.12.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:01d99484dc93b129cd0964f9d34eee953f2737301f18b3c7257bf368d7615baa"},
+ {file = "tiktoken-0.12.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:4a1a4fcd021f022bfc81904a911d3df0f6543b9e7627b51411da75ff2fe7a1be"},
+ {file = "tiktoken-0.12.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:981a81e39812d57031efdc9ec59fa32b2a5a5524d20d4776574c4b4bd2e9014a"},
+ {file = "tiktoken-0.12.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9baf52f84a3f42eef3ff4e754a0db79a13a27921b457ca9832cf944c6be4f8f3"},
+ {file = "tiktoken-0.12.0-cp313-cp313-win_amd64.whl", hash = "sha256:b8a0cd0c789a61f31bf44851defbd609e8dd1e2c8589c614cc1060940ef1f697"},
+ {file = "tiktoken-0.12.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:d5f89ea5680066b68bcb797ae85219c72916c922ef0fcdd3480c7d2315ffff16"},
+ {file = "tiktoken-0.12.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:b4e7ed1c6a7a8a60a3230965bdedba8cc58f68926b835e519341413370e0399a"},
+ {file = "tiktoken-0.12.0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:fc530a28591a2d74bce821d10b418b26a094bf33839e69042a6e86ddb7a7fb27"},
+ {file = "tiktoken-0.12.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:06a9f4f49884139013b138920a4c393aa6556b2f8f536345f11819389c703ebb"},
+ {file = "tiktoken-0.12.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:04f0e6a985d95913cabc96a741c5ffec525a2c72e9df086ff17ebe35985c800e"},
+ {file = "tiktoken-0.12.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:0ee8f9ae00c41770b5f9b0bb1235474768884ae157de3beb5439ca0fd70f3e25"},
+ {file = "tiktoken-0.12.0-cp313-cp313t-win_amd64.whl", hash = "sha256:dc2dd125a62cb2b3d858484d6c614d136b5b848976794edfb63688d539b8b93f"},
+ {file = "tiktoken-0.12.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:a90388128df3b3abeb2bfd1895b0681412a8d7dc644142519e6f0a97c2111646"},
+ {file = "tiktoken-0.12.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:da900aa0ad52247d8794e307d6446bd3cdea8e192769b56276695d34d2c9aa88"},
+ {file = "tiktoken-0.12.0-cp314-cp314-manylinux_2_28_aarch64.whl", hash = "sha256:285ba9d73ea0d6171e7f9407039a290ca77efcdb026be7769dccc01d2c8d7fff"},
+ {file = "tiktoken-0.12.0-cp314-cp314-manylinux_2_28_x86_64.whl", hash = "sha256:d186a5c60c6a0213f04a7a802264083dea1bbde92a2d4c7069e1a56630aef830"},
+ {file = "tiktoken-0.12.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:604831189bd05480f2b885ecd2d1986dc7686f609de48208ebbbddeea071fc0b"},
+ {file = "tiktoken-0.12.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:8f317e8530bb3a222547b85a58583238c8f74fd7a7408305f9f63246d1a0958b"},
+ {file = "tiktoken-0.12.0-cp314-cp314-win_amd64.whl", hash = "sha256:399c3dd672a6406719d84442299a490420b458c44d3ae65516302a99675888f3"},
+ {file = "tiktoken-0.12.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:c2c714c72bc00a38ca969dae79e8266ddec999c7ceccd603cc4f0d04ccd76365"},
+ {file = "tiktoken-0.12.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:cbb9a3ba275165a2cb0f9a83f5d7025afe6b9d0ab01a22b50f0e74fee2ad253e"},
+ {file = "tiktoken-0.12.0-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:dfdfaa5ffff8993a3af94d1125870b1d27aed7cb97aa7eb8c1cefdbc87dbee63"},
+ {file = "tiktoken-0.12.0-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:584c3ad3d0c74f5269906eb8a659c8bfc6144a52895d9261cdaf90a0ae5f4de0"},
+ {file = "tiktoken-0.12.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:54c891b416a0e36b8e2045b12b33dd66fb34a4fe7965565f1b482da50da3e86a"},
+ {file = "tiktoken-0.12.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:5edb8743b88d5be814b1a8a8854494719080c28faaa1ccbef02e87354fe71ef0"},
+ {file = "tiktoken-0.12.0-cp314-cp314t-win_amd64.whl", hash = "sha256:f61c0aea5565ac82e2ec50a05e02a6c44734e91b51c10510b084ea1b8e633a71"},
+ {file = "tiktoken-0.12.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:d51d75a5bffbf26f86554d28e78bfb921eae998edc2675650fd04c7e1f0cdc1e"},
+ {file = "tiktoken-0.12.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:09eb4eae62ae7e4c62364d9ec3a57c62eea707ac9a2b2c5d6bd05de6724ea179"},
+ {file = "tiktoken-0.12.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:df37684ace87d10895acb44b7f447d4700349b12197a526da0d4a4149fde074c"},
+ {file = "tiktoken-0.12.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:4c9614597ac94bb294544345ad8cf30dac2129c05e2db8dc53e082f355857af7"},
+ {file = "tiktoken-0.12.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:20cf97135c9a50de0b157879c3c4accbb29116bcf001283d26e073ff3b345946"},
+ {file = "tiktoken-0.12.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:15d875454bbaa3728be39880ddd11a5a2a9e548c29418b41e8fd8a767172b5ec"},
+ {file = "tiktoken-0.12.0-cp39-cp39-win_amd64.whl", hash = "sha256:2cff3688ba3c639ebe816f8d58ffbbb0aa7433e23e08ab1cade5d175fc973fb3"},
+ {file = "tiktoken-0.12.0.tar.gz", hash = "sha256:b18ba7ee2b093863978fcb14f74b3707cdc8d4d4d3836853ce7ec60772139931"},
]
[package.dependencies]
@@ -8395,4 +8441,4 @@ cffi = ["cffi (>=1.17,<2.0) ; platform_python_implementation != \"PyPy\" and pyt
[metadata]
lock-version = "2.1"
python-versions = ">=3.10,<3.14"
-content-hash = "1e226d8f7a342d17a85c036bfdfdf2ccc7d9e52c96644022fa69bf6044046528"
+content-hash = "57127e7ce52ee64c6eea638b269ad0185bad72ff95d11cca28bd1a082015d396"
diff --git a/autogpt_platform/backend/pyproject.toml b/autogpt_platform/backend/pyproject.toml
index 20ddc61869..b1f755b8c8 100644
--- a/autogpt_platform/backend/pyproject.toml
+++ b/autogpt_platform/backend/pyproject.toml
@@ -12,7 +12,7 @@ python = ">=3.10,<3.14"
aio-pika = "^9.5.5"
aiohttp = "^3.10.0"
aiodns = "^3.5.0"
-anthropic = "^0.59.0"
+anthropic = "^0.79.0"
apscheduler = "^3.11.1"
autogpt-libs = { path = "../autogpt_libs", develop = true }
bleach = { extras = ["css"], version = "^6.2.0" }
@@ -21,7 +21,7 @@ cryptography = "^46.0"
discord-py = "^2.5.2"
e2b-code-interpreter = "^1.5.2"
elevenlabs = "^1.50.0"
-fastapi = "^0.128.0"
+fastapi = "^0.128.5"
feedparser = "^6.0.11"
flake8 = "^7.3.0"
google-api-python-client = "^2.177.0"
@@ -38,7 +38,7 @@ langfuse = "^3.11.0"
launchdarkly-server-sdk = "^9.14.1"
mem0ai = "^0.1.115"
moviepy = "^2.1.2"
-ollama = "^0.5.1"
+ollama = "^0.6.1"
openai = "^1.97.1"
orjson = "^3.10.0"
pika = "^1.3.2"
@@ -48,7 +48,7 @@ postmarker = "^1.0"
praw = "~7.8.1"
prisma = "^0.15.0"
rank-bm25 = "^0.2.2"
-prometheus-client = "^0.22.1"
+prometheus-client = "^0.24.1"
prometheus-fastapi-instrumentator = "^7.0.0"
psutil = "^7.0.0"
psycopg2-binary = "^2.9.10"
@@ -57,7 +57,7 @@ pydantic-settings = "^2.12.0"
pytest = "^8.4.1"
pytest-asyncio = "^1.1.0"
python-dotenv = "^1.1.1"
-python-multipart = "^0.0.20"
+python-multipart = "^0.0.22"
redis = "^6.2.0"
regex = "^2025.9.18"
replicate = "^1.0.6"
@@ -65,8 +65,8 @@ sentry-sdk = {extras = ["anthropic", "fastapi", "launchdarkly", "openai", "sqlal
sqlalchemy = "^2.0.40"
strenum = "^0.4.9"
stripe = "^11.5.0"
-supabase = "2.27.2"
-tenacity = "^9.1.2"
+supabase = "2.27.3"
+tenacity = "^9.1.4"
todoist-api-python = "^2.1.7"
tweepy = "^4.16.0"
uvicorn = { extras = ["standard"], version = "^0.40.0" }
@@ -77,7 +77,7 @@ zerobouncesdk = "^1.1.2"
# NOTE: please insert new dependencies in their alphabetical location
pytest-snapshot = "^0.9.0"
aiofiles = "^24.1.0"
-tiktoken = "^0.9.0"
+tiktoken = "^0.12.0"
aioclamd = "^1.0.0"
setuptools = "^80.9.0"
gcloud-aio-storage = "^9.5.0"
From deccc26f1f250e7d602926ea4a3527c9554476d6 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Sun, 8 Feb 2026 21:28:23 -0600
Subject: [PATCH 06/18] chore(deps): bump actions/cache from 4 to 5 (#11665)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Bumps [actions/cache](https://github.com/actions/cache) from 4 to 5.
Release notes (sourced from actions/cache's releases):
- v5.0.0: `actions/cache@v5` runs on the Node.js 24 runtime and requires a minimum Actions Runner version of 2.327.1. If you are using self-hosted runners, ensure they are updated before upgrading. Full changelog: https://github.com/actions/cache/compare/v4.3.0...v5.0.0
- v4.3.0: Full changelog: https://github.com/actions/cache/compare/v4...v4.3.0
- v4.2.4: Full changelog: https://github.com/actions/cache/compare/v4...v4.2.4
- v4.2.3: Update to use the `@actions/cache` 4.0.3 package and prepare for new release by @salmanmkc in actions/cache#1577 (SAS tokens for cache entries are now masked in debug logs). Full changelog: https://github.com/actions/cache/compare/v4.2.2...v4.2.3
... (truncated)
Changelog (sourced from actions/cache's changelog):
- 5.0.1: Update `@azure/storage-blob` to ^12.29.1 via `@actions/cache@5.0.1` (#1685)
- 5.0.0: `actions/cache@v5` runs on the Node.js 24 runtime and requires a minimum Actions Runner version of 2.327.1. If you are using self-hosted runners, ensure they are updated before upgrading.
- 4.3.0
- 4.2.4: Bump `@actions/cache` to v4.0.5
- 4.2.3: Bump `@actions/cache` to v4.0.3 (obfuscates SAS token in debug logs for cache entries)
- 4.2.2: Bump `@actions/cache` to v4.0.2
- 4.2.1: Bump `@actions/cache` to v4.0.1
- 4.2.0: TL;DR: the cache backend service has been rewritten from the ground up for improved performance and reliability, and actions/cache now integrates with the new cache service (v2) APIs. The new service will gradually roll out as of February 1st, 2025, and the legacy service will be sunset on the same date. Changes in this release are fully backward compatible. Some versions of this action are being deprecated; upgrade to v4 or v3 before February 1st, 2025 (if you are using pinned SHAs, use the SHAs of versions v4.2.0 or v3.4.0). If you do not upgrade, all workflow runs using any of the deprecated versions of actions/cache will fail; upgrading to the recommended versions will not break your workflows.
- 4.1.2
... (truncated)
Commits:
- 9255dc7: Merge pull request #1686 from actions/cache-v5.0.1-release
- 8ff5423: chore: release v5.0.1
- 9233019: Merge pull request #1685 from salmanmkc/node24-storage-blob-fix
- b975f2b: fix: add peer property to package-lock.json for dependencies
- d0a0e18: fix: update license files for @actions/cache, fast-xml-parser, and strnum
- 74de208: fix: update @actions/cache to ^5.0.1 for Node.js 24 punycode fix
- ac7f115: peer
- b0f846b: fix: update @actions/cache with storage-blob fix for Node.js 24 punycode depr...
- a783357: Merge pull request #1684 from actions/prepare-cache-v5-release
- 3bb0d78: docs: highlight v5 runner requirement in releases
- Additional commits viewable in the compare view
You can trigger a rebase of this PR by commenting `@dependabot rebase`.
---------
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Nick Tindle
---
.github/workflows/claude-dependabot.yml | 6 +++---
.github/workflows/claude.yml | 6 +++---
.github/workflows/copilot-setup-steps.yml | 6 +++---
.github/workflows/docs-block-sync.yml | 2 +-
.github/workflows/docs-claude-review.yml | 2 +-
.github/workflows/docs-enhance.yml | 2 +-
.github/workflows/platform-backend-ci.yml | 2 +-
.github/workflows/platform-frontend-ci.yml | 12 ++++++------
.github/workflows/platform-fullstack-ci.yml | 4 ++--
9 files changed, 21 insertions(+), 21 deletions(-)
diff --git a/.github/workflows/claude-dependabot.yml b/.github/workflows/claude-dependabot.yml
index 6fb052e8f3..c39fdb0e35 100644
--- a/.github/workflows/claude-dependabot.yml
+++ b/.github/workflows/claude-dependabot.yml
@@ -41,7 +41,7 @@ jobs:
python-version: "3.11" # Use standard version matching CI
- name: Set up Python dependency cache
- uses: actions/cache@v4
+ uses: actions/cache@v5
with:
path: ~/.cache/pypoetry
key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/backend/poetry.lock') }}
@@ -91,7 +91,7 @@ jobs:
echo "PNPM_HOME=$HOME/.pnpm-store" >> $GITHUB_ENV
- name: Cache frontend dependencies
- uses: actions/cache@v4
+ uses: actions/cache@v5
with:
path: ~/.pnpm-store
key: ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}
@@ -124,7 +124,7 @@ jobs:
# Phase 1: Cache and load Docker images for faster setup
- name: Set up Docker image cache
id: docker-cache
- uses: actions/cache@v4
+ uses: actions/cache@v5
with:
path: ~/docker-cache
# Use a versioned key for cache invalidation when image list changes
diff --git a/.github/workflows/claude.yml b/.github/workflows/claude.yml
index 8c45f930dc..805f3d78bb 100644
--- a/.github/workflows/claude.yml
+++ b/.github/workflows/claude.yml
@@ -57,7 +57,7 @@ jobs:
python-version: "3.11" # Use standard version matching CI
- name: Set up Python dependency cache
- uses: actions/cache@v4
+ uses: actions/cache@v5
with:
path: ~/.cache/pypoetry
key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/backend/poetry.lock') }}
@@ -107,7 +107,7 @@ jobs:
echo "PNPM_HOME=$HOME/.pnpm-store" >> $GITHUB_ENV
- name: Cache frontend dependencies
- uses: actions/cache@v4
+ uses: actions/cache@v5
with:
path: ~/.pnpm-store
key: ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}
@@ -140,7 +140,7 @@ jobs:
# Phase 1: Cache and load Docker images for faster setup
- name: Set up Docker image cache
id: docker-cache
- uses: actions/cache@v4
+ uses: actions/cache@v5
with:
path: ~/docker-cache
# Use a versioned key for cache invalidation when image list changes
diff --git a/.github/workflows/copilot-setup-steps.yml b/.github/workflows/copilot-setup-steps.yml
index aac8befee0..f70fe36572 100644
--- a/.github/workflows/copilot-setup-steps.yml
+++ b/.github/workflows/copilot-setup-steps.yml
@@ -39,7 +39,7 @@ jobs:
python-version: "3.11" # Use standard version matching CI
- name: Set up Python dependency cache
- uses: actions/cache@v4
+ uses: actions/cache@v5
with:
path: ~/.cache/pypoetry
key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/backend/poetry.lock') }}
@@ -89,7 +89,7 @@ jobs:
echo "PNPM_HOME=$HOME/.pnpm-store" >> $GITHUB_ENV
- name: Cache frontend dependencies
- uses: actions/cache@v4
+ uses: actions/cache@v5
with:
path: ~/.pnpm-store
key: ${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}
@@ -132,7 +132,7 @@ jobs:
# Phase 1: Cache and load Docker images for faster setup
- name: Set up Docker image cache
id: docker-cache
- uses: actions/cache@v4
+ uses: actions/cache@v5
with:
path: ~/docker-cache
# Use a versioned key for cache invalidation when image list changes
diff --git a/.github/workflows/docs-block-sync.yml b/.github/workflows/docs-block-sync.yml
index b1fc986ea7..4977877b19 100644
--- a/.github/workflows/docs-block-sync.yml
+++ b/.github/workflows/docs-block-sync.yml
@@ -33,7 +33,7 @@ jobs:
python-version: "3.11"
- name: Set up Python dependency cache
- uses: actions/cache@v4
+ uses: actions/cache@v5
with:
path: ~/.cache/pypoetry
key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/backend/poetry.lock') }}
diff --git a/.github/workflows/docs-claude-review.yml b/.github/workflows/docs-claude-review.yml
index 511c2efcd5..1643fe1c49 100644
--- a/.github/workflows/docs-claude-review.yml
+++ b/.github/workflows/docs-claude-review.yml
@@ -33,7 +33,7 @@ jobs:
python-version: "3.11"
- name: Set up Python dependency cache
- uses: actions/cache@v4
+ uses: actions/cache@v5
with:
path: ~/.cache/pypoetry
key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/backend/poetry.lock') }}
diff --git a/.github/workflows/docs-enhance.yml b/.github/workflows/docs-enhance.yml
index db750e78ae..4baa882cd1 100644
--- a/.github/workflows/docs-enhance.yml
+++ b/.github/workflows/docs-enhance.yml
@@ -38,7 +38,7 @@ jobs:
python-version: "3.11"
- name: Set up Python dependency cache
- uses: actions/cache@v4
+ uses: actions/cache@v5
with:
path: ~/.cache/pypoetry
key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/backend/poetry.lock') }}
diff --git a/.github/workflows/platform-backend-ci.yml b/.github/workflows/platform-backend-ci.yml
index f66cce8a37..a301477ecf 100644
--- a/.github/workflows/platform-backend-ci.yml
+++ b/.github/workflows/platform-backend-ci.yml
@@ -88,7 +88,7 @@ jobs:
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
- name: Set up Python dependency cache
- uses: actions/cache@v4
+ uses: actions/cache@v5
with:
path: ~/.cache/pypoetry
key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/backend/poetry.lock') }}
diff --git a/.github/workflows/platform-frontend-ci.yml b/.github/workflows/platform-frontend-ci.yml
index 14676a6a1f..01e057207d 100644
--- a/.github/workflows/platform-frontend-ci.yml
+++ b/.github/workflows/platform-frontend-ci.yml
@@ -54,7 +54,7 @@ jobs:
run: echo "key=${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}" >> $GITHUB_OUTPUT
- name: Cache dependencies
- uses: actions/cache@v4
+ uses: actions/cache@v5
with:
path: ~/.pnpm-store
key: ${{ steps.cache-key.outputs.key }}
@@ -82,7 +82,7 @@ jobs:
run: corepack enable
- name: Restore dependencies cache
- uses: actions/cache@v4
+ uses: actions/cache@v5
with:
path: ~/.pnpm-store
key: ${{ needs.setup.outputs.cache-key }}
@@ -120,7 +120,7 @@ jobs:
run: corepack enable
- name: Restore dependencies cache
- uses: actions/cache@v4
+ uses: actions/cache@v5
with:
path: ~/.pnpm-store
key: ${{ needs.setup.outputs.cache-key }}
@@ -176,7 +176,7 @@ jobs:
uses: docker/setup-buildx-action@v3
- name: Cache Docker layers
- uses: actions/cache@v4
+ uses: actions/cache@v5
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-frontend-test-${{ hashFiles('autogpt_platform/docker-compose.yml', 'autogpt_platform/backend/Dockerfile', 'autogpt_platform/backend/pyproject.toml', 'autogpt_platform/backend/poetry.lock') }}
@@ -231,7 +231,7 @@ jobs:
fi
- name: Restore dependencies cache
- uses: actions/cache@v4
+ uses: actions/cache@v5
with:
path: ~/.pnpm-store
key: ${{ needs.setup.outputs.cache-key }}
@@ -290,7 +290,7 @@ jobs:
run: corepack enable
- name: Restore dependencies cache
- uses: actions/cache@v4
+ uses: actions/cache@v5
with:
path: ~/.pnpm-store
key: ${{ needs.setup.outputs.cache-key }}
diff --git a/.github/workflows/platform-fullstack-ci.yml b/.github/workflows/platform-fullstack-ci.yml
index c888ace6c5..f64d5e33c9 100644
--- a/.github/workflows/platform-fullstack-ci.yml
+++ b/.github/workflows/platform-fullstack-ci.yml
@@ -44,7 +44,7 @@ jobs:
run: echo "key=${{ runner.os }}-pnpm-${{ hashFiles('autogpt_platform/frontend/pnpm-lock.yaml', 'autogpt_platform/frontend/package.json') }}" >> $GITHUB_OUTPUT
- name: Cache dependencies
- uses: actions/cache@v4
+ uses: actions/cache@v5
with:
path: ~/.pnpm-store
key: ${{ steps.cache-key.outputs.key }}
@@ -88,7 +88,7 @@ jobs:
docker compose -f ../docker-compose.yml --profile local --profile deps_backend up -d
- name: Restore dependencies cache
- uses: actions/cache@v4
+ uses: actions/cache@v5
with:
path: ~/.pnpm-store
key: ${{ needs.setup.outputs.cache-key }}
From 1a32ba7d9ab8d844246a050229e977b669924a8a Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Sun, 8 Feb 2026 21:39:05 -0600
Subject: [PATCH 07/18] chore(deps): bump urllib3 from 2.5.0 to 2.6.0 in
/autogpt_platform/backend (#11607)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Bumps [urllib3](https://github.com/urllib3/urllib3) from 2.5.0 to 2.6.0.
Release notes (sourced from urllib3's releases):

2.6.0

🚀 urllib3 is fundraising for HTTP/2 support: urllib3 is raising ~$40,000 USD to release HTTP/2 support and ensure long-term sustainable maintenance of the project after a sharp decline in financial support. If your company or organization uses Python and would benefit from HTTP/2 support in Requests, pip, cloud SDKs, and thousands of other projects, please consider contributing financially to ensure HTTP/2 support is developed sustainably and maintained for the long haul. Thank you for your support.

Security
- Fixed a security issue where the streaming API could improperly handle highly compressed HTTP content ("decompression bombs"), leading to excessive resource consumption even when a small amount of data was requested. Reading small chunks of compressed data is safer and much more efficient now. (CVE-2025-66471 reported by @Cycloctane, 8.9 High, GHSA-2xpw-w6gg-jr37)
- Fixed a security issue where an attacker could compose an HTTP response with virtually unlimited links in the `Content-Encoding` header, potentially leading to a denial of service (DoS) attack by exhausting system resources during decoding. The number of allowed chained encodings is now limited to 5. (CVE-2025-66418 reported by @illia-v, 8.9 High, GHSA-gm62-xv2j-4w53)

[!IMPORTANT]
- If urllib3 is not installed with the optional `urllib3[brotli]` extra, but your environment contains a Brotli/brotlicffi/brotlipy package anyway, make sure to upgrade it to at least Brotli 1.2.0 or brotlicffi 1.2.0.0 to benefit from the security fixes and avoid warnings. Prefer using `urllib3[brotli]` to install a compatible Brotli package automatically.
- If you use custom decompressors, please make sure to update them to respect the changed API of `urllib3.response.ContentDecoder`.

Features
- Enabled retrieval, deletion, and membership testing in `HTTPHeaderDict` using bytes keys. (#3653)
- Added host and port information to string representations of `HTTPConnection`. (#3666)
- Added support for Python 3.14 free-threading builds explicitly. (#3696)

Removals
- Removed the `HTTPResponse.getheaders()` method in favor of `HTTPResponse.headers`. Removed the `HTTPResponse.getheader(name, default)` method in favor of `HTTPResponse.headers.get(name, default)`. (#3622)

Bugfixes
- Fixed redirect handling in `urllib3.PoolManager` when an integer is passed for the `retries` parameter. (#3649)
- Fixed `HTTPConnectionPool` when used in Emscripten with no explicit port. (#3664)
- Fixed handling of `SSLKEYLOGFILE` with expandable variables. (#3700)

Misc
- Changed the `zstd` extra to install `backports.zstd` instead of `zstandard` on Python 3.13 and before. (#3693)
- Improved the performance of content decoding by optimizing the `BytesQueueBuffer` class. (#3710)
- Allowed building the urllib3 package with newer setuptools-scm v9.x. (#3652)
- Ensured successful urllib3 builds by setting the Hatchling requirement to ≥ 1.27.0. (#3638)
Changelog (sourced from urllib3's changelog):

2.6.0 (2025-12-05)

Security
- Fixed a security issue where the streaming API could improperly handle highly compressed HTTP content ("decompression bombs"), leading to excessive resource consumption even when a small amount of data was requested. Reading small chunks of compressed data is safer and much more efficient now. (GHSA-2xpw-w6gg-jr37: https://github.com/urllib3/urllib3/security/advisories/GHSA-2xpw-w6gg-jr37)
- Fixed a security issue where an attacker could compose an HTTP response with virtually unlimited links in the `Content-Encoding` header, potentially leading to a denial of service (DoS) attack by exhausting system resources during decoding. The number of allowed chained encodings is now limited to 5. (GHSA-gm62-xv2j-4w53: https://github.com/urllib3/urllib3/security/advisories/GHSA-gm62-xv2j-4w53)

Caution:
- If urllib3 is not installed with the optional `urllib3[brotli]` extra, but your environment contains a Brotli/brotlicffi/brotlipy package anyway, make sure to upgrade it to at least Brotli 1.2.0 or brotlicffi 1.2.0.0 to benefit from the security fixes and avoid warnings. Prefer using `urllib3[brotli]` to install a compatible Brotli package automatically.
- If you use custom decompressors, please make sure to update them to respect the changed API of `urllib3.response.ContentDecoder`.

Features
- Enabled retrieval, deletion, and membership testing in `HTTPHeaderDict` using bytes keys. (#3653)
- Added host and port information to string representations of `HTTPConnection`. (#3666)
- Added support for Python 3.14 free-threading builds explicitly. (#3696)

Removals
- Removed the `HTTPResponse.getheaders()` method in favor of `HTTPResponse.headers`. Removed the `HTTPResponse.getheader(name, default)` method in favor of `HTTPResponse.headers.get(name, default)`. (#3622) See the migration sketch after this changelog.

Bugfixes
- Fixed redirect handling in `urllib3.PoolManager` when an integer is passed for the `retries` parameter. (#3649)
- Fixed `HTTPConnectionPool` when used in Emscripten with no explicit port. (#3664)
- Fixed handling of `SSLKEYLOGFILE` with expandable variables. (#3700)
... (truncated)
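Of the 2.6.0 changes above, the `HTTPResponse.getheaders()` / `getheader()` removal is the one most likely to require code changes in consumers of urllib3. A minimal, hypothetical migration sketch (the URL and header name are placeholders, not code from this repository):

```python
import urllib3

# Hypothetical request, purely to illustrate the 2.6.0 header API migration.
http = urllib3.PoolManager()
resp = http.request("GET", "https://example.com")

# Before 2.6.0 (both methods are now removed):
#   headers = resp.getheaders()
#   ctype = resp.getheader("Content-Type", "application/octet-stream")

# From 2.6.0 on, use the HTTPHeaderDict directly:
headers = resp.headers                                          # replaces getheaders()
ctype = resp.headers.get("Content-Type", "application/octet-stream")  # replaces getheader()
print(ctype)
```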
You can trigger a rebase of this PR by commenting `@dependabot rebase`.
---------
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Nick Tindle
From b04e916c239de95c15a8f2384b39874d62bd0c0c Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 9 Feb 2026 04:26:58 +0000
Subject: [PATCH 08/18] chore(backend/deps-dev): bump the
development-dependencies group across 1 directory with 3 updates (#12005)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Bumps the development-dependencies group with 3 updates in the
/autogpt_platform/backend directory:
[poethepoet](https://github.com/nat-n/poethepoet),
[pytest-watcher](https://github.com/olzhasar/pytest-watcher) and
[ruff](https://github.com/astral-sh/ruff).
Updates `poethepoet` from 0.37.0 to 0.40.0
Release notes (sourced from poethepoet's releases):

0.40.0
- Enhancements, fixes, and code improvements. Full changelog: https://github.com/nat-n/poethepoet/compare/v0.39.0...v0.40.0

0.39.0
Enhancements
- Add support for uv executor options by @rochacbruno and @nat-n in nat-n/poethepoet#327:
  - feat: add various options to the uv executor to be passed to the `uv run` command
  - feat: allow the task executor to be configured with just the type as a string
  - feat: allow executor options to be set at runtime via the new `--executor-opt` CLI global option
  - feat: allow inheritance of compatible executor options from global to task to runtime
  - refactor: extend PoeOptions to support annotating config fields with a config_name to parse, separate from the attribute name
  - refactor: some micro-optimizations to PoeOptions and AnnotationType
  - doc: add a guide for replacing tox with poe + uv
  - doc: tidy up executor docs
  - doc: fix typo in the doc for the expr task
  - test: improve test coverage of PoeOptions
  - test: disable some test cases on Windows that are too flaky
Full changelog: https://github.com/nat-n/poethepoet/compare/v0.38.0...v0.39.0

0.38.0
- Enhancements and breaking changes.
... (truncated)
Commits:
- 0a7247d: Bump version to 0.40.0
- 312e74a: feat: Add choices option to constrain named arguments (#348)
- 5e0b3e5: feat: support ignore_fail on execution task types and ref tasks (#347)
- a3c97e1: test: ensure the test virtual environment is always removed (#346)
- bc04e2f: feat: support capture_output on ref tasks (#343)
- f7b82ef: fix: global executor option (#340)
- 8e7b116: fix: handle SIGHUP and SIGBREAK signals to stop tasks (#344)
- 8e51f2b: refactor: modernize type annotations (#339)
- 72a9225: fix: set uv to quiet during shell completion (#338)
- c6c7306: feat: allow optional envfiles without warnings (#337)
- Additional commits viewable in the compare view
Updates `pytest-watcher` from 0.4.3 to 0.6.3
Release notes (sourced from pytest-watcher's releases):

v0.6.3
Features
- Add debug mode activated with the `PTW_DEBUG` environment variable and improve log messages.
Bugfixes
- Fix terminal flushing after menu and header prints.
- Use monotonic clock for trigger detection to avoid misbehavior on clock changes.

v0.6.2
Bugfixes
- Allow specifying blank patterns via CLI
- Fix duplicate command entries in menu

v0.6.1
Bugfixes
- Trigger tests in interactive mode for carriage return character
Misc
- Integrate towncrier into the development process

v0.6.0
Features
- Add `notify-on-failure` flag (and config option) to emit the BEL symbol on test suite failure.
Infrastructure
- Migrate from poetry to uv.
- Remove tox.

v0.5.0
Fixes
- Merge arguments passed to the runner from config and CLI instead of overriding.
Changes
- Drop support for Python 3.7 & 3.8
Changelog (sourced from pytest-watcher's changelog):

0.6.3 - 2026-01-11
Features
- Add debug mode activated with the `PTW_DEBUG` environment variable and improve log messages.
Bugfixes
- Fix terminal flushing after menu and header prints.
- Use monotonic clock for trigger detection to avoid misbehavior on clock changes.

0.6.2 - 2025-12-28
Bugfixes
- Allow specifying blank patterns via CLI
- Fix duplicate command entries in menu

0.6.1 - 2025-12-26
Bugfixes
- Trigger tests in interactive mode for carriage return character
Misc
- Integrate towncrier into the development process

0.6.0 - 2025-12-22
Features
- Add `notify-on-failure` flag (and config option) to emit the BEL symbol on test suite failure.
Infrastructure
- Migrate from poetry to uv.
- Remove tox.

0.5.0 - 2025-12-21
Fixes
- Merge arguments passed to the runner from config and CLI instead of overriding.
Changes
... (truncated)
Commits:
- c52925b: release v0.6.3
- 23d4989: Add debug mode. Improve log messages
- e3dffa1: Fix terminal flushing after menu and header prints
- 0eeaf60: Use monotonic clock for trigger detection
- 5ed9d0e: Update CHANGELOG. Fix changelog_reader action
- 756f005: release v0.6.2
- 902aa9e: Merge pull request #51 from olzhasar/fix-duplicate-menu
- e6b20d3: Allow specifying empty patterns via CLI
- 2d522da: Fix duplicate menu entries
- 171e6f1: Fix towncrier CHANGELOG versioning
- Additional commits viewable in the compare view
Updates `ruff` from 0.14.14 to 0.15.0
Release notes (sourced from ruff's releases):

0.15.0 (released on 2026-02-03)

Check out the blog post for a migration guide and overview of the changes!

Breaking changes
- Ruff now formats your code according to the 2026 style guide. See the formatter section below or in the blog post for a detailed list of changes.
- The linter now supports block suppression comments. For example, to suppress N803 for all parameters in this function:

      # ruff: disable[N803]
      def foo(
          legacyArg1,
          legacyArg2,
          legacyArg3,
          legacyArg4,
      ): ...
      # ruff: enable[N803]

  See the documentation for more details.
- The `ruff:alpine` Docker image is now based on Alpine 3.23 (up from 3.21).
- The `ruff:debian` and `ruff:debian-slim` Docker images are now based on Debian 13 "Trixie" instead of Debian 12 "Bookworm."
- Binaries for the ppc64 (64-bit big-endian PowerPC) architecture are no longer included in our releases. It should still be possible to build Ruff manually for this platform, if needed.
- Ruff now resolves all extended configuration files before falling back on a default Python version.

Stabilization
The following rules have been stabilized and are no longer in preview:
... (truncated)
Changelog (sourced from ruff's changelog):

0.15.0 (released on 2026-02-03)

The changelog entry mirrors the 0.15.0 release notes above: the formatter follows the 2026 style guide, the linter supports block suppression comments (`# ruff: disable[...]` / `# ruff: enable[...]`), the `ruff:alpine` image moves to Alpine 3.23 and the `ruff:debian`/`ruff:debian-slim` images to Debian 13 "Trixie", ppc64 binaries are no longer published, and extended configuration files are resolved before falling back on a default Python version, followed by the list of stabilized rules.
... (truncated)
Dependabot will resolve any conflicts with this PR as long as you don't
alter it yourself. You can also trigger a rebase manually by commenting
`@dependabot rebase`.
---------
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Nick Tindle
Co-authored-by: Nicholas Tindle
---
autogpt_platform/backend/poetry.lock | 63 ++++++++++++-------------
autogpt_platform/backend/pyproject.toml | 6 +--
2 files changed, 34 insertions(+), 35 deletions(-)
diff --git a/autogpt_platform/backend/poetry.lock b/autogpt_platform/backend/poetry.lock
index d80c6da0fc..425b8d555a 100644
--- a/autogpt_platform/backend/poetry.lock
+++ b/autogpt_platform/backend/poetry.lock
@@ -4640,20 +4640,20 @@ testing = ["coverage", "pytest", "pytest-benchmark"]
[[package]]
name = "poethepoet"
-version = "0.37.0"
+version = "0.41.0"
description = "A task runner that works well with poetry and uv."
optional = false
-python-versions = ">=3.9"
+python-versions = ">=3.10"
groups = ["dev"]
files = [
- {file = "poethepoet-0.37.0-py3-none-any.whl", hash = "sha256:861790276315abcc8df1b4bd60e28c3d48a06db273edd3092f3c94e1a46e5e22"},
- {file = "poethepoet-0.37.0.tar.gz", hash = "sha256:73edf458707c674a079baa46802e21455bda3a7f82a408e58c31b9f4fe8e933d"},
+ {file = "poethepoet-0.41.0-py3-none-any.whl", hash = "sha256:4bab9fd8271664c5d21407e8f12827daeb6aa484dc6cc7620f0c3b4e62b42ee4"},
+ {file = "poethepoet-0.41.0.tar.gz", hash = "sha256:dcaad621dc061f6a90b17d091bebb9ca043d67bfe9bd6aa4185aea3ebf7ff3e6"},
]
[package.dependencies]
pastel = ">=0.2.1,<0.3.0"
-pyyaml = ">=6.0.2,<7.0"
-tomli = {version = ">=1.2.2", markers = "python_version < \"3.11\""}
+pyyaml = ">=6.0.3,<7.0"
+tomli = {version = ">=1.3.0", markers = "python_version < \"3.11\""}
[package.extras]
poetry-plugin = ["poetry (>=1.2.0,<3.0.0) ; python_version < \"4.0\""]
@@ -5919,18 +5919,18 @@ pytest = ">=3.0.0"
[[package]]
name = "pytest-watcher"
-version = "0.4.3"
+version = "0.6.3"
description = "Automatically rerun your tests on file modifications"
optional = false
-python-versions = "<4.0.0,>=3.7.0"
+python-versions = ">=3.9"
groups = ["dev"]
files = [
- {file = "pytest_watcher-0.4.3-py3-none-any.whl", hash = "sha256:d59b1e1396f33a65ea4949b713d6884637755d641646960056a90b267c3460f9"},
- {file = "pytest_watcher-0.4.3.tar.gz", hash = "sha256:0cb0e4661648c8c0ff2b2d25efa5a8e421784b9e4c60fcecbf9b7c30b2d731b3"},
+ {file = "pytest_watcher-0.6.3-py3-none-any.whl", hash = "sha256:83e7748c933087e8276edb6078663e6afa9926434b4fd8b85cf6b32b1d5bec89"},
+ {file = "pytest_watcher-0.6.3.tar.gz", hash = "sha256:842dc904264df0ad2d5264153a66bb452fccfa46598cd6e0a5ef1d19afed9b13"},
]
[package.dependencies]
-tomli = {version = ">=2.0.1,<3.0.0", markers = "python_version < \"3.11\""}
+tomli = {version = ">=2.0.1", markers = "python_version < \"3.11\""}
watchdog = ">=2.0.0"
[[package]]
@@ -6672,31 +6672,30 @@ pyasn1 = ">=0.1.3"
[[package]]
name = "ruff"
-version = "0.14.14"
+version = "0.15.0"
description = "An extremely fast Python linter and code formatter, written in Rust."
optional = false
python-versions = ">=3.7"
groups = ["dev"]
files = [
- {file = "ruff-0.14.14-py3-none-linux_armv6l.whl", hash = "sha256:7cfe36b56e8489dee8fbc777c61959f60ec0f1f11817e8f2415f429552846aed"},
- {file = "ruff-0.14.14-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:6006a0082336e7920b9573ef8a7f52eec837add1265cc74e04ea8a4368cd704c"},
- {file = "ruff-0.14.14-py3-none-macosx_11_0_arm64.whl", hash = "sha256:026c1d25996818f0bf498636686199d9bd0d9d6341c9c2c3b62e2a0198b758de"},
- {file = "ruff-0.14.14-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f666445819d31210b71e0a6d1c01e24447a20b85458eea25a25fe8142210ae0e"},
- {file = "ruff-0.14.14-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3c0f18b922c6d2ff9a5e6c3ee16259adc513ca775bcf82c67ebab7cbd9da5bc8"},
- {file = "ruff-0.14.14-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1629e67489c2dea43e8658c3dba659edbfd87361624b4040d1df04c9740ae906"},
- {file = "ruff-0.14.14-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:27493a2131ea0f899057d49d303e4292b2cae2bb57253c1ed1f256fbcd1da480"},
- {file = "ruff-0.14.14-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:01ff589aab3f5b539e35db38425da31a57521efd1e4ad1ae08fc34dbe30bd7df"},
- {file = "ruff-0.14.14-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1cc12d74eef0f29f51775f5b755913eb523546b88e2d733e1d701fe65144e89b"},
- {file = "ruff-0.14.14-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb8481604b7a9e75eff53772496201690ce2687067e038b3cc31aaf16aa0b974"},
- {file = "ruff-0.14.14-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:14649acb1cf7b5d2d283ebd2f58d56b75836ed8c6f329664fa91cdea19e76e66"},
- {file = "ruff-0.14.14-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:e8058d2145566510790eab4e2fad186002e288dec5e0d343a92fe7b0bc1b3e13"},
- {file = "ruff-0.14.14-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:e651e977a79e4c758eb807f0481d673a67ffe53cfa92209781dfa3a996cf8412"},
- {file = "ruff-0.14.14-py3-none-musllinux_1_2_i686.whl", hash = "sha256:cc8b22da8d9d6fdd844a68ae937e2a0adf9b16514e9a97cc60355e2d4b219fc3"},
- {file = "ruff-0.14.14-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:16bc890fb4cc9781bb05beb5ab4cd51be9e7cb376bf1dd3580512b24eb3fda2b"},
- {file = "ruff-0.14.14-py3-none-win32.whl", hash = "sha256:b530c191970b143375b6a68e6f743800b2b786bbcf03a7965b06c4bf04568167"},
- {file = "ruff-0.14.14-py3-none-win_amd64.whl", hash = "sha256:3dde1435e6b6fe5b66506c1dff67a421d0b7f6488d466f651c07f4cab3bf20fd"},
- {file = "ruff-0.14.14-py3-none-win_arm64.whl", hash = "sha256:56e6981a98b13a32236a72a8da421d7839221fa308b223b9283312312e5ac76c"},
- {file = "ruff-0.14.14.tar.gz", hash = "sha256:2d0f819c9a90205f3a867dbbd0be083bee9912e170fd7d9704cc8ae45824896b"},
+ {file = "ruff-0.15.0-py3-none-linux_armv6l.whl", hash = "sha256:aac4ebaa612a82b23d45964586f24ae9bc23ca101919f5590bdb368d74ad5455"},
+ {file = "ruff-0.15.0-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:dcd4be7cc75cfbbca24a98d04d0b9b36a270d0833241f776b788d59f4142b14d"},
+ {file = "ruff-0.15.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:d747e3319b2bce179c7c1eaad3d884dc0a199b5f4d5187620530adf9105268ce"},
+ {file = "ruff-0.15.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:650bd9c56ae03102c51a5e4b554d74d825ff3abe4db22b90fd32d816c2e90621"},
+ {file = "ruff-0.15.0-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a6664b7eac559e3048223a2da77769c2f92b43a6dfd4720cef42654299a599c9"},
+ {file = "ruff-0.15.0-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6f811f97b0f092b35320d1556f3353bf238763420ade5d9e62ebd2b73f2ff179"},
+ {file = "ruff-0.15.0-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:761ec0a66680fab6454236635a39abaf14198818c8cdf691e036f4bc0f406b2d"},
+ {file = "ruff-0.15.0-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:940f11c2604d317e797b289f4f9f3fa5555ffe4fb574b55ed006c3d9b6f0eb78"},
+ {file = "ruff-0.15.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bcbca3d40558789126da91d7ef9a7c87772ee107033db7191edefa34e2c7f1b4"},
+ {file = "ruff-0.15.0-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:9a121a96db1d75fa3eb39c4539e607f628920dd72ff1f7c5ee4f1b768ac62d6e"},
+ {file = "ruff-0.15.0-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:5298d518e493061f2eabd4abd067c7e4fb89e2f63291c94332e35631c07c3662"},
+ {file = "ruff-0.15.0-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:afb6e603d6375ff0d6b0cee563fa21ab570fd15e65c852cb24922cef25050cf1"},
+ {file = "ruff-0.15.0-py3-none-musllinux_1_2_i686.whl", hash = "sha256:77e515f6b15f828b94dc17d2b4ace334c9ddb7d9468c54b2f9ed2b9c1593ef16"},
+ {file = "ruff-0.15.0-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:6f6e80850a01eb13b3e42ee0ebdf6e4497151b48c35051aab51c101266d187a3"},
+ {file = "ruff-0.15.0-py3-none-win32.whl", hash = "sha256:238a717ef803e501b6d51e0bdd0d2c6e8513fe9eec14002445134d3907cd46c3"},
+ {file = "ruff-0.15.0-py3-none-win_amd64.whl", hash = "sha256:dd5e4d3301dc01de614da3cdffc33d4b1b96fb89e45721f1598e5532ccf78b18"},
+ {file = "ruff-0.15.0-py3-none-win_arm64.whl", hash = "sha256:c480d632cc0ca3f0727acac8b7d053542d9e114a462a145d0b00e7cd658c515a"},
+ {file = "ruff-0.15.0.tar.gz", hash = "sha256:6bdea47cdbea30d40f8f8d7d69c0854ba7c15420ec75a26f463290949d7f7e9a"},
]
[[package]]
@@ -8441,4 +8440,4 @@ cffi = ["cffi (>=1.17,<2.0) ; platform_python_implementation != \"PyPy\" and pyt
[metadata]
lock-version = "2.1"
python-versions = ">=3.10,<3.14"
-content-hash = "57127e7ce52ee64c6eea638b269ad0185bad72ff95d11cca28bd1a082015d396"
+content-hash = "14686ee0e2dc446a75d0db145b08dc410dc31c357e25085bb0f9b0174711c4b1"
diff --git a/autogpt_platform/backend/pyproject.toml b/autogpt_platform/backend/pyproject.toml
index b1f755b8c8..677b73b468 100644
--- a/autogpt_platform/backend/pyproject.toml
+++ b/autogpt_platform/backend/pyproject.toml
@@ -95,13 +95,13 @@ black = "^24.10.0"
faker = "^38.2.0"
httpx = "^0.28.1"
isort = "^5.13.2"
-poethepoet = "^0.37.0"
+poethepoet = "^0.41.0"
pre-commit = "^4.4.0"
pyright = "^1.1.407"
pytest-mock = "^3.15.1"
-pytest-watcher = "^0.4.2"
+pytest-watcher = "^0.6.3"
requests = "^2.32.5"
-ruff = "^0.14.5"
+ruff = "^0.15.0"
# NOTE: please insert new dependencies in their alphabetical location
[build-system]
From 0c6fa60436d4678cd8401a0195b731df7bbe54d6 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 9 Feb 2026 04:27:07 +0000
Subject: [PATCH 09/18] chore(deps): Bump actions/github-script from 7 to 8
(#10870)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Bumps [actions/github-script](https://github.com/actions/github-script)
from 7 to 8.
Release notes (sourced from actions/github-script's releases):
- v8.0.0: ⚠️ Minimum compatible runner version is v2.327.1 (see its release notes); make sure your runner is updated to this version or newer to use this release. Full changelog: https://github.com/actions/github-script/compare/v7.1.0...v8.0.0
- v7.1.0: Full changelog: https://github.com/actions/github-script/compare/v7...v7.1.0
... (truncated)
Commits:
- ed59741: Merge pull request #653 from actions/sneha-krip/readme-for-v8
- 2dc352e: Bold minimum Actions Runner version in README
- 01e118c: Update README for Node 24 runtime requirements
- 8b222ac: Apply suggestion from @salmanmkc
- adc0eea: README for updating actions/github-script from v7 to v8
- 20fe497: Merge pull request #637 from actions/node24
- e7b7f22: update licenses
- 2c81ba0: Update Node.js version support to 24.x
- See full diff in the compare view
You can trigger a rebase of this PR by commenting `@dependabot rebase`.
---
> [!NOTE]
> Update GitHub Actions workflows to use actions/github-script v8.
>
> - **CI Workflows**:
>   - Update `actions/github-script` from `v7` to `v8` in:
>     - `.github/workflows/claude-ci-failure-auto-fix.yml`
>     - `.github/workflows/platform-dev-deploy-event-dispatcher.yml`
>
> Written by [Cursor Bugbot](https://cursor.com/dashboard?tab=bugbot) for commit cfdccf966b92bb9c244a2f39f86bbd37c453c401. This will update automatically on new commits. Configure [here](https://cursor.com/dashboard?tab=bugbot).
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Nicholas Tindle
---
.github/workflows/claude-ci-failure-auto-fix.yml | 2 +-
.../platform-dev-deploy-event-dispatcher.yml | 14 +++++++-------
2 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/.github/workflows/claude-ci-failure-auto-fix.yml b/.github/workflows/claude-ci-failure-auto-fix.yml
index e39c6676ce..070a4acd14 100644
--- a/.github/workflows/claude-ci-failure-auto-fix.yml
+++ b/.github/workflows/claude-ci-failure-auto-fix.yml
@@ -42,7 +42,7 @@ jobs:
- name: Get CI failure details
id: failure_details
- uses: actions/github-script@v7
+ uses: actions/github-script@v8
with:
script: |
const run = await github.rest.actions.getWorkflowRun({
diff --git a/.github/workflows/platform-dev-deploy-event-dispatcher.yml b/.github/workflows/platform-dev-deploy-event-dispatcher.yml
index d7915f64c6..b5324b7c2c 100644
--- a/.github/workflows/platform-dev-deploy-event-dispatcher.yml
+++ b/.github/workflows/platform-dev-deploy-event-dispatcher.yml
@@ -17,7 +17,7 @@ jobs:
- name: Check comment permissions and deployment status
id: check_status
if: github.event_name == 'issue_comment' && github.event.issue.pull_request
- uses: actions/github-script@v7
+ uses: actions/github-script@v8
with:
script: |
const commentBody = context.payload.comment.body.trim();
@@ -55,7 +55,7 @@ jobs:
- name: Post permission denied comment
if: steps.check_status.outputs.permission_denied == 'true'
- uses: actions/github-script@v7
+ uses: actions/github-script@v8
with:
script: |
await github.rest.issues.createComment({
@@ -68,7 +68,7 @@ jobs:
- name: Get PR details for deployment
id: pr_details
if: steps.check_status.outputs.should_deploy == 'true' || steps.check_status.outputs.should_undeploy == 'true'
- uses: actions/github-script@v7
+ uses: actions/github-script@v8
with:
script: |
const pr = await github.rest.pulls.get({
@@ -98,7 +98,7 @@ jobs:
- name: Post deploy success comment
if: steps.check_status.outputs.should_deploy == 'true'
- uses: actions/github-script@v7
+ uses: actions/github-script@v8
with:
script: |
await github.rest.issues.createComment({
@@ -126,7 +126,7 @@ jobs:
- name: Post undeploy success comment
if: steps.check_status.outputs.should_undeploy == 'true'
- uses: actions/github-script@v7
+ uses: actions/github-script@v8
with:
script: |
await github.rest.issues.createComment({
@@ -139,7 +139,7 @@ jobs:
- name: Check deployment status on PR close
id: check_pr_close
if: github.event_name == 'pull_request' && github.event.action == 'closed'
- uses: actions/github-script@v7
+ uses: actions/github-script@v8
with:
script: |
const comments = await github.rest.issues.listComments({
@@ -187,7 +187,7 @@ jobs:
github.event_name == 'pull_request' &&
github.event.action == 'closed' &&
steps.check_pr_close.outputs.should_undeploy == 'true'
- uses: actions/github-script@v7
+ uses: actions/github-script@v8
with:
script: |
await github.rest.issues.createComment({
From 6cbfbdd01380d272aac1479a331f4bf2493e8f67 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 9 Feb 2026 04:54:05 +0000
Subject: [PATCH 10/18] chore(libs/deps-dev): bump the development-dependencies
group across 1 directory with 4 updates (#11349)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Bumps the development-dependencies group with 4 updates in the
/autogpt_platform/autogpt_libs directory:
[pyright](https://github.com/RobertCraigie/pyright-python),
[pytest-asyncio](https://github.com/pytest-dev/pytest-asyncio),
[pytest-mock](https://github.com/pytest-dev/pytest-mock) and
[ruff](https://github.com/astral-sh/ruff).
Updates `pyright` from 1.1.404 to 1.1.407
Updates `pytest-asyncio` from 1.1.0 to 1.3.0
Release notes (sourced from pytest-asyncio's releases):

pytest-asyncio 1.3.0 - 2025-11-10
Removed
- Support for Python 3.9 (#1278)
Added
- Support for pytest 9 (#1279)
Notes for Downstream Packagers
- Tested Python versions include free-threaded Python 3.14t (#1274)
- Tests are run in the same pytest process, instead of spawning a subprocess with `pytest.Pytester.runpytest_subprocess`. This prevents the test suite from accidentally using a system installation of pytest-asyncio, which could result in test errors. (#1275)

pytest-asyncio 1.2.0 - 2025-09-12
Added
- `--asyncio-debug` CLI option and `asyncio_debug` configuration option to enable asyncio debug mode for the default event loop. (#980)
- A `pytest.UsageError` for invalid configuration values of `asyncio_default_fixture_loop_scope` and `asyncio_default_test_loop_scope`. (#1189)
- Compatibility with the Pyright type checker (#731)
Fixed
- `RuntimeError: There is no current event loop in thread 'MainThread'` when any test unsets the event loop (such as when using `asyncio.run` and `asyncio.Runner`). (#1177)
- Deprecation warning when decorating an asynchronous fixture with `@pytest.fixture` in strict mode. The warning message now refers to the correct package. (#1198)
Notes for Downstream Packagers
- Bump the minimum required version of tox to v4.28. This change is only relevant if you use the `tox.ini` file provided by pytest-asyncio to run tests.
- Extend the dependency on typing-extensions>=4.12 from Python<3.10 to Python<3.13.

pytest-asyncio 1.1.1 - 2025-09-12
Notes for Downstream Packagers
- Addresses a build problem with setuptools-scm >= 9 caused by invalid setuptools-scm configuration in pytest-asyncio. (#1192)
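Since the backend's test suite already relies on pytest-asyncio, a quick sanity check after this bump is that async tests still collect and run. A minimal smoke-test sketch (the test name and coroutine are illustrative, not taken from this repository):

```python
import asyncio

import pytest


@pytest.mark.asyncio
async def test_async_sleep_completes():
    # Trivial coroutine used only to confirm pytest-asyncio 1.3.0 still drives
    # async tests after the upgrade; real backend tests exercise actual code.
    await asyncio.sleep(0)
    assert True
```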
Commits:
- 2e9695f: docs: Compile changelog for v1.3.0
- dd0e9ba: docs: Reference correct issue in news fragment.
- 4c31abe: Build(deps): Bump nh3 from 0.3.1 to 0.3.2
- 13e9477: Link to migration guides from changelog
- 4d2cf3c: tests: handle Python 3.14 DefaultEventLoopPolicy deprecation warnings
- ee3549b: test: Remove obsolete test for the event_loop fixture.
- 7a67c82: tests: Fix failing test by preventing warning conversion to error.
- a17b689: test: add pytest config to isolated test directories
- 18afc9d: fix(tests): replace runpytest_subprocess with runpytest
- cdc6bd1: Add support for pytest 9 and drop Python 3.9 support
- Additional commits viewable in the compare view
Updates `pytest-mock` from 3.14.1 to 3.15.1
Release notes (sourced from pytest-mock's releases):

v3.15.1 (2025-09-16)
- #529: Fixed `itertools._tee object has no attribute` error -- now `duplicate_iterators=True` must be passed to `mocker.spy` to duplicate iterators.

v3.15.0 (2025-09-04)
- Python 3.8 (EOL) is no longer supported.
- #524: Added `spy_return_iter` to `mocker.spy`, which contains a duplicate of the return value of the spied method if it is an Iterator.
Changelog (sourced from pytest-mock's changelog):

3.15.1 (2025-09-16)
- [#529](https://github.com/pytest-dev/pytest-mock/issues/529): Fixed `itertools._tee object has no attribute` error -- now `duplicate_iterators=True` must be passed to `mocker.spy` to duplicate iterators (see the sketch after this changelog).

3.15.0 (2025-09-04)
- Python 3.8 (EOL) is no longer supported.
- [#524](https://github.com/pytest-dev/pytest-mock/issues/524): Added `spy_return_iter` to `mocker.spy`, which contains a duplicate of the return value of the spied method if it is an Iterator.
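The `mocker.spy` iterator behavior is the change here most likely to touch existing tests. A small hypothetical sketch of the opt-in flag, assuming the behavior described in the changelog above (the `Numbers` class and test are illustrative, not from the backend's test suite):

```python
# Hypothetical example of the pytest-mock 3.15.x spy behavior described above.
class Numbers:
    def stream(self):
        return iter([1, 2, 3])


def test_spy_duplicates_iterator(mocker):
    nums = Numbers()
    # As of 3.15.1, iterator duplication is opt-in via duplicate_iterators=True.
    spy = mocker.spy(nums, "stream", duplicate_iterators=True)

    assert list(nums.stream()) == [1, 2, 3]
    # spy_return_iter (added in 3.15.0) holds a duplicate of the returned
    # iterator, so inspecting it does not exhaust what the caller consumed.
    assert list(spy.spy_return_iter) == [1, 2, 3]
```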
Commits
e1b5c62
Release 3.15.1
184eb19
Set spy_return_iter only when explicitly requested (#537)
4fa0088
[pre-commit.ci] pre-commit autoupdate (#536)
f5aff33
Fix test failure with pytest 8+ and verbose mode (#535)
adc4187
Bump actions/setup-python from 5 to 6 in the github-actions group (#533)
95ad570
[pre-commit.ci] pre-commit autoupdate (#532)
e696bf0
Fix standalone mock support (#531)
5b29b03
Fix gen-release-notes script
7d22ef4
Merge pull request #528
from pytest-dev/release-3.15.0
90b29f8
Update CHANGELOG for 3.15.0
- Additional commits viewable in compare
view
Updates `ruff` from 0.12.11 to 0.14.4
Release notes (sourced from ruff's releases):

0.14.4 (released on 2025-11-06)

Preview features
- [formatter] Allow newlines after function headers without docstrings (#21110)
- [formatter] Avoid extra parentheses for long `match` patterns with `as` captures (#21176)
- [refurb] Expand fix safety for keyword arguments and Decimals (FURB164) (#21259)
- [refurb] Preserve argument ordering in autofix (FURB103) (#20790)

Bug fixes
- [server] Fix missing diagnostics for notebooks (#21156)
- [flake8-bugbear] Ignore non-NFKC attribute names in B009 and B010 (#21131)
- [refurb] Fix false negative for underscores before sign in Decimal constructor (FURB157) (#21190)
- [ruff] Fix false positives on starred arguments (RUF057) (#21256)

Rule changes
- [airflow] Extend deprecated argument `concurrency` in `airflow..DAG` (AIR301) (#21220)

Documentation
- Improve `extend` docs (#21135)
- [flake8-comprehensions] Fix typo in C416 documentation (#21184)
- Revise Ruff setup instructions for Zed editor (#20935)

Other changes
- Make `ruff analyze graph` work with Jupyter notebooks (#21161)
... (truncated)
Changelog (sourced from ruff's changelog):

0.14.4 (released on 2025-11-06)

The changelog entry mirrors the 0.14.4 release notes above (same preview features, bug fixes, rule changes, documentation updates, and other changes).

0.14.3 (released on 2025-10-30)

Preview features
... (truncated)
You can trigger a rebase of this PR by commenting `@dependabot rebase`.
---------
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Nick Tindle
---
autogpt_platform/autogpt_libs/poetry.lock | 66 ++++++++++----------
autogpt_platform/autogpt_libs/pyproject.toml | 8 +--
2 files changed, 37 insertions(+), 37 deletions(-)
diff --git a/autogpt_platform/autogpt_libs/poetry.lock b/autogpt_platform/autogpt_libs/poetry.lock
index 59c5351a6a..f1d1e932fe 100644
--- a/autogpt_platform/autogpt_libs/poetry.lock
+++ b/autogpt_platform/autogpt_libs/poetry.lock
@@ -2002,14 +2002,14 @@ files = [
[[package]]
name = "pyright"
-version = "1.1.404"
+version = "1.1.408"
description = "Command line wrapper for pyright"
optional = false
python-versions = ">=3.7"
groups = ["dev"]
files = [
- {file = "pyright-1.1.404-py3-none-any.whl", hash = "sha256:c7b7ff1fdb7219c643079e4c3e7d4125f0dafcc19d253b47e898d130ea426419"},
- {file = "pyright-1.1.404.tar.gz", hash = "sha256:455e881a558ca6be9ecca0b30ce08aa78343ecc031d37a198ffa9a7a1abeb63e"},
+ {file = "pyright-1.1.408-py3-none-any.whl", hash = "sha256:090b32865f4fdb1e0e6cd82bf5618480d48eecd2eb2e70f960982a3d9a4c17c1"},
+ {file = "pyright-1.1.408.tar.gz", hash = "sha256:f28f2321f96852fa50b5829ea492f6adb0e6954568d1caa3f3af3a5f555eb684"},
]
[package.dependencies]
@@ -2141,19 +2141,20 @@ dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "requests
[[package]]
name = "pytest-asyncio"
-version = "1.1.0"
+version = "1.3.0"
description = "Pytest support for asyncio"
optional = false
-python-versions = ">=3.9"
+python-versions = ">=3.10"
groups = ["dev"]
files = [
- {file = "pytest_asyncio-1.1.0-py3-none-any.whl", hash = "sha256:5fe2d69607b0bd75c656d1211f969cadba035030156745ee09e7d71740e58ecf"},
- {file = "pytest_asyncio-1.1.0.tar.gz", hash = "sha256:796aa822981e01b68c12e4827b8697108f7205020f24b5793b3c41555dab68ea"},
+ {file = "pytest_asyncio-1.3.0-py3-none-any.whl", hash = "sha256:611e26147c7f77640e6d0a92a38ed17c3e9848063698d5c93d5aa7aa11cebff5"},
+ {file = "pytest_asyncio-1.3.0.tar.gz", hash = "sha256:d7f52f36d231b80ee124cd216ffb19369aa168fc10095013c6b014a34d3ee9e5"},
]
[package.dependencies]
backports-asyncio-runner = {version = ">=1.1,<2", markers = "python_version < \"3.11\""}
-pytest = ">=8.2,<9"
+pytest = ">=8.2,<10"
+typing-extensions = {version = ">=4.12", markers = "python_version < \"3.13\""}
[package.extras]
docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1)"]
@@ -2181,14 +2182,14 @@ testing = ["fields", "hunter", "process-tests", "pytest-xdist", "virtualenv"]
[[package]]
name = "pytest-mock"
-version = "3.14.1"
+version = "3.15.1"
description = "Thin-wrapper around the mock package for easier use with pytest"
optional = false
-python-versions = ">=3.8"
+python-versions = ">=3.9"
groups = ["dev"]
files = [
- {file = "pytest_mock-3.14.1-py3-none-any.whl", hash = "sha256:178aefcd11307d874b4cd3100344e7e2d888d9791a6a1d9bfe90fbc1b74fd1d0"},
- {file = "pytest_mock-3.14.1.tar.gz", hash = "sha256:159e9edac4c451ce77a5cdb9fc5d1100708d2dd4ba3c3df572f14097351af80e"},
+ {file = "pytest_mock-3.15.1-py3-none-any.whl", hash = "sha256:0a25e2eb88fe5168d535041d09a4529a188176ae608a6d249ee65abc0949630d"},
+ {file = "pytest_mock-3.15.1.tar.gz", hash = "sha256:1849a238f6f396da19762269de72cb1814ab44416fa73a8686deac10b0d87a0f"},
]
[package.dependencies]
@@ -2322,31 +2323,30 @@ pyasn1 = ">=0.1.3"
[[package]]
name = "ruff"
-version = "0.12.11"
+version = "0.15.0"
description = "An extremely fast Python linter and code formatter, written in Rust."
optional = false
python-versions = ">=3.7"
groups = ["dev"]
files = [
- {file = "ruff-0.12.11-py3-none-linux_armv6l.whl", hash = "sha256:93fce71e1cac3a8bf9200e63a38ac5c078f3b6baebffb74ba5274fb2ab276065"},
- {file = "ruff-0.12.11-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:b8e33ac7b28c772440afa80cebb972ffd823621ded90404f29e5ab6d1e2d4b93"},
- {file = "ruff-0.12.11-py3-none-macosx_11_0_arm64.whl", hash = "sha256:d69fb9d4937aa19adb2e9f058bc4fbfe986c2040acb1a4a9747734834eaa0bfd"},
- {file = "ruff-0.12.11-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:411954eca8464595077a93e580e2918d0a01a19317af0a72132283e28ae21bee"},
- {file = "ruff-0.12.11-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6a2c0a2e1a450f387bf2c6237c727dd22191ae8c00e448e0672d624b2bbd7fb0"},
- {file = "ruff-0.12.11-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8ca4c3a7f937725fd2413c0e884b5248a19369ab9bdd850b5781348ba283f644"},
- {file = "ruff-0.12.11-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:4d1df0098124006f6a66ecf3581a7f7e754c4df7644b2e6704cd7ca80ff95211"},
- {file = "ruff-0.12.11-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5a8dd5f230efc99a24ace3b77e3555d3fbc0343aeed3fc84c8d89e75ab2ff793"},
- {file = "ruff-0.12.11-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4dc75533039d0ed04cd33fb8ca9ac9620b99672fe7ff1533b6402206901c34ee"},
- {file = "ruff-0.12.11-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4fc58f9266d62c6eccc75261a665f26b4ef64840887fc6cbc552ce5b29f96cc8"},
- {file = "ruff-0.12.11-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:5a0113bd6eafd545146440225fe60b4e9489f59eb5f5f107acd715ba5f0b3d2f"},
- {file = "ruff-0.12.11-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:0d737b4059d66295c3ea5720e6efc152623bb83fde5444209b69cd33a53e2000"},
- {file = "ruff-0.12.11-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:916fc5defee32dbc1fc1650b576a8fed68f5e8256e2180d4d9855aea43d6aab2"},
- {file = "ruff-0.12.11-py3-none-musllinux_1_2_i686.whl", hash = "sha256:c984f07d7adb42d3ded5be894fb4007f30f82c87559438b4879fe7aa08c62b39"},
- {file = "ruff-0.12.11-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:e07fbb89f2e9249f219d88331c833860489b49cdf4b032b8e4432e9b13e8a4b9"},
- {file = "ruff-0.12.11-py3-none-win32.whl", hash = "sha256:c792e8f597c9c756e9bcd4d87cf407a00b60af77078c96f7b6366ea2ce9ba9d3"},
- {file = "ruff-0.12.11-py3-none-win_amd64.whl", hash = "sha256:a3283325960307915b6deb3576b96919ee89432ebd9c48771ca12ee8afe4a0fd"},
- {file = "ruff-0.12.11-py3-none-win_arm64.whl", hash = "sha256:bae4d6e6a2676f8fb0f98b74594a048bae1b944aab17e9f5d504062303c6dbea"},
- {file = "ruff-0.12.11.tar.gz", hash = "sha256:c6b09ae8426a65bbee5425b9d0b82796dbb07cb1af045743c79bfb163001165d"},
+ {file = "ruff-0.15.0-py3-none-linux_armv6l.whl", hash = "sha256:aac4ebaa612a82b23d45964586f24ae9bc23ca101919f5590bdb368d74ad5455"},
+ {file = "ruff-0.15.0-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:dcd4be7cc75cfbbca24a98d04d0b9b36a270d0833241f776b788d59f4142b14d"},
+ {file = "ruff-0.15.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:d747e3319b2bce179c7c1eaad3d884dc0a199b5f4d5187620530adf9105268ce"},
+ {file = "ruff-0.15.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:650bd9c56ae03102c51a5e4b554d74d825ff3abe4db22b90fd32d816c2e90621"},
+ {file = "ruff-0.15.0-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a6664b7eac559e3048223a2da77769c2f92b43a6dfd4720cef42654299a599c9"},
+ {file = "ruff-0.15.0-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6f811f97b0f092b35320d1556f3353bf238763420ade5d9e62ebd2b73f2ff179"},
+ {file = "ruff-0.15.0-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:761ec0a66680fab6454236635a39abaf14198818c8cdf691e036f4bc0f406b2d"},
+ {file = "ruff-0.15.0-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:940f11c2604d317e797b289f4f9f3fa5555ffe4fb574b55ed006c3d9b6f0eb78"},
+ {file = "ruff-0.15.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bcbca3d40558789126da91d7ef9a7c87772ee107033db7191edefa34e2c7f1b4"},
+ {file = "ruff-0.15.0-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:9a121a96db1d75fa3eb39c4539e607f628920dd72ff1f7c5ee4f1b768ac62d6e"},
+ {file = "ruff-0.15.0-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:5298d518e493061f2eabd4abd067c7e4fb89e2f63291c94332e35631c07c3662"},
+ {file = "ruff-0.15.0-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:afb6e603d6375ff0d6b0cee563fa21ab570fd15e65c852cb24922cef25050cf1"},
+ {file = "ruff-0.15.0-py3-none-musllinux_1_2_i686.whl", hash = "sha256:77e515f6b15f828b94dc17d2b4ace334c9ddb7d9468c54b2f9ed2b9c1593ef16"},
+ {file = "ruff-0.15.0-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:6f6e80850a01eb13b3e42ee0ebdf6e4497151b48c35051aab51c101266d187a3"},
+ {file = "ruff-0.15.0-py3-none-win32.whl", hash = "sha256:238a717ef803e501b6d51e0bdd0d2c6e8513fe9eec14002445134d3907cd46c3"},
+ {file = "ruff-0.15.0-py3-none-win_amd64.whl", hash = "sha256:dd5e4d3301dc01de614da3cdffc33d4b1b96fb89e45721f1598e5532ccf78b18"},
+ {file = "ruff-0.15.0-py3-none-win_arm64.whl", hash = "sha256:c480d632cc0ca3f0727acac8b7d053542d9e114a462a145d0b00e7cd658c515a"},
+ {file = "ruff-0.15.0.tar.gz", hash = "sha256:6bdea47cdbea30d40f8f8d7d69c0854ba7c15420ec75a26f463290949d7f7e9a"},
]
[[package]]
@@ -2893,4 +2893,4 @@ type = ["pytest-mypy"]
[metadata]
lock-version = "2.1"
python-versions = ">=3.10,<4.0"
-content-hash = "cc80d3a129b84435a0f40132d073caa37858ca2427ed372fecfd810a61712d0c"
+content-hash = "b7ac335a86aa44c3d7d2802298818b389a6f1286e3e9b7b0edb2ff06377cecaf"
diff --git a/autogpt_platform/autogpt_libs/pyproject.toml b/autogpt_platform/autogpt_libs/pyproject.toml
index 591be11d0b..9b2bcb8fbe 100644
--- a/autogpt_platform/autogpt_libs/pyproject.toml
+++ b/autogpt_platform/autogpt_libs/pyproject.toml
@@ -22,12 +22,12 @@ supabase = "^2.27.2"
uvicorn = "^0.40.0"
[tool.poetry.group.dev.dependencies]
-pyright = "^1.1.404"
+pyright = "^1.1.408"
pytest = "^8.4.1"
-pytest-asyncio = "^1.1.0"
-pytest-mock = "^3.14.1"
+pytest-asyncio = "^1.3.0"
+pytest-mock = "^3.15.1"
pytest-cov = "^6.2.1"
-ruff = "^0.12.11"
+ruff = "^0.15.0"
[build-system]
requires = ["poetry-core"]
From 5dae303ce0a2080b06a971b90b7aaeb6dc4ccb91 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 9 Feb 2026 06:42:47 +0000
Subject: [PATCH 11/18] chore(frontend/deps): Bump react-window and
@types/react-window in /autogpt_platform/frontend (#10943)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Bumps [react-window](https://github.com/bvaughn/react-window) and
[@types/react-window](https://github.com/DefinitelyTyped/DefinitelyTyped/tree/HEAD/types/react-window).
These dependencies needed to be updated together.
Updates `react-window` from 1.8.11 to 2.1.0
Release notes
Sourced from react-window's releases.
2.1.0
Improved ARIA support:
- Add better default ARIA attributes for outer `HTMLDivElement`
- Add optional `ariaAttributes` prop to row and cell renderers to simplify better ARIA attributes for user-rendered cells
- Remove intermediate `HTMLDivElement` from `List` and `Grid`
  - This may enable more/better custom CSS styling
  - This may also enable adding an optional `children` prop to `List` and `Grid` for e.g. overlays/tooltips
- Add optional `tagName` prop; defaults to `"div"` but can be changed to e.g. `"ul"`
// Example of how to use new `ariaAttributes` prop
function RowComponent({
  ariaAttributes,
  index,
  style,
  ...rest
}: RowComponentProps<object>) {
  return (
    <div style={style} {...ariaAttributes}>
      ...
    </div>
  );
}
Added optional `children` prop to better support edge cases like sticky rows.
Minor changes to `onRowsRendered` and `onCellsRendered` callbacks to make it
easier to differentiate between visible items and items rendered due to
overscan settings. These methods will now receive two params: the first
for visible rows and the second for all rows (including overscan), e.g.:
function onRowsRendered(
  visibleRows: {
    startIndex: number;
    stopIndex: number;
  },
  allRows: {
    startIndex: number;
    stopIndex: number;
  }
): void {
  // ...
}
function onCellsRendered(
  visibleCells: {
    columnStartIndex: number;
    columnStopIndex: number;
    rowStartIndex: number;
    rowStopIndex: number;
... (truncated)
Commits
- 1b6840b Merge pull request #836 from bvaughn/ARIA-roles
- 35f651b Revert accidental change to docs example
- 8bce7f5 onRowsRendered/onCellsRendered separate visible and overscan items
- 9f1e8f2 Support custom tagName for outer element and (optional) children
- 7f07ac3 Improve ARIA attributes
- 7234ec3 Reduced network waterfalls between routes
- 5c431a2 Stronger typing for doc website routes
- c9349a4 2.0.1 -> 2.0.2
- 6adc6c0 Merge pull request #832 from bvaughn/issues/831
- bd562c5 Add tests
- Additional commits viewable in compare view
Updates `@types/react-window` from 1.8.8 to 2.0.0
Commits
You can trigger a rebase of this PR by commenting `@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits
that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after
your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge
and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating
it. You can achieve the same result by closing it manually
- `@dependabot show <dependency name> ignore conditions` will show all
of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop
Dependabot creating any more for this major version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop
Dependabot creating any more for this minor version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop
Dependabot creating any more for this dependency (unless you reopen the
PR or upgrade to it yourself)
> **Note**
> Automatic rebases have been disabled on this pull request as it has
been open for over 30 days.
---------
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Nicholas Tindle
Co-authored-by: Nick Tindle
---
autogpt_platform/frontend/package.json | 4 +-
autogpt_platform/frontend/pnpm-lock.yaml | 38 +++++++++----------
.../ActivityDropdown/ActivityDropdown.tsx | 31 +++++++--------
3 files changed, 35 insertions(+), 38 deletions(-)
diff --git a/autogpt_platform/frontend/package.json b/autogpt_platform/frontend/package.json
index f22a182d20..e8c9871a72 100644
--- a/autogpt_platform/frontend/package.json
+++ b/autogpt_platform/frontend/package.json
@@ -102,7 +102,7 @@
"react-markdown": "9.0.3",
"react-modal": "3.16.3",
"react-shepherd": "6.1.9",
- "react-window": "1.8.11",
+ "react-window": "2.2.0",
"recharts": "3.3.0",
"rehype-autolink-headings": "7.1.0",
"rehype-highlight": "7.0.2",
@@ -140,7 +140,7 @@
"@types/react": "18.3.17",
"@types/react-dom": "18.3.5",
"@types/react-modal": "3.16.3",
- "@types/react-window": "1.8.8",
+ "@types/react-window": "2.0.0",
"@vitejs/plugin-react": "5.1.2",
"axe-playwright": "2.2.2",
"chromatic": "13.3.3",
diff --git a/autogpt_platform/frontend/pnpm-lock.yaml b/autogpt_platform/frontend/pnpm-lock.yaml
index db891ccf3f..377a298564 100644
--- a/autogpt_platform/frontend/pnpm-lock.yaml
+++ b/autogpt_platform/frontend/pnpm-lock.yaml
@@ -228,8 +228,8 @@ importers:
specifier: 6.1.9
version: 6.1.9(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(typescript@5.9.3)
react-window:
- specifier: 1.8.11
- version: 1.8.11(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ specifier: 2.2.0
+ version: 2.2.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
recharts:
specifier: 3.3.0
version: 3.3.0(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react-is@18.3.1)(react@18.3.1)(redux@5.0.1)
@@ -337,8 +337,8 @@ importers:
specifier: 3.16.3
version: 3.16.3
'@types/react-window':
- specifier: 1.8.8
- version: 1.8.8
+ specifier: 2.0.0
+ version: 2.0.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
'@vitejs/plugin-react':
specifier: 5.1.2
version: 5.1.2(vite@7.3.1(@types/node@24.10.0)(jiti@2.6.1)(terser@5.44.1)(yaml@2.8.2))
@@ -3469,8 +3469,9 @@ packages:
'@types/react-modal@3.16.3':
resolution: {integrity: sha512-xXuGavyEGaFQDgBv4UVm8/ZsG+qxeQ7f77yNrW3n+1J6XAstUy5rYHeIHPh1KzsGc6IkCIdu6lQ2xWzu1jBTLg==}
- '@types/react-window@1.8.8':
- resolution: {integrity: sha512-8Ls660bHR1AUA2kuRvVG9D/4XpRC6wjAaPT9dil7Ckc76eP9TKWZwwmgfq8Q1LANX3QNDnoU4Zp48A3w+zK69Q==}
+ '@types/react-window@2.0.0':
+ resolution: {integrity: sha512-E8hMDtImEpMk1SjswSvqoSmYvk7GEtyVaTa/GJV++FdDNuMVVEzpAClyJ0nqeKYBrMkGiyH6M1+rPLM0Nu1exQ==}
+ deprecated: This is a stub types definition. react-window provides its own type definitions, so you do not need this installed.
'@types/react@18.3.17':
resolution: {integrity: sha512-opAQ5no6LqJNo9TqnxBKsgnkIYHozW9KSTlFVoSUJYh1Fl/sswkEoqIugRSm7tbh6pABtYjGAjW+GOS23j8qbw==}
@@ -5976,9 +5977,6 @@ packages:
resolution: {integrity: sha512-UERzLsxzllchadvbPs5aolHh65ISpKpM+ccLbOJ8/vvpBKmAWf+la7dXFy7Mr0ySHbdHrFv5kGFCUHHe6GFEmw==}
engines: {node: '>= 4.0.0'}
- memoize-one@5.2.1:
- resolution: {integrity: sha512-zYiwtZUcYyXKo/np96AGZAckk+FWWsUdJ3cHGGmld7+AhvcWmQyGCYUh1hc4Q/pkOhb65dQR/pqCyK0cOaHz4Q==}
-
merge-stream@2.0.0:
resolution: {integrity: sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==}
@@ -6891,12 +6889,11 @@ packages:
'@types/react':
optional: true
- react-window@1.8.11:
- resolution: {integrity: sha512-+SRbUVT2scadgFSWx+R1P754xHPEqvcfSfVX10QYg6POOz+WNgkN48pS+BtZNIMGiL1HYrSEiCkwsMS15QogEQ==}
- engines: {node: '>8.0.0'}
+ react-window@2.2.0:
+ resolution: {integrity: sha512-Y2L7yonHq6K1pQA2P98wT5QdIsEcjBTB7T8o6Mub12hH9eYppXoYu6vgClmcjlh3zfNcW2UrXiJJJqDxUY7GVw==}
peerDependencies:
- react: ^15.0.0 || ^16.0.0 || ^17.0.0 || ^18.0.0 || ^19.0.0
- react-dom: ^15.0.0 || ^16.0.0 || ^17.0.0 || ^18.0.0 || ^19.0.0
+ react: ^18.0.0 || ^19.0.0
+ react-dom: ^18.0.0 || ^19.0.0
react@18.3.1:
resolution: {integrity: sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ==}
@@ -11603,9 +11600,12 @@ snapshots:
dependencies:
'@types/react': 18.3.17
- '@types/react-window@1.8.8':
+ '@types/react-window@2.0.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
dependencies:
- '@types/react': 18.3.17
+ react-window: 2.2.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ transitivePeerDependencies:
+ - react
+ - react-dom
'@types/react@18.3.17':
dependencies:
@@ -14545,8 +14545,6 @@ snapshots:
dependencies:
fs-monkey: 1.1.0
- memoize-one@5.2.1: {}
-
merge-stream@2.0.0: {}
merge2@1.4.1: {}
@@ -15592,10 +15590,8 @@ snapshots:
optionalDependencies:
'@types/react': 18.3.17
- react-window@1.8.11(react-dom@18.3.1(react@18.3.1))(react@18.3.1):
+ react-window@2.2.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1):
dependencies:
- '@babel/runtime': 7.28.4
- memoize-one: 5.2.1
react: 18.3.1
react-dom: 18.3.1(react@18.3.1)
diff --git a/autogpt_platform/frontend/src/components/layout/Navbar/components/AgentActivityDropdown/components/ActivityDropdown/ActivityDropdown.tsx b/autogpt_platform/frontend/src/components/layout/Navbar/components/AgentActivityDropdown/components/ActivityDropdown/ActivityDropdown.tsx
index 263453b327..885877786f 100644
--- a/autogpt_platform/frontend/src/components/layout/Navbar/components/AgentActivityDropdown/components/ActivityDropdown/ActivityDropdown.tsx
+++ b/autogpt_platform/frontend/src/components/layout/Navbar/components/AgentActivityDropdown/components/ActivityDropdown/ActivityDropdown.tsx
@@ -4,7 +4,7 @@ import { Button } from "@/components/atoms/Button/Button";
import { Input } from "@/components/atoms/Input/Input";
import { Text } from "@/components/atoms/Text/Text";
import { Bell, MagnifyingGlass, X } from "@phosphor-icons/react";
-import { FixedSizeList as List } from "react-window";
+import { List, type RowComponentProps } from "react-window";
import { AgentExecutionWithInfo } from "../../helpers";
import { ActivityItem } from "../ActivityItem";
import styles from "./styles.module.css";
@@ -19,14 +19,16 @@ interface Props {
recentFailures: AgentExecutionWithInfo[];
}
-interface VirtualizedItemProps {
- index: number;
- style: React.CSSProperties;
- data: AgentExecutionWithInfo[];
+interface ActivityRowProps {
+ executions: AgentExecutionWithInfo[];
}
-function VirtualizedActivityItem({ index, style, data }: VirtualizedItemProps) {
- const execution = data[index];
+function VirtualizedActivityItem({
+ index,
+ style,
+ executions,
+}: RowComponentProps<ActivityRowProps>) {
+ const execution = executions[index];
return (
@@ -129,14 +131,13 @@ export function ActivityDropdown({
>
{filteredExecutions.length > 0 ? (
- {VirtualizedActivityItem}
-
+ defaultHeight={listHeight}
+ rowCount={filteredExecutions.length}
+ rowHeight={itemHeight}
+ rowProps={{ executions: filteredExecutions }}
+ rowComponent={VirtualizedActivityItem}
+ style={{ width: 320, height: listHeight }}
+ />
) : (
From 1a16e203b8df62efd1fc8dfeb83d5f795385b445 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 9 Feb 2026 07:11:21 +0000
Subject: [PATCH 12/18] chore(deps): Bump actions/setup-node from 4 to 6
(#11213)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Bumps [actions/setup-node](https://github.com/actions/setup-node) from 4
to 6.
Release notes
Sourced from actions/setup-node's releases.
v6.0.0
What's Changed
Breaking Changes
Dependency Upgrades
Full Changelog: https://github.com/actions/setup-node/compare/v5...v6.0.0
v5.0.0
What's Changed
Breaking Changes
This update introduces automatic caching when a valid `packageManager`
field is present in your `package.json`. This aims to improve workflow
performance and make dependency management more seamless.
To disable this automatic caching, set `package-manager-cache: false`:
steps:
  - uses: actions/checkout@v5
  - uses: actions/setup-node@v5
    with:
      package-manager-cache: false
Make sure your runner is on version v2.327.1 or later to ensure
compatibility with this release. See Release Notes.
Dependency Upgrades
New Contributors
Full Changelog: https://github.com/actions/setup-node/compare/v4...v5.0.0
v4.4.0
... (truncated)
Commits
[Dependabot compatibility score](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
You can trigger a rebase of this PR by commenting `@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits
that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after
your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge
and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating
it. You can achieve the same result by closing it manually
- `@dependabot show <dependency name> ignore conditions` will show all
of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop
Dependabot creating any more for this major version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop
Dependabot creating any more for this minor version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop
Dependabot creating any more for this dependency (unless you reopen the
PR or upgrade to it yourself)
> **Note**
> Automatic rebases have been disabled on this pull request as it has
been open for over 30 days.
---------
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Nick Tindle
---
.github/workflows/claude-dependabot.yml | 2 +-
.github/workflows/claude.yml | 2 +-
.github/workflows/copilot-setup-steps.yml | 2 +-
.github/workflows/platform-frontend-ci.yml | 10 +++++-----
.github/workflows/platform-fullstack-ci.yml | 4 ++--
5 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/.github/workflows/claude-dependabot.yml b/.github/workflows/claude-dependabot.yml
index c39fdb0e35..6dbe068c3d 100644
--- a/.github/workflows/claude-dependabot.yml
+++ b/.github/workflows/claude-dependabot.yml
@@ -78,7 +78,7 @@ jobs:
# Frontend Node.js/pnpm setup (mirrors platform-frontend-ci.yml)
- name: Set up Node.js
- uses: actions/setup-node@v4
+ uses: actions/setup-node@v6
with:
node-version: "22"
diff --git a/.github/workflows/claude.yml b/.github/workflows/claude.yml
index 805f3d78bb..8e165b823e 100644
--- a/.github/workflows/claude.yml
+++ b/.github/workflows/claude.yml
@@ -94,7 +94,7 @@ jobs:
# Frontend Node.js/pnpm setup (mirrors platform-frontend-ci.yml)
- name: Set up Node.js
- uses: actions/setup-node@v4
+ uses: actions/setup-node@v6
with:
node-version: "22"
diff --git a/.github/workflows/copilot-setup-steps.yml b/.github/workflows/copilot-setup-steps.yml
index f70fe36572..eae6eea5d2 100644
--- a/.github/workflows/copilot-setup-steps.yml
+++ b/.github/workflows/copilot-setup-steps.yml
@@ -76,7 +76,7 @@ jobs:
# Frontend Node.js/pnpm setup (mirrors platform-frontend-ci.yml)
- name: Set up Node.js
- uses: actions/setup-node@v4
+ uses: actions/setup-node@v6
with:
node-version: "22"
diff --git a/.github/workflows/platform-frontend-ci.yml b/.github/workflows/platform-frontend-ci.yml
index 01e057207d..669a775934 100644
--- a/.github/workflows/platform-frontend-ci.yml
+++ b/.github/workflows/platform-frontend-ci.yml
@@ -42,7 +42,7 @@ jobs:
- 'autogpt_platform/frontend/src/components/**'
- name: Set up Node.js
- uses: actions/setup-node@v4
+ uses: actions/setup-node@v6
with:
node-version: "22.18.0"
@@ -74,7 +74,7 @@ jobs:
uses: actions/checkout@v4
- name: Set up Node.js
- uses: actions/setup-node@v4
+ uses: actions/setup-node@v6
with:
node-version: "22.18.0"
@@ -112,7 +112,7 @@ jobs:
fetch-depth: 0
- name: Set up Node.js
- uses: actions/setup-node@v4
+ uses: actions/setup-node@v6
with:
node-version: "22.18.0"
@@ -153,7 +153,7 @@ jobs:
submodules: recursive
- name: Set up Node.js
- uses: actions/setup-node@v4
+ uses: actions/setup-node@v6
with:
node-version: "22.18.0"
@@ -282,7 +282,7 @@ jobs:
submodules: recursive
- name: Set up Node.js
- uses: actions/setup-node@v4
+ uses: actions/setup-node@v6
with:
node-version: "22.18.0"
diff --git a/.github/workflows/platform-fullstack-ci.yml b/.github/workflows/platform-fullstack-ci.yml
index f64d5e33c9..67be0ae939 100644
--- a/.github/workflows/platform-fullstack-ci.yml
+++ b/.github/workflows/platform-fullstack-ci.yml
@@ -32,7 +32,7 @@ jobs:
uses: actions/checkout@v4
- name: Set up Node.js
- uses: actions/setup-node@v4
+ uses: actions/setup-node@v6
with:
node-version: "22.18.0"
@@ -68,7 +68,7 @@ jobs:
submodules: recursive
- name: Set up Node.js
- uses: actions/setup-node@v4
+ uses: actions/setup-node@v6
with:
node-version: "22.18.0"
From e8fc8ee6234772484e4c0f88ffa1e4d019e6e488 Mon Sep 17 00:00:00 2001
From: Nicholas Tindle
Date: Mon, 9 Feb 2026 01:19:43 -0600
Subject: [PATCH 13/18] fix(backend): filter graph-only blocks from CoPilot's
find_block results (#11892)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Filters out blocks that are unsuitable for standalone execution from
CoPilot's block search and execution. These blocks serve graph-specific
purposes and will either fail, hang, or confuse users when run outside
of a graph context.
**Important:** This does NOT affect the Builder UI which uses
`load_all_blocks()` directly.
### Changes 🏗️
- **find_block.py**: Added `EXCLUDED_BLOCK_TYPES` and
`EXCLUDED_BLOCK_IDS` constants, skip excluded blocks in search results
- **run_block.py**: Added execution guard that returns clear error
message for excluded blocks
- **content_handlers.py**: Added filtering to
`BlockHandler.get_missing_items()` and `get_stats()` to prevent indexing
excluded blocks
**Excluded by BlockType:**
| BlockType | Reason |
|-----------|--------|
| `INPUT` | Graph interface definition - data enters via chat, not graph inputs |
| `OUTPUT` | Graph interface definition - data exits via chat, not graph outputs |
| `WEBHOOK` | Wait for external events - would hang forever in CoPilot |
| `WEBHOOK_MANUAL` | Same as WEBHOOK |
| `NOTE` | Visual annotation only - no runtime behavior |
| `HUMAN_IN_THE_LOOP` | Pauses for human approval - CoPilot IS human-in-the-loop |
| `AGENT` | AgentExecutorBlock requires graph context - use `run_agent` tool instead |
**Excluded by ID:**
| Block | Reason |
|-------|--------|
| `SmartDecisionMakerBlock` | Dynamically discovers downstream blocks via graph topology |
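Conceptually, `find_block.py` and `run_block.py` share the same exclusion
check. A minimal sketch, assuming only the constants added in this PR; the
standalone helper function is illustrative and not part of the diff:
```python
# Sketch of the CoPilot exclusion guard; the helper is illustrative only.
# The two constants are the ones added to find_block.py in this PR.
from backend.api.features.chat.tools.find_block import (
    COPILOT_EXCLUDED_BLOCK_IDS,
    COPILOT_EXCLUDED_BLOCK_TYPES,
)


def is_copilot_excluded(block) -> bool:
    """True if the block is graph-only and must be hidden from CoPilot."""
    return (
        block.block_type in COPILOT_EXCLUDED_BLOCK_TYPES
        or block.id in COPILOT_EXCLUDED_BLOCK_IDS
    )
```
In `find_block` this guard is applied while building search results (excluded
blocks are simply skipped); in `run_block` it returns an `ErrorResponse`
before any execution is attempted.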
### Checklist 📋
#### For code changes:
- [x] I have clearly listed my changes in the PR description
- [x] I have made a test plan
- [x] I have tested my changes according to the test plan:
- [ ] Search for "input" in CoPilot - should NOT return AgentInputBlock
variants
- [ ] Search for "output" in CoPilot - should NOT return
AgentOutputBlock
- [ ] Search for "webhook" in CoPilot - should NOT return trigger blocks
- [ ] Search for "human" in CoPilot - should NOT return
HumanInTheLoopBlock
- [ ] Search for "decision" in CoPilot - should NOT return
SmartDecisionMakerBlock
- [ ] Verify functional blocks still appear (e.g., "email", "http",
"text")
- [ ] Verify Builder UI still shows ALL blocks (no regression)
#### For configuration changes:
- [x] `.env.default` is updated or already compatible with my changes
- [x] `docker-compose.yml` is updated or already compatible with my
changes
- [x] I have included a list of my configuration changes in the PR
description (under **Changes**)
No configuration changes required.
---
Resolves: [SECRT-1831](https://linear.app/autogpt/issue/SECRT-1831)
🤖 Generated with [Claude Code](https://claude.ai/code)
---
> [!NOTE]
> **Low Risk**
> Behavior change is limited to CoPilot’s block discovery/execution
guards and is covered by new tests; main risk is inadvertently excluding
a block that should be runnable.
>
> **Overview**
> CoPilot now **filters out graph-only blocks** from `find_block`
results and prevents them from being executed via `run_block`, returning
a clear error when a user attempts to run an excluded block.
>
> `find_block` introduces explicit exclusion lists (by `BlockType` and a
specific block ID), over-fetches search results to maintain up to 10
usable matches after filtering, and adds debug logging when results are
reduced. New unit tests cover both the search filtering and the
`run_block` execution guard; a minor cleanup removes an unused `pytest`
import in `execution_queue_test.py`.
>
> Written by [Cursor
Bugbot](https://cursor.com/dashboard?tab=bugbot) for commit
bc50755dcff892fecd5a0c46c4bd629742320e3c. This will update automatically
on new commits. Configure
[here](https://cursor.com/dashboard?tab=bugbot).
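A rough sketch of the over-fetch-then-filter behaviour mentioned in the note
above (constants mirror `find_block.py`; the free-standing function and its
parameters are illustrative, not the actual implementation):
```python
# Illustrative only: over-fetch search hits, drop disabled/excluded blocks,
# and cap the usable results at the target size.
_TARGET_RESULTS = 10
_OVERFETCH_PAGE_SIZE = 40  # search page size; leaves headroom for filtering


def select_usable_blocks(search_results, get_block, is_excluded):
    blocks = []
    for result in search_results:  # up to _OVERFETCH_PAGE_SIZE hits
        block = get_block(result["content_id"])
        if not block or block.disabled or is_excluded(block):
            continue
        blocks.append(block)
        if len(blocks) >= _TARGET_RESULTS:
            break
    return blocks
```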
---------
Co-authored-by: Claude Opus 4.5
Co-authored-by: claude[bot] <41898282+claude[bot]@users.noreply.github.com>
Co-authored-by: Nicholas Tindle
Co-authored-by: Otto
---
.../api/features/chat/tools/find_block.py | 158 ++++++++++++------
.../features/chat/tools/find_block_test.py | 139 +++++++++++++++
.../api/features/chat/tools/run_block.py | 17 ++
.../api/features/chat/tools/run_block_test.py | 106 ++++++++++++
.../backend/data/execution_queue_test.py | 2 -
5 files changed, 367 insertions(+), 55 deletions(-)
create mode 100644 autogpt_platform/backend/backend/api/features/chat/tools/find_block_test.py
create mode 100644 autogpt_platform/backend/backend/api/features/chat/tools/run_block_test.py
diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/find_block.py b/autogpt_platform/backend/backend/api/features/chat/tools/find_block.py
index 7ca85961f9..f55cd567e8 100644
--- a/autogpt_platform/backend/backend/api/features/chat/tools/find_block.py
+++ b/autogpt_platform/backend/backend/api/features/chat/tools/find_block.py
@@ -13,10 +13,32 @@ from backend.api.features.chat.tools.models import (
NoResultsResponse,
)
from backend.api.features.store.hybrid_search import unified_hybrid_search
-from backend.data.block import get_block
+from backend.data.block import BlockType, get_block
logger = logging.getLogger(__name__)
+_TARGET_RESULTS = 10
+# Over-fetch to compensate for post-hoc filtering of graph-only blocks.
+# 40 is ~2x the number of currently excluded blocks; query cost at 10 vs 40 is minimal
+_OVERFETCH_PAGE_SIZE = 40
+
+# Block types that only work within graphs and cannot run standalone in CoPilot.
+COPILOT_EXCLUDED_BLOCK_TYPES = {
+ BlockType.INPUT, # Graph interface definition - data enters via chat, not graph inputs
+ BlockType.OUTPUT, # Graph interface definition - data exits via chat, not graph outputs
+ BlockType.WEBHOOK, # Wait for external events - would hang forever in CoPilot
+ BlockType.WEBHOOK_MANUAL, # Same as WEBHOOK
+ BlockType.NOTE, # Visual annotation only - no runtime behavior
+ BlockType.HUMAN_IN_THE_LOOP, # Pauses for human approval - CoPilot IS human-in-the-loop
+ BlockType.AGENT, # AgentExecutorBlock requires execution_context - use run_agent tool
+}
+
+# Specific block IDs excluded from CoPilot (STANDARD type but still require graph context)
+COPILOT_EXCLUDED_BLOCK_IDS = {
+ # SmartDecisionMakerBlock - dynamically discovers downstream blocks via graph topology
+ "3b191d9f-356f-482d-8238-ba04b6d18381",
+}
+
class FindBlockTool(BaseTool):
"""Tool for searching available blocks."""
@@ -88,7 +110,7 @@ class FindBlockTool(BaseTool):
query=query,
content_types=[ContentType.BLOCK],
page=1,
- page_size=10,
+ page_size=_OVERFETCH_PAGE_SIZE,
)
if not results:
@@ -108,60 +130,90 @@ class FindBlockTool(BaseTool):
block = get_block(block_id)
# Skip disabled blocks
- if block and not block.disabled:
- # Get input/output schemas
- input_schema = {}
- output_schema = {}
- try:
- input_schema = block.input_schema.jsonschema()
- except Exception:
- pass
- try:
- output_schema = block.output_schema.jsonschema()
- except Exception:
- pass
+ if not block or block.disabled:
+ continue
- # Get categories from block instance
- categories = []
- if hasattr(block, "categories") and block.categories:
- categories = [cat.value for cat in block.categories]
+ # Skip blocks excluded from CoPilot (graph-only blocks)
+ if (
+ block.block_type in COPILOT_EXCLUDED_BLOCK_TYPES
+ or block.id in COPILOT_EXCLUDED_BLOCK_IDS
+ ):
+ continue
- # Extract required inputs for easier use
- required_inputs: list[BlockInputFieldInfo] = []
- if input_schema:
- properties = input_schema.get("properties", {})
- required_fields = set(input_schema.get("required", []))
- # Get credential field names to exclude from required inputs
- credentials_fields = set(
- block.input_schema.get_credentials_fields().keys()
- )
-
- for field_name, field_schema in properties.items():
- # Skip credential fields - they're handled separately
- if field_name in credentials_fields:
- continue
-
- required_inputs.append(
- BlockInputFieldInfo(
- name=field_name,
- type=field_schema.get("type", "string"),
- description=field_schema.get("description", ""),
- required=field_name in required_fields,
- default=field_schema.get("default"),
- )
- )
-
- blocks.append(
- BlockInfoSummary(
- id=block_id,
- name=block.name,
- description=block.description or "",
- categories=categories,
- input_schema=input_schema,
- output_schema=output_schema,
- required_inputs=required_inputs,
- )
+ # Get input/output schemas
+ input_schema = {}
+ output_schema = {}
+ try:
+ input_schema = block.input_schema.jsonschema()
+ except Exception as e:
+ logger.debug(
+ "Failed to generate input schema for block %s: %s",
+ block_id,
+ e,
)
+ try:
+ output_schema = block.output_schema.jsonschema()
+ except Exception as e:
+ logger.debug(
+ "Failed to generate output schema for block %s: %s",
+ block_id,
+ e,
+ )
+
+ # Get categories from block instance
+ categories = []
+ if hasattr(block, "categories") and block.categories:
+ categories = [cat.value for cat in block.categories]
+
+ # Extract required inputs for easier use
+ required_inputs: list[BlockInputFieldInfo] = []
+ if input_schema:
+ properties = input_schema.get("properties", {})
+ required_fields = set(input_schema.get("required", []))
+ # Get credential field names to exclude from required inputs
+ credentials_fields = set(
+ block.input_schema.get_credentials_fields().keys()
+ )
+
+ for field_name, field_schema in properties.items():
+ # Skip credential fields - they're handled separately
+ if field_name in credentials_fields:
+ continue
+
+ required_inputs.append(
+ BlockInputFieldInfo(
+ name=field_name,
+ type=field_schema.get("type", "string"),
+ description=field_schema.get("description", ""),
+ required=field_name in required_fields,
+ default=field_schema.get("default"),
+ )
+ )
+
+ blocks.append(
+ BlockInfoSummary(
+ id=block_id,
+ name=block.name,
+ description=block.description or "",
+ categories=categories,
+ input_schema=input_schema,
+ output_schema=output_schema,
+ required_inputs=required_inputs,
+ )
+ )
+
+ if len(blocks) >= _TARGET_RESULTS:
+ break
+
+ if blocks and len(blocks) < _TARGET_RESULTS:
+ logger.debug(
+ "find_block returned %d/%d results for query '%s' "
+ "(filtered %d excluded/disabled blocks)",
+ len(blocks),
+ _TARGET_RESULTS,
+ query,
+ len(results) - len(blocks),
+ )
if not blocks:
return NoResultsResponse(
diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/find_block_test.py b/autogpt_platform/backend/backend/api/features/chat/tools/find_block_test.py
new file mode 100644
index 0000000000..0f3d4cbfa5
--- /dev/null
+++ b/autogpt_platform/backend/backend/api/features/chat/tools/find_block_test.py
@@ -0,0 +1,139 @@
+"""Tests for block filtering in FindBlockTool."""
+
+from unittest.mock import AsyncMock, MagicMock, patch
+
+import pytest
+
+from backend.api.features.chat.tools.find_block import (
+ COPILOT_EXCLUDED_BLOCK_IDS,
+ COPILOT_EXCLUDED_BLOCK_TYPES,
+ FindBlockTool,
+)
+from backend.api.features.chat.tools.models import BlockListResponse
+from backend.data.block import BlockType
+
+from ._test_data import make_session
+
+_TEST_USER_ID = "test-user-find-block"
+
+
+def make_mock_block(
+ block_id: str, name: str, block_type: BlockType, disabled: bool = False
+):
+ """Create a mock block for testing."""
+ mock = MagicMock()
+ mock.id = block_id
+ mock.name = name
+ mock.description = f"{name} description"
+ mock.block_type = block_type
+ mock.disabled = disabled
+ mock.input_schema = MagicMock()
+ mock.input_schema.jsonschema.return_value = {"properties": {}, "required": []}
+ mock.input_schema.get_credentials_fields.return_value = {}
+ mock.output_schema = MagicMock()
+ mock.output_schema.jsonschema.return_value = {}
+ mock.categories = []
+ return mock
+
+
+class TestFindBlockFiltering:
+ """Tests for block filtering in FindBlockTool."""
+
+ def test_excluded_block_types_contains_expected_types(self):
+ """Verify COPILOT_EXCLUDED_BLOCK_TYPES contains all graph-only types."""
+ assert BlockType.INPUT in COPILOT_EXCLUDED_BLOCK_TYPES
+ assert BlockType.OUTPUT in COPILOT_EXCLUDED_BLOCK_TYPES
+ assert BlockType.WEBHOOK in COPILOT_EXCLUDED_BLOCK_TYPES
+ assert BlockType.WEBHOOK_MANUAL in COPILOT_EXCLUDED_BLOCK_TYPES
+ assert BlockType.NOTE in COPILOT_EXCLUDED_BLOCK_TYPES
+ assert BlockType.HUMAN_IN_THE_LOOP in COPILOT_EXCLUDED_BLOCK_TYPES
+ assert BlockType.AGENT in COPILOT_EXCLUDED_BLOCK_TYPES
+
+ def test_excluded_block_ids_contains_smart_decision_maker(self):
+ """Verify SmartDecisionMakerBlock is in COPILOT_EXCLUDED_BLOCK_IDS."""
+ assert "3b191d9f-356f-482d-8238-ba04b6d18381" in COPILOT_EXCLUDED_BLOCK_IDS
+
+ @pytest.mark.asyncio(loop_scope="session")
+ async def test_excluded_block_type_filtered_from_results(self):
+ """Verify blocks with excluded BlockTypes are filtered from search results."""
+ session = make_session(user_id=_TEST_USER_ID)
+
+ # Mock search returns an INPUT block (excluded) and a STANDARD block (included)
+ search_results = [
+ {"content_id": "input-block-id", "score": 0.9},
+ {"content_id": "standard-block-id", "score": 0.8},
+ ]
+
+ input_block = make_mock_block("input-block-id", "Input Block", BlockType.INPUT)
+ standard_block = make_mock_block(
+ "standard-block-id", "HTTP Request", BlockType.STANDARD
+ )
+
+ def mock_get_block(block_id):
+ return {
+ "input-block-id": input_block,
+ "standard-block-id": standard_block,
+ }.get(block_id)
+
+ with patch(
+ "backend.api.features.chat.tools.find_block.unified_hybrid_search",
+ new_callable=AsyncMock,
+ return_value=(search_results, 2),
+ ):
+ with patch(
+ "backend.api.features.chat.tools.find_block.get_block",
+ side_effect=mock_get_block,
+ ):
+ tool = FindBlockTool()
+ response = await tool._execute(
+ user_id=_TEST_USER_ID, session=session, query="test"
+ )
+
+ # Should only return the standard block, not the INPUT block
+ assert isinstance(response, BlockListResponse)
+ assert len(response.blocks) == 1
+ assert response.blocks[0].id == "standard-block-id"
+
+ @pytest.mark.asyncio(loop_scope="session")
+ async def test_excluded_block_id_filtered_from_results(self):
+ """Verify SmartDecisionMakerBlock is filtered from search results."""
+ session = make_session(user_id=_TEST_USER_ID)
+
+ smart_decision_id = "3b191d9f-356f-482d-8238-ba04b6d18381"
+ search_results = [
+ {"content_id": smart_decision_id, "score": 0.9},
+ {"content_id": "normal-block-id", "score": 0.8},
+ ]
+
+ # SmartDecisionMakerBlock has STANDARD type but is excluded by ID
+ smart_block = make_mock_block(
+ smart_decision_id, "Smart Decision Maker", BlockType.STANDARD
+ )
+ normal_block = make_mock_block(
+ "normal-block-id", "Normal Block", BlockType.STANDARD
+ )
+
+ def mock_get_block(block_id):
+ return {
+ smart_decision_id: smart_block,
+ "normal-block-id": normal_block,
+ }.get(block_id)
+
+ with patch(
+ "backend.api.features.chat.tools.find_block.unified_hybrid_search",
+ new_callable=AsyncMock,
+ return_value=(search_results, 2),
+ ):
+ with patch(
+ "backend.api.features.chat.tools.find_block.get_block",
+ side_effect=mock_get_block,
+ ):
+ tool = FindBlockTool()
+ response = await tool._execute(
+ user_id=_TEST_USER_ID, session=session, query="decision"
+ )
+
+ # Should only return normal block, not SmartDecisionMakerBlock
+ assert isinstance(response, BlockListResponse)
+ assert len(response.blocks) == 1
+ assert response.blocks[0].id == "normal-block-id"
diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py b/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py
index 51bb2c0575..590f81ff23 100644
--- a/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py
+++ b/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py
@@ -8,6 +8,10 @@ from typing import Any
from pydantic_core import PydanticUndefined
from backend.api.features.chat.model import ChatSession
+from backend.api.features.chat.tools.find_block import (
+ COPILOT_EXCLUDED_BLOCK_IDS,
+ COPILOT_EXCLUDED_BLOCK_TYPES,
+)
from backend.data.block import get_block
from backend.data.execution import ExecutionContext
from backend.data.model import CredentialsMetaInput
@@ -212,6 +216,19 @@ class RunBlockTool(BaseTool):
session_id=session_id,
)
+ # Check if block is excluded from CoPilot (graph-only blocks)
+ if (
+ block.block_type in COPILOT_EXCLUDED_BLOCK_TYPES
+ or block.id in COPILOT_EXCLUDED_BLOCK_IDS
+ ):
+ return ErrorResponse(
+ message=(
+ f"Block '{block.name}' cannot be run directly in CoPilot. "
+ "This block is designed for use within graphs only."
+ ),
+ session_id=session_id,
+ )
+
logger.info(f"Executing block {block.name} ({block_id}) for user {user_id}")
creds_manager = IntegrationCredentialsManager()
diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/run_block_test.py b/autogpt_platform/backend/backend/api/features/chat/tools/run_block_test.py
new file mode 100644
index 0000000000..2aae45e875
--- /dev/null
+++ b/autogpt_platform/backend/backend/api/features/chat/tools/run_block_test.py
@@ -0,0 +1,106 @@
+"""Tests for block execution guards in RunBlockTool."""
+
+from unittest.mock import MagicMock, patch
+
+import pytest
+
+from backend.api.features.chat.tools.models import ErrorResponse
+from backend.api.features.chat.tools.run_block import RunBlockTool
+from backend.data.block import BlockType
+
+from ._test_data import make_session
+
+_TEST_USER_ID = "test-user-run-block"
+
+
+def make_mock_block(
+ block_id: str, name: str, block_type: BlockType, disabled: bool = False
+):
+ """Create a mock block for testing."""
+ mock = MagicMock()
+ mock.id = block_id
+ mock.name = name
+ mock.block_type = block_type
+ mock.disabled = disabled
+ mock.input_schema = MagicMock()
+ mock.input_schema.jsonschema.return_value = {"properties": {}, "required": []}
+ mock.input_schema.get_credentials_fields_info.return_value = []
+ return mock
+
+
+class TestRunBlockFiltering:
+ """Tests for block execution guards in RunBlockTool."""
+
+ @pytest.mark.asyncio(loop_scope="session")
+ async def test_excluded_block_type_returns_error(self):
+ """Attempting to execute a block with excluded BlockType returns error."""
+ session = make_session(user_id=_TEST_USER_ID)
+
+ input_block = make_mock_block("input-block-id", "Input Block", BlockType.INPUT)
+
+ with patch(
+ "backend.api.features.chat.tools.run_block.get_block",
+ return_value=input_block,
+ ):
+ tool = RunBlockTool()
+ response = await tool._execute(
+ user_id=_TEST_USER_ID,
+ session=session,
+ block_id="input-block-id",
+ input_data={},
+ )
+
+ assert isinstance(response, ErrorResponse)
+ assert "cannot be run directly in CoPilot" in response.message
+ assert "designed for use within graphs only" in response.message
+
+ @pytest.mark.asyncio(loop_scope="session")
+ async def test_excluded_block_id_returns_error(self):
+ """Attempting to execute SmartDecisionMakerBlock returns error."""
+ session = make_session(user_id=_TEST_USER_ID)
+
+ smart_decision_id = "3b191d9f-356f-482d-8238-ba04b6d18381"
+ smart_block = make_mock_block(
+ smart_decision_id, "Smart Decision Maker", BlockType.STANDARD
+ )
+
+ with patch(
+ "backend.api.features.chat.tools.run_block.get_block",
+ return_value=smart_block,
+ ):
+ tool = RunBlockTool()
+ response = await tool._execute(
+ user_id=_TEST_USER_ID,
+ session=session,
+ block_id=smart_decision_id,
+ input_data={},
+ )
+
+ assert isinstance(response, ErrorResponse)
+ assert "cannot be run directly in CoPilot" in response.message
+
+ @pytest.mark.asyncio(loop_scope="session")
+ async def test_non_excluded_block_passes_guard(self):
+ """Non-excluded blocks pass the filtering guard (may fail later for other reasons)."""
+ session = make_session(user_id=_TEST_USER_ID)
+
+ standard_block = make_mock_block(
+ "standard-id", "HTTP Request", BlockType.STANDARD
+ )
+
+ with patch(
+ "backend.api.features.chat.tools.run_block.get_block",
+ return_value=standard_block,
+ ):
+ tool = RunBlockTool()
+ response = await tool._execute(
+ user_id=_TEST_USER_ID,
+ session=session,
+ block_id="standard-id",
+ input_data={},
+ )
+
+ # Should NOT be an ErrorResponse about CoPilot exclusion
+ # (may be other errors like missing credentials, but not the exclusion guard)
+ if isinstance(response, ErrorResponse):
+ assert "cannot be run directly in CoPilot" not in response.message
diff --git a/autogpt_platform/backend/backend/data/execution_queue_test.py b/autogpt_platform/backend/backend/data/execution_queue_test.py
index ffe0fb265b..7a76adfe05 100644
--- a/autogpt_platform/backend/backend/data/execution_queue_test.py
+++ b/autogpt_platform/backend/backend/data/execution_queue_test.py
@@ -3,8 +3,6 @@
import queue
import threading
-import pytest
-
from backend.data.execution import ExecutionQueue
From caf9ff34e653406dd4fb90cac6f26cc90b38f358 Mon Sep 17 00:00:00 2001
From: Bently
Date: Mon, 9 Feb 2026 10:24:08 +0000
Subject: [PATCH 14/18] fix(backend): Handle stale RabbitMQ channels on
connection drop (#11929)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
### Changes 🏗️
Fixes
[**AUTOGPT-SERVER-1TN**](https://autoagpt.sentry.io/issues/?query=AUTOGPT-SERVER-1TN)
(~39K events since Feb 2025) and related connection issues
**6JC/6JD/6JE/6JF** (~6K combined).
#### Problem
When the RabbitMQ TCP connection drops (network blip, server restart,
etc.):
1. `connect_robust` (aio_pika) automatically reconnects the underlying
AMQP connection
2. But `AsyncRabbitMQ._channel` still references the **old dead
channel**
3. `is_ready` checks `not self._channel.is_closed` — but the channel
object doesn't know the transport is gone
4. `publish_message` tries to use the stale channel →
`ChannelInvalidStateError: No active transport in channel`
5. `@func_retry` retries 5 times, but each retry hits the same stale
channel (it passes `is_ready`)
This means every connection drop generates errors until the process is
restarted.
#### Fix
**New `_ensure_channel()` helper** that resets stale channels before
reconnecting, so `connect()` creates a fresh one instead of
short-circuiting on `is_connected`.
**Explicit `ChannelInvalidStateError` handling in `publish_message`:**
1. First attempt uses `_ensure_channel()` (handles normal staleness)
2. If publish throws `ChannelInvalidStateError`, does a full reconnect
(resets both `_channel` and `_connection`) and retries once
3. `@func_retry` provides additional retry resilience on top
**Simplified `get_channel()`** to use the same resilient helper.
**1 file changed, 62 insertions, 24 deletions.**
#### Impact
- Eliminates ~39K `ChannelInvalidStateError` Sentry events
- RabbitMQ operations self-heal after connection drops without process
restart
- Related transport EOF errors (6JC/6JD/6JE/6JF) should also reduce
---
.../backend/backend/data/rabbitmq.py | 81 +++++++++++++++----
1 file changed, 66 insertions(+), 15 deletions(-)
diff --git a/autogpt_platform/backend/backend/data/rabbitmq.py b/autogpt_platform/backend/backend/data/rabbitmq.py
index bdf2090083..524e21748a 100644
--- a/autogpt_platform/backend/backend/data/rabbitmq.py
+++ b/autogpt_platform/backend/backend/data/rabbitmq.py
@@ -1,3 +1,4 @@
+import asyncio
import logging
from abc import ABC, abstractmethod
from enum import Enum
@@ -225,6 +226,10 @@ class SyncRabbitMQ(RabbitMQBase):
class AsyncRabbitMQ(RabbitMQBase):
"""Asynchronous RabbitMQ client"""
+ def __init__(self, config: RabbitMQConfig):
+ super().__init__(config)
+ self._reconnect_lock: asyncio.Lock | None = None
+
@property
def is_connected(self) -> bool:
return bool(self._connection and not self._connection.is_closed)
@@ -235,7 +240,17 @@ class AsyncRabbitMQ(RabbitMQBase):
@conn_retry("AsyncRabbitMQ", "Acquiring async connection")
async def connect(self):
- if self.is_connected:
+ if self.is_connected and self._channel and not self._channel.is_closed:
+ return
+
+ if (
+ self.is_connected
+ and self._connection
+ and (self._channel is None or self._channel.is_closed)
+ ):
+ self._channel = await self._connection.channel()
+ await self._channel.set_qos(prefetch_count=1)
+ await self.declare_infrastructure()
return
self._connection = await aio_pika.connect_robust(
@@ -291,24 +306,46 @@ class AsyncRabbitMQ(RabbitMQBase):
exchange, routing_key=queue.routing_key or queue.name
)
- @func_retry
- async def publish_message(
+ @property
+ def _lock(self) -> asyncio.Lock:
+ if self._reconnect_lock is None:
+ self._reconnect_lock = asyncio.Lock()
+ return self._reconnect_lock
+
+ async def _ensure_channel(self) -> aio_pika.abc.AbstractChannel:
+ """Get a valid channel, reconnecting if the current one is stale.
+
+ Uses a lock to prevent concurrent reconnection attempts from racing.
+ """
+ if self.is_ready:
+ return self._channel # type: ignore # is_ready guarantees non-None
+
+ async with self._lock:
+ # Double-check after acquiring lock
+ if self.is_ready:
+ return self._channel # type: ignore
+
+ self._channel = None
+ await self.connect()
+
+ if self._channel is None:
+ raise RuntimeError("Channel should be established after connect")
+
+ return self._channel
+
+ async def _publish_once(
self,
routing_key: str,
message: str,
exchange: Optional[Exchange] = None,
persistent: bool = True,
) -> None:
- if not self.is_ready:
- await self.connect()
-
- if self._channel is None:
- raise RuntimeError("Channel should be established after connect")
+ channel = await self._ensure_channel()
if exchange:
- exchange_obj = await self._channel.get_exchange(exchange.name)
+ exchange_obj = await channel.get_exchange(exchange.name)
else:
- exchange_obj = self._channel.default_exchange
+ exchange_obj = channel.default_exchange
await exchange_obj.publish(
aio_pika.Message(
@@ -322,9 +359,23 @@ class AsyncRabbitMQ(RabbitMQBase):
routing_key=routing_key,
)
+ @func_retry
+ async def publish_message(
+ self,
+ routing_key: str,
+ message: str,
+ exchange: Optional[Exchange] = None,
+ persistent: bool = True,
+ ) -> None:
+ try:
+ await self._publish_once(routing_key, message, exchange, persistent)
+ except aio_pika.exceptions.ChannelInvalidStateError:
+ logger.warning(
+ "RabbitMQ channel invalid, forcing reconnect and retrying publish"
+ )
+ async with self._lock:
+ self._channel = None
+ await self._publish_once(routing_key, message, exchange, persistent)
+
async def get_channel(self) -> aio_pika.abc.AbstractChannel:
- if not self.is_ready:
- await self.connect()
- if self._channel is None:
- raise RuntimeError("Channel should be established after connect")
- return self._channel
+ return await self._ensure_channel()
From 1f4105e8f9554d7bff016ce8216041bf4c378bf3 Mon Sep 17 00:00:00 2001
From: Bently
Date: Mon, 9 Feb 2026 10:25:08 +0000
Subject: [PATCH 15/18] fix(frontend): Handle object values in FileInput
component (#11948)
Fixes
[#11800](https://github.com/Significant-Gravitas/AutoGPT/issues/11800)
## Problem
The FileInput component crashed with `TypeError: e.startsWith is not a
function` when the value was an object (from external API) instead of a
string.
## Example Input Object
When using the external API
(`/external-api/v1/graphs/{id}/execute/{version}`), file inputs can be
passed as objects:
```json
{
"node_input": {
"input_image": {
"name": "image.jpeg",
"type": "image/jpeg",
"size": 131147,
"data": "/9j/4QAW..."
}
}
}
```
## Changes
- Updated `getFileLabelFromValue()` to handle object format: `{ name,
type, size, data }`
- Added type guards for string vs object values
- Graceful fallback for edge cases (null, undefined, empty object)
## Test cases verified
- Object with name: returns filename
- Object with type only: extracts and formats MIME type
- String data URI: parses correctly
- String file path: extracts extension
- Edge cases: returns "File" fallback
---
.../components/atoms/FileInput/FileInput.tsx | 26 ++++++++++++++++++-
1 file changed, 25 insertions(+), 1 deletion(-)
diff --git a/autogpt_platform/frontend/src/components/atoms/FileInput/FileInput.tsx b/autogpt_platform/frontend/src/components/atoms/FileInput/FileInput.tsx
index d43063b411..2677a7483b 100644
--- a/autogpt_platform/frontend/src/components/atoms/FileInput/FileInput.tsx
+++ b/autogpt_platform/frontend/src/components/atoms/FileInput/FileInput.tsx
@@ -104,7 +104,31 @@ export function FileInput(props: Props) {
return false;
}
- const getFileLabelFromValue = (val: string) => {
+ const getFileLabelFromValue = (val: unknown): string => {
+ // Handle object format from external API: { name, type, size, data }
+ if (val && typeof val === "object") {
+      const obj = val as Record<string, unknown>;
+ if (typeof obj.name === "string") {
+ return getFileLabel(
+ obj.name,
+ typeof obj.type === "string" ? obj.type : "",
+ );
+ }
+ if (typeof obj.type === "string") {
+ const mimeParts = obj.type.split("/");
+ if (mimeParts.length > 1) {
+ return `${mimeParts[1].toUpperCase()} file`;
+ }
+ return `${obj.type} file`;
+ }
+ return "File";
+ }
+
+ // Handle string values (data URIs or file paths)
+ if (typeof val !== "string") {
+ return "File";
+ }
+
if (val.startsWith("data:")) {
const matches = val.match(/^data:([^;]+);/);
if (matches?.[1]) {
From 5a30d114164d3e680e56e2ec5d7c1f6d772b5586 Mon Sep 17 00:00:00 2001
From: Otto
Date: Mon, 9 Feb 2026 13:43:55 +0000
Subject: [PATCH 16/18] refactor(copilot): Code cleanup and deduplication
(#11950)
## Summary
Code cleanup of the AI Copilot codebase - rebased onto latest dev.
## Changes
### New Files
- `backend/util/validation.py` - UUID validation helpers
- `backend/api/features/chat/tools/helpers.py` - Shared tool utilities
### Credential Matching Consolidation
- Added shared utilities to `utils.py`
- Refactored `run_block._check_block_credentials()` with discriminator
support
- Extracted `_resolve_discriminated_credentials()` for multi-provider
handling
### Routes Cleanup
- Extracted `_create_stream_generator()` and `SSE_RESPONSE_HEADERS`
### Tool Files Cleanup
- Updated `run_agent.py` and `run_block.py` to use shared helpers
**WIP** - This PR will be updated incrementally.
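For reference, a minimal sketch of how the new shared helper in `helpers.py` is meant to be called (the schema values below are illustrative, not taken from a real block):
```python
from backend.api.features.chat.tools.helpers import get_inputs_from_schema

# Hypothetical JSON schema in the shape blocks/graphs expose via input_schema.
schema = {
    "properties": {
        "query": {"title": "Query", "type": "string", "description": "Search text"},
        "credentials": {"title": "Credentials", "type": "object"},
        "limit": {"type": "integer", "default": 10},
    },
    "required": ["query"],
}

# run_block.py passes the block's credential field names as exclude_fields;
# run_agent.py calls it without exclusions.
inputs = get_inputs_from_schema(schema, exclude_fields={"credentials"})
# -> [{"name": "query", "title": "Query", "type": "string",
#      "description": "Search text", "required": True, "default": None},
#     {"name": "limit", "title": "limit", "type": "integer",
#      "description": "", "required": False, "default": 10}]
```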
---
.../api/features/chat/tools/helpers.py | 29 +++
.../api/features/chat/tools/run_agent.py | 21 +-
.../api/features/chat/tools/run_block.py | 185 +++++++-----------
.../backend/api/features/chat/tools/utils.py | 96 ++++++++-
4 files changed, 201 insertions(+), 130 deletions(-)
create mode 100644 autogpt_platform/backend/backend/api/features/chat/tools/helpers.py
diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/helpers.py b/autogpt_platform/backend/backend/api/features/chat/tools/helpers.py
new file mode 100644
index 0000000000..cf53605ac0
--- /dev/null
+++ b/autogpt_platform/backend/backend/api/features/chat/tools/helpers.py
@@ -0,0 +1,29 @@
+"""Shared helpers for chat tools."""
+
+from typing import Any
+
+
+def get_inputs_from_schema(
+ input_schema: dict[str, Any],
+ exclude_fields: set[str] | None = None,
+) -> list[dict[str, Any]]:
+ """Extract input field info from JSON schema."""
+ if not isinstance(input_schema, dict):
+ return []
+
+ exclude = exclude_fields or set()
+ properties = input_schema.get("properties", {})
+ required = set(input_schema.get("required", []))
+
+ return [
+ {
+ "name": name,
+ "title": schema.get("title", name),
+ "type": schema.get("type", "string"),
+ "description": schema.get("description", ""),
+ "required": name in required,
+ "default": schema.get("default"),
+ }
+ for name, schema in properties.items()
+ if name not in exclude
+ ]
diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/run_agent.py b/autogpt_platform/backend/backend/api/features/chat/tools/run_agent.py
index 73d4cf81f2..a9f19bcf62 100644
--- a/autogpt_platform/backend/backend/api/features/chat/tools/run_agent.py
+++ b/autogpt_platform/backend/backend/api/features/chat/tools/run_agent.py
@@ -24,6 +24,7 @@ from backend.util.timezone_utils import (
)
from .base import BaseTool
+from .helpers import get_inputs_from_schema
from .models import (
AgentDetails,
AgentDetailsResponse,
@@ -261,7 +262,7 @@ class RunAgentTool(BaseTool):
),
requirements={
"credentials": requirements_creds_list,
- "inputs": self._get_inputs_list(graph.input_schema),
+ "inputs": get_inputs_from_schema(graph.input_schema),
"execution_modes": self._get_execution_modes(graph),
},
),
@@ -369,22 +370,6 @@ class RunAgentTool(BaseTool):
session_id=session_id,
)
- def _get_inputs_list(self, input_schema: dict[str, Any]) -> list[dict[str, Any]]:
- """Extract inputs list from schema."""
- inputs_list = []
- if isinstance(input_schema, dict) and "properties" in input_schema:
- for field_name, field_schema in input_schema["properties"].items():
- inputs_list.append(
- {
- "name": field_name,
- "title": field_schema.get("title", field_name),
- "type": field_schema.get("type", "string"),
- "description": field_schema.get("description", ""),
- "required": field_name in input_schema.get("required", []),
- }
- )
- return inputs_list
-
def _get_execution_modes(self, graph: GraphModel) -> list[str]:
"""Get available execution modes for the graph."""
trigger_info = graph.trigger_setup_info
@@ -398,7 +383,7 @@ class RunAgentTool(BaseTool):
suffix: str,
) -> str:
"""Build a message describing available inputs for an agent."""
- inputs_list = self._get_inputs_list(graph.input_schema)
+ inputs_list = get_inputs_from_schema(graph.input_schema)
required_names = [i["name"] for i in inputs_list if i["required"]]
optional_names = [i["name"] for i in inputs_list if not i["required"]]
diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py b/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py
index 590f81ff23..fc4a470fdd 100644
--- a/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py
+++ b/autogpt_platform/backend/backend/api/features/chat/tools/run_block.py
@@ -12,14 +12,15 @@ from backend.api.features.chat.tools.find_block import (
COPILOT_EXCLUDED_BLOCK_IDS,
COPILOT_EXCLUDED_BLOCK_TYPES,
)
-from backend.data.block import get_block
+from backend.data.block import AnyBlockSchema, get_block
from backend.data.execution import ExecutionContext
-from backend.data.model import CredentialsMetaInput
+from backend.data.model import CredentialsFieldInfo, CredentialsMetaInput
from backend.data.workspace import get_or_create_workspace
from backend.integrations.creds_manager import IntegrationCredentialsManager
from backend.util.exceptions import BlockError
from .base import BaseTool
+from .helpers import get_inputs_from_schema
from .models import (
BlockOutputResponse,
ErrorResponse,
@@ -28,7 +29,10 @@ from .models import (
ToolResponseBase,
UserReadiness,
)
-from .utils import build_missing_credentials_from_field_info
+from .utils import (
+ build_missing_credentials_from_field_info,
+ match_credentials_to_requirements,
+)
logger = logging.getLogger(__name__)
@@ -77,91 +81,6 @@ class RunBlockTool(BaseTool):
def requires_auth(self) -> bool:
return True
- async def _check_block_credentials(
- self,
- user_id: str,
- block: Any,
- input_data: dict[str, Any] | None = None,
- ) -> tuple[dict[str, CredentialsMetaInput], list[CredentialsMetaInput]]:
- """
- Check if user has required credentials for a block.
-
- Args:
- user_id: User ID
- block: Block to check credentials for
- input_data: Input data for the block (used to determine provider via discriminator)
-
- Returns:
- tuple[matched_credentials, missing_credentials]
- """
- matched_credentials: dict[str, CredentialsMetaInput] = {}
- missing_credentials: list[CredentialsMetaInput] = []
- input_data = input_data or {}
-
- # Get credential field info from block's input schema
- credentials_fields_info = block.input_schema.get_credentials_fields_info()
-
- if not credentials_fields_info:
- return matched_credentials, missing_credentials
-
- # Get user's available credentials
- creds_manager = IntegrationCredentialsManager()
- available_creds = await creds_manager.store.get_all_creds(user_id)
-
- for field_name, field_info in credentials_fields_info.items():
- effective_field_info = field_info
- if field_info.discriminator and field_info.discriminator_mapping:
- # Get discriminator from input, falling back to schema default
- discriminator_value = input_data.get(field_info.discriminator)
- if discriminator_value is None:
- field = block.input_schema.model_fields.get(
- field_info.discriminator
- )
- if field and field.default is not PydanticUndefined:
- discriminator_value = field.default
-
- if (
- discriminator_value
- and discriminator_value in field_info.discriminator_mapping
- ):
- effective_field_info = field_info.discriminate(discriminator_value)
- logger.debug(
- f"Discriminated provider for {field_name}: "
- f"{discriminator_value} -> {effective_field_info.provider}"
- )
-
- matching_cred = next(
- (
- cred
- for cred in available_creds
- if cred.provider in effective_field_info.provider
- and cred.type in effective_field_info.supported_types
- ),
- None,
- )
-
- if matching_cred:
- matched_credentials[field_name] = CredentialsMetaInput(
- id=matching_cred.id,
- provider=matching_cred.provider, # type: ignore
- type=matching_cred.type,
- title=matching_cred.title,
- )
- else:
- # Create a placeholder for the missing credential
- provider = next(iter(effective_field_info.provider), "unknown")
- cred_type = next(iter(effective_field_info.supported_types), "api_key")
- missing_credentials.append(
- CredentialsMetaInput(
- id=field_name,
- provider=provider, # type: ignore
- type=cred_type, # type: ignore
- title=field_name.replace("_", " ").title(),
- )
- )
-
- return matched_credentials, missing_credentials
-
async def _execute(
self,
user_id: str | None,
@@ -232,8 +151,8 @@ class RunBlockTool(BaseTool):
logger.info(f"Executing block {block.name} ({block_id}) for user {user_id}")
creds_manager = IntegrationCredentialsManager()
- matched_credentials, missing_credentials = await self._check_block_credentials(
- user_id, block, input_data
+ matched_credentials, missing_credentials = (
+ await self._resolve_block_credentials(user_id, block, input_data)
)
if missing_credentials:
@@ -362,29 +281,75 @@ class RunBlockTool(BaseTool):
session_id=session_id,
)
- def _get_inputs_list(self, block: Any) -> list[dict[str, Any]]:
+ async def _resolve_block_credentials(
+ self,
+ user_id: str,
+ block: AnyBlockSchema,
+ input_data: dict[str, Any] | None = None,
+ ) -> tuple[dict[str, CredentialsMetaInput], list[CredentialsMetaInput]]:
+ """
+ Resolve credentials for a block by matching user's available credentials.
+
+ Args:
+ user_id: User ID
+ block: Block to resolve credentials for
+ input_data: Input data for the block (used to determine provider via discriminator)
+
+ Returns:
+ tuple of (matched_credentials, missing_credentials) - matched credentials
+ are used for block execution, missing ones indicate setup requirements.
+ """
+ input_data = input_data or {}
+ requirements = self._resolve_discriminated_credentials(block, input_data)
+
+ if not requirements:
+ return {}, []
+
+ return await match_credentials_to_requirements(user_id, requirements)
+
+ def _get_inputs_list(self, block: AnyBlockSchema) -> list[dict[str, Any]]:
"""Extract non-credential inputs from block schema."""
- inputs_list = []
schema = block.input_schema.jsonschema()
- properties = schema.get("properties", {})
- required_fields = set(schema.get("required", []))
-
- # Get credential field names to exclude
credentials_fields = set(block.input_schema.get_credentials_fields().keys())
+ return get_inputs_from_schema(schema, exclude_fields=credentials_fields)
- for field_name, field_schema in properties.items():
- # Skip credential fields
- if field_name in credentials_fields:
- continue
+ def _resolve_discriminated_credentials(
+ self,
+ block: AnyBlockSchema,
+ input_data: dict[str, Any],
+ ) -> dict[str, CredentialsFieldInfo]:
+ """Resolve credential requirements, applying discriminator logic where needed."""
+ credentials_fields_info = block.input_schema.get_credentials_fields_info()
+ if not credentials_fields_info:
+ return {}
- inputs_list.append(
- {
- "name": field_name,
- "title": field_schema.get("title", field_name),
- "type": field_schema.get("type", "string"),
- "description": field_schema.get("description", ""),
- "required": field_name in required_fields,
- }
- )
+ resolved: dict[str, CredentialsFieldInfo] = {}
- return inputs_list
+ for field_name, field_info in credentials_fields_info.items():
+ effective_field_info = field_info
+
+ if field_info.discriminator and field_info.discriminator_mapping:
+ discriminator_value = input_data.get(field_info.discriminator)
+ if discriminator_value is None:
+ field = block.input_schema.model_fields.get(
+ field_info.discriminator
+ )
+ if field and field.default is not PydanticUndefined:
+ discriminator_value = field.default
+
+ if (
+ discriminator_value
+ and discriminator_value in field_info.discriminator_mapping
+ ):
+ effective_field_info = field_info.discriminate(discriminator_value)
+ # For host-scoped credentials, add the discriminator value
+ # (e.g., URL) so _credential_is_for_host can match it
+ effective_field_info.discriminator_values.add(discriminator_value)
+ logger.debug(
+ f"Discriminated provider for {field_name}: "
+ f"{discriminator_value} -> {effective_field_info.provider}"
+ )
+
+ resolved[field_name] = effective_field_info
+
+ return resolved
diff --git a/autogpt_platform/backend/backend/api/features/chat/tools/utils.py b/autogpt_platform/backend/backend/api/features/chat/tools/utils.py
index cda0914809..80a842bf36 100644
--- a/autogpt_platform/backend/backend/api/features/chat/tools/utils.py
+++ b/autogpt_platform/backend/backend/api/features/chat/tools/utils.py
@@ -8,6 +8,7 @@ from backend.api.features.library import model as library_model
from backend.api.features.store import db as store_db
from backend.data.graph import GraphModel
from backend.data.model import (
+ Credentials,
CredentialsFieldInfo,
CredentialsMetaInput,
HostScopedCredentials,
@@ -223,6 +224,99 @@ async def get_or_create_library_agent(
return library_agents[0]
+async def match_credentials_to_requirements(
+ user_id: str,
+ requirements: dict[str, CredentialsFieldInfo],
+) -> tuple[dict[str, CredentialsMetaInput], list[CredentialsMetaInput]]:
+ """
+ Match user's credentials against a dictionary of credential requirements.
+
+ This is the core matching logic shared by both graph and block credential matching.
+ """
+ matched: dict[str, CredentialsMetaInput] = {}
+ missing: list[CredentialsMetaInput] = []
+
+ if not requirements:
+ return matched, missing
+
+ available_creds = await get_user_credentials(user_id)
+
+ for field_name, field_info in requirements.items():
+ matching_cred = find_matching_credential(available_creds, field_info)
+
+ if matching_cred:
+ try:
+ matched[field_name] = create_credential_meta_from_match(matching_cred)
+ except Exception as e:
+ logger.error(
+ f"Failed to create CredentialsMetaInput for field '{field_name}': "
+ f"provider={matching_cred.provider}, type={matching_cred.type}, "
+ f"credential_id={matching_cred.id}",
+ exc_info=True,
+ )
+ provider = next(iter(field_info.provider), "unknown")
+ cred_type = next(iter(field_info.supported_types), "api_key")
+ missing.append(
+ CredentialsMetaInput(
+ id=field_name,
+ provider=provider, # type: ignore
+ type=cred_type, # type: ignore
+ title=f"{field_name} (validation failed: {e})",
+ )
+ )
+ else:
+ provider = next(iter(field_info.provider), "unknown")
+ cred_type = next(iter(field_info.supported_types), "api_key")
+ missing.append(
+ CredentialsMetaInput(
+ id=field_name,
+ provider=provider, # type: ignore
+ type=cred_type, # type: ignore
+ title=field_name.replace("_", " ").title(),
+ )
+ )
+
+ return matched, missing
+
+
+async def get_user_credentials(user_id: str) -> list[Credentials]:
+ """Get all available credentials for a user."""
+ creds_manager = IntegrationCredentialsManager()
+ return await creds_manager.store.get_all_creds(user_id)
+
+
+def find_matching_credential(
+ available_creds: list[Credentials],
+ field_info: CredentialsFieldInfo,
+) -> Credentials | None:
+ """Find a credential that matches the required provider, type, scopes, and host."""
+ for cred in available_creds:
+ if cred.provider not in field_info.provider:
+ continue
+ if cred.type not in field_info.supported_types:
+ continue
+ if cred.type == "oauth2" and not _credential_has_required_scopes(
+ cred, field_info
+ ):
+ continue
+ if cred.type == "host_scoped" and not _credential_is_for_host(cred, field_info):
+ continue
+ return cred
+ return None
+
+
+def create_credential_meta_from_match(
+ matching_cred: Credentials,
+) -> CredentialsMetaInput:
+ """Create a CredentialsMetaInput from a matched credential."""
+ return CredentialsMetaInput(
+ id=matching_cred.id,
+ provider=matching_cred.provider, # type: ignore
+ type=matching_cred.type,
+ title=matching_cred.title,
+ )
+
+
async def match_user_credentials_to_graph(
user_id: str,
graph: GraphModel,
@@ -331,8 +425,6 @@ def _credential_has_required_scopes(
# If no scopes are required, any credential matches
if not requirements.required_scopes:
return True
-
- # Check that credential scopes are a superset of required scopes
return set(credential.scopes).issuperset(requirements.required_scopes)
From 6467f6734f145af70356d61ae5f461e8dc65346f Mon Sep 17 00:00:00 2001
From: Reinier van der Leer
Date: Mon, 9 Feb 2026 15:05:29 +0100
Subject: [PATCH 17/18] debug(backend/chat): Add timing logging to chat stream
generation mechanism (#12019)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
[SECRT-1912: Investigate & eliminate chat session start
latency](https://linear.app/autogpt/issue/SECRT-1912)
### Changes 🏗️
- Add timing logs to `backend.api.features.chat` in `routes.py`,
`service.py`, and `stream_registry.py`
- Remove unneeded DB join in `create_chat_session`
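All of the new logs follow the same shape: time a step with `time.perf_counter()`, emit a `[TIMING]` message, and attach the numbers as `json_fields` extra for structured logging. A minimal sketch of the pattern (step and metadata values are illustrative):
```python
import logging
import time

logger = logging.getLogger(__name__)


def timed_step() -> None:
    # Stand-in for a real step such as session validation or create_task.
    time.sleep(0.05)


# Illustrative metadata; the real handlers build this from session_id/user_id/task_id.
log_meta = {"component": "ChatStream", "session_id": "session-123", "user_id": "user-456"}

step_start = time.perf_counter()
timed_step()
duration_ms = (time.perf_counter() - step_start) * 1000

logger.info(
    f"[TIMING] timed_step completed in {duration_ms:.1f}ms",
    extra={"json_fields": {**log_meta, "duration_ms": duration_ms}},
)
```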
### Checklist 📋
#### For code changes:
- [x] I have clearly listed my changes in the PR description
- [x] I have made a test plan
- [x] I have tested my changes according to the test plan:
- CI checks
---
.../backend/backend/api/features/chat/db.py | 5 +-
.../backend/api/features/chat/routes.py | 226 +++++++++++++-
.../backend/api/features/chat/service.py | 147 ++++++++-
.../api/features/chat/stream_registry.py | 285 +++++++++++++++++-
4 files changed, 629 insertions(+), 34 deletions(-)
diff --git a/autogpt_platform/backend/backend/api/features/chat/db.py b/autogpt_platform/backend/backend/api/features/chat/db.py
index d34b4e5b07..303ea0a698 100644
--- a/autogpt_platform/backend/backend/api/features/chat/db.py
+++ b/autogpt_platform/backend/backend/api/features/chat/db.py
@@ -45,10 +45,7 @@ async def create_chat_session(
successfulAgentRuns=SafeJson({}),
successfulAgentSchedules=SafeJson({}),
)
- return await PrismaChatSession.prisma().create(
- data=data,
- include={"Messages": True},
- )
+ return await PrismaChatSession.prisma().create(data=data)
async def update_chat_session(
diff --git a/autogpt_platform/backend/backend/api/features/chat/routes.py b/autogpt_platform/backend/backend/api/features/chat/routes.py
index 3e731d86ac..74e6e8ba1e 100644
--- a/autogpt_platform/backend/backend/api/features/chat/routes.py
+++ b/autogpt_platform/backend/backend/api/features/chat/routes.py
@@ -266,12 +266,38 @@ async def stream_chat_post(
"""
import asyncio
+ import time
+
+ stream_start_time = time.perf_counter()
+
+ # Base log metadata (task_id added after creation)
+ log_meta = {"component": "ChatStream", "session_id": session_id}
+ if user_id:
+ log_meta["user_id"] = user_id
+
+ logger.info(
+ f"[TIMING] stream_chat_post STARTED, session={session_id}, "
+ f"user={user_id}, message_len={len(request.message)}",
+ extra={"json_fields": log_meta},
+ )
session = await _validate_and_get_session(session_id, user_id)
+ logger.info(
+ f"[TIMING] session validated in {(time.perf_counter() - stream_start_time)*1000:.1f}ms",
+ extra={
+ "json_fields": {
+ **log_meta,
+ "duration_ms": (time.perf_counter() - stream_start_time) * 1000,
+ }
+ },
+ )
# Create a task in the stream registry for reconnection support
task_id = str(uuid_module.uuid4())
operation_id = str(uuid_module.uuid4())
+ log_meta["task_id"] = task_id
+
+ task_create_start = time.perf_counter()
await stream_registry.create_task(
task_id=task_id,
session_id=session_id,
@@ -280,14 +306,46 @@ async def stream_chat_post(
tool_name="chat",
operation_id=operation_id,
)
+ logger.info(
+ f"[TIMING] create_task completed in {(time.perf_counter() - task_create_start)*1000:.1f}ms",
+ extra={
+ "json_fields": {
+ **log_meta,
+ "duration_ms": (time.perf_counter() - task_create_start) * 1000,
+ }
+ },
+ )
# Background task that runs the AI generation independently of SSE connection
async def run_ai_generation():
+ import time as time_module
+
+ gen_start_time = time_module.perf_counter()
+ logger.info(
+ f"[TIMING] run_ai_generation STARTED, task={task_id}, session={session_id}, user={user_id}",
+ extra={"json_fields": log_meta},
+ )
+ first_chunk_time, ttfc = None, None
+ chunk_count = 0
try:
# Emit a start event with task_id for reconnection
start_chunk = StreamStart(messageId=task_id, taskId=task_id)
await stream_registry.publish_chunk(task_id, start_chunk)
+ logger.info(
+ f"[TIMING] StreamStart published at {(time_module.perf_counter() - gen_start_time)*1000:.1f}ms",
+ extra={
+ "json_fields": {
+ **log_meta,
+ "elapsed_ms": (time_module.perf_counter() - gen_start_time)
+ * 1000,
+ }
+ },
+ )
+ logger.info(
+ "[TIMING] Calling stream_chat_completion",
+ extra={"json_fields": log_meta},
+ )
async for chunk in chat_service.stream_chat_completion(
session_id,
request.message,
@@ -296,54 +354,202 @@ async def stream_chat_post(
session=session, # Pass pre-fetched session to avoid double-fetch
context=request.context,
):
+ chunk_count += 1
+ if first_chunk_time is None:
+ first_chunk_time = time_module.perf_counter()
+ ttfc = first_chunk_time - gen_start_time
+ logger.info(
+ f"[TIMING] FIRST AI CHUNK at {ttfc:.2f}s, type={type(chunk).__name__}",
+ extra={
+ "json_fields": {
+ **log_meta,
+ "chunk_type": type(chunk).__name__,
+ "time_to_first_chunk_ms": ttfc * 1000,
+ }
+ },
+ )
# Write to Redis (subscribers will receive via XREAD)
await stream_registry.publish_chunk(task_id, chunk)
- # Mark task as completed
+ gen_end_time = time_module.perf_counter()
+ total_time = (gen_end_time - gen_start_time) * 1000
+ logger.info(
+ f"[TIMING] run_ai_generation FINISHED in {total_time/1000:.1f}s; "
+ f"task={task_id}, session={session_id}, "
+ f"ttfc={ttfc or -1:.2f}s, n_chunks={chunk_count}",
+ extra={
+ "json_fields": {
+ **log_meta,
+ "total_time_ms": total_time,
+ "time_to_first_chunk_ms": (
+ ttfc * 1000 if ttfc is not None else None
+ ),
+ "n_chunks": chunk_count,
+ }
+ },
+ )
+
await stream_registry.mark_task_completed(task_id, "completed")
except Exception as e:
+ elapsed = time_module.perf_counter() - gen_start_time
logger.error(
- f"Error in background AI generation for session {session_id}: {e}"
+ f"[TIMING] run_ai_generation ERROR after {elapsed:.2f}s: {e}",
+ extra={
+ "json_fields": {
+ **log_meta,
+ "elapsed_ms": elapsed * 1000,
+ "error": str(e),
+ }
+ },
)
await stream_registry.mark_task_completed(task_id, "failed")
# Start the AI generation in a background task
bg_task = asyncio.create_task(run_ai_generation())
await stream_registry.set_task_asyncio_task(task_id, bg_task)
+ setup_time = (time.perf_counter() - stream_start_time) * 1000
+ logger.info(
+ f"[TIMING] Background task started, setup={setup_time:.1f}ms",
+ extra={"json_fields": {**log_meta, "setup_time_ms": setup_time}},
+ )
# SSE endpoint that subscribes to the task's stream
async def event_generator() -> AsyncGenerator[str, None]:
+ import time as time_module
+
+ event_gen_start = time_module.perf_counter()
+ logger.info(
+ f"[TIMING] event_generator STARTED, task={task_id}, session={session_id}, "
+ f"user={user_id}",
+ extra={"json_fields": log_meta},
+ )
subscriber_queue = None
+ first_chunk_yielded = False
+ chunks_yielded = 0
try:
# Subscribe to the task stream (this replays existing messages + live updates)
+ subscribe_start = time_module.perf_counter()
+ logger.info(
+ "[TIMING] Calling subscribe_to_task",
+ extra={"json_fields": log_meta},
+ )
subscriber_queue = await stream_registry.subscribe_to_task(
task_id=task_id,
user_id=user_id,
last_message_id="0-0", # Get all messages from the beginning
)
+ subscribe_time = (time_module.perf_counter() - subscribe_start) * 1000
+ logger.info(
+ f"[TIMING] subscribe_to_task completed in {subscribe_time:.1f}ms, "
+ f"queue_ok={subscriber_queue is not None}",
+ extra={
+ "json_fields": {
+ **log_meta,
+ "duration_ms": subscribe_time,
+ "queue_obtained": subscriber_queue is not None,
+ }
+ },
+ )
if subscriber_queue is None:
+ logger.info(
+ "[TIMING] subscriber_queue is None, yielding finish",
+ extra={"json_fields": log_meta},
+ )
yield StreamFinish().to_sse()
yield "data: [DONE]\n\n"
return
# Read from the subscriber queue and yield to SSE
+ logger.info(
+ "[TIMING] Starting to read from subscriber_queue",
+ extra={"json_fields": log_meta},
+ )
while True:
try:
+ queue_wait_start = time_module.perf_counter()
chunk = await asyncio.wait_for(subscriber_queue.get(), timeout=30.0)
+ queue_wait_time = (
+ time_module.perf_counter() - queue_wait_start
+ ) * 1000
+ chunks_yielded += 1
+
+ if not first_chunk_yielded:
+ first_chunk_yielded = True
+ elapsed = time_module.perf_counter() - event_gen_start
+ logger.info(
+ f"[TIMING] FIRST CHUNK from queue at {elapsed:.2f}s, "
+ f"type={type(chunk).__name__}, "
+ f"wait={queue_wait_time:.1f}ms",
+ extra={
+ "json_fields": {
+ **log_meta,
+ "chunk_type": type(chunk).__name__,
+ "elapsed_ms": elapsed * 1000,
+ "queue_wait_ms": queue_wait_time,
+ }
+ },
+ )
+ elif chunks_yielded % 50 == 0:
+ logger.info(
+ f"[TIMING] Chunk #{chunks_yielded}, "
+ f"type={type(chunk).__name__}",
+ extra={
+ "json_fields": {
+ **log_meta,
+ "chunk_number": chunks_yielded,
+ "chunk_type": type(chunk).__name__,
+ }
+ },
+ )
+
yield chunk.to_sse()
# Check for finish signal
if isinstance(chunk, StreamFinish):
+ total_time = time_module.perf_counter() - event_gen_start
+ logger.info(
+ f"[TIMING] StreamFinish received in {total_time:.2f}s; "
+ f"n_chunks={chunks_yielded}",
+ extra={
+ "json_fields": {
+ **log_meta,
+ "chunks_yielded": chunks_yielded,
+ "total_time_ms": total_time * 1000,
+ }
+ },
+ )
break
except asyncio.TimeoutError:
# Send heartbeat to keep connection alive
+ logger.info(
+ f"[TIMING] Heartbeat timeout, chunks_so_far={chunks_yielded}",
+ extra={
+ "json_fields": {**log_meta, "chunks_so_far": chunks_yielded}
+ },
+ )
yield StreamHeartbeat().to_sse()
except GeneratorExit:
+ logger.info(
+ f"[TIMING] GeneratorExit (client disconnected), chunks={chunks_yielded}",
+ extra={
+ "json_fields": {
+ **log_meta,
+ "chunks_yielded": chunks_yielded,
+ "reason": "client_disconnect",
+ }
+ },
+ )
pass # Client disconnected - background task continues
except Exception as e:
- logger.error(f"Error in SSE stream for task {task_id}: {e}")
+ elapsed = (time_module.perf_counter() - event_gen_start) * 1000
+ logger.error(
+ f"[TIMING] event_generator ERROR after {elapsed:.1f}ms: {e}",
+ extra={
+ "json_fields": {**log_meta, "elapsed_ms": elapsed, "error": str(e)}
+ },
+ )
finally:
# Unsubscribe when client disconnects or stream ends to prevent resource leak
if subscriber_queue is not None:
@@ -357,6 +563,18 @@ async def stream_chat_post(
exc_info=True,
)
# AI SDK protocol termination - always yield even if unsubscribe fails
+ total_time = time_module.perf_counter() - event_gen_start
+ logger.info(
+ f"[TIMING] event_generator FINISHED in {total_time:.2f}s; "
+ f"task={task_id}, session={session_id}, n_chunks={chunks_yielded}",
+ extra={
+ "json_fields": {
+ **log_meta,
+ "total_time_ms": total_time * 1000,
+ "chunks_yielded": chunks_yielded,
+ }
+ },
+ )
yield "data: [DONE]\n\n"
return StreamingResponse(
@@ -425,7 +643,7 @@ async def stream_chat_get(
"Chat stream completed",
extra={
"session_id": session_id,
- "chunk_count": chunk_count,
+ "n_chunks": chunk_count,
"first_chunk_type": first_chunk_type,
},
)
diff --git a/autogpt_platform/backend/backend/api/features/chat/service.py b/autogpt_platform/backend/backend/api/features/chat/service.py
index 06da6bdf2b..da18421b98 100644
--- a/autogpt_platform/backend/backend/api/features/chat/service.py
+++ b/autogpt_platform/backend/backend/api/features/chat/service.py
@@ -371,21 +371,45 @@ async def stream_chat_completion(
ValueError: If max_context_messages is exceeded
"""
+ completion_start = time.monotonic()
+
+ # Build log metadata for structured logging
+ log_meta = {"component": "ChatService", "session_id": session_id}
+ if user_id:
+ log_meta["user_id"] = user_id
+
logger.info(
- f"Streaming chat completion for session {session_id} for message {message} and user id {user_id}. Message is user message: {is_user_message}"
+ f"[TIMING] stream_chat_completion STARTED, session={session_id}, user={user_id}, "
+ f"message_len={len(message) if message else 0}, is_user={is_user_message}",
+ extra={
+ "json_fields": {
+ **log_meta,
+ "message_len": len(message) if message else 0,
+ "is_user_message": is_user_message,
+ }
+ },
)
# Only fetch from Redis if session not provided (initial call)
if session is None:
+ fetch_start = time.monotonic()
session = await get_chat_session(session_id, user_id)
+ fetch_time = (time.monotonic() - fetch_start) * 1000
logger.info(
- f"Fetched session from Redis: {session.session_id if session else 'None'}, "
- f"message_count={len(session.messages) if session else 0}"
+ f"[TIMING] get_chat_session took {fetch_time:.1f}ms, "
+ f"n_messages={len(session.messages) if session else 0}",
+ extra={
+ "json_fields": {
+ **log_meta,
+ "duration_ms": fetch_time,
+ "n_messages": len(session.messages) if session else 0,
+ }
+ },
)
else:
logger.info(
- f"Using provided session object: {session.session_id}, "
- f"message_count={len(session.messages)}"
+ f"[TIMING] Using provided session, messages={len(session.messages)}",
+ extra={"json_fields": {**log_meta, "n_messages": len(session.messages)}},
)
if not session:
@@ -406,17 +430,25 @@ async def stream_chat_completion(
# Track user message in PostHog
if is_user_message:
+ posthog_start = time.monotonic()
track_user_message(
user_id=user_id,
session_id=session_id,
message_length=len(message),
)
+ posthog_time = (time.monotonic() - posthog_start) * 1000
+ logger.info(
+ f"[TIMING] track_user_message took {posthog_time:.1f}ms",
+ extra={"json_fields": {**log_meta, "duration_ms": posthog_time}},
+ )
- logger.info(
- f"Upserting session: {session.session_id} with user id {session.user_id}, "
- f"message_count={len(session.messages)}"
- )
+ upsert_start = time.monotonic()
session = await upsert_chat_session(session)
+ upsert_time = (time.monotonic() - upsert_start) * 1000
+ logger.info(
+ f"[TIMING] upsert_chat_session took {upsert_time:.1f}ms",
+ extra={"json_fields": {**log_meta, "duration_ms": upsert_time}},
+ )
assert session, "Session not found"
# Generate title for new sessions on first user message (non-blocking)
@@ -454,7 +486,13 @@ async def stream_chat_completion(
asyncio.create_task(_update_title())
# Build system prompt with business understanding
+ prompt_start = time.monotonic()
system_prompt, understanding = await _build_system_prompt(user_id)
+ prompt_time = (time.monotonic() - prompt_start) * 1000
+ logger.info(
+ f"[TIMING] _build_system_prompt took {prompt_time:.1f}ms",
+ extra={"json_fields": {**log_meta, "duration_ms": prompt_time}},
+ )
# Initialize variables for streaming
assistant_response = ChatMessage(
@@ -483,9 +521,18 @@ async def stream_chat_completion(
text_block_id = str(uuid_module.uuid4())
# Yield message start
+ setup_time = (time.monotonic() - completion_start) * 1000
+ logger.info(
+ f"[TIMING] Setup complete, yielding StreamStart at {setup_time:.1f}ms",
+ extra={"json_fields": {**log_meta, "setup_time_ms": setup_time}},
+ )
yield StreamStart(messageId=message_id)
try:
+ logger.info(
+ "[TIMING] Calling _stream_chat_chunks",
+ extra={"json_fields": log_meta},
+ )
async for chunk in _stream_chat_chunks(
session=session,
tools=tools,
@@ -893,9 +940,21 @@ async def _stream_chat_chunks(
SSE formatted JSON response objects
"""
+ import time as time_module
+
+ stream_chunks_start = time_module.perf_counter()
model = config.model
- logger.info("Starting pure chat stream")
+ # Build log metadata for structured logging
+ log_meta = {"component": "ChatService", "session_id": session.session_id}
+ if session.user_id:
+ log_meta["user_id"] = session.user_id
+
+ logger.info(
+ f"[TIMING] _stream_chat_chunks STARTED, session={session.session_id}, "
+ f"user={session.user_id}, n_messages={len(session.messages)}",
+ extra={"json_fields": {**log_meta, "n_messages": len(session.messages)}},
+ )
messages = session.to_openai_messages()
if system_prompt:
@@ -906,12 +965,18 @@ async def _stream_chat_chunks(
messages = [system_message] + messages
# Apply context window management
+ context_start = time_module.perf_counter()
context_result = await _manage_context_window(
messages=messages,
model=model,
api_key=config.api_key,
base_url=config.base_url,
)
+ context_time = (time_module.perf_counter() - context_start) * 1000
+ logger.info(
+ f"[TIMING] _manage_context_window took {context_time:.1f}ms",
+ extra={"json_fields": {**log_meta, "duration_ms": context_time}},
+ )
if context_result.error:
if "System prompt dropped" in context_result.error:
@@ -946,9 +1011,19 @@ async def _stream_chat_chunks(
while retry_count <= MAX_RETRIES:
try:
+ elapsed = (time_module.perf_counter() - stream_chunks_start) * 1000
+ retry_info = (
+ f" (retry {retry_count}/{MAX_RETRIES})" if retry_count > 0 else ""
+ )
logger.info(
- f"Creating OpenAI chat completion stream..."
- f"{f' (retry {retry_count}/{MAX_RETRIES})' if retry_count > 0 else ''}"
+ f"[TIMING] Creating OpenAI stream at {elapsed:.1f}ms{retry_info}",
+ extra={
+ "json_fields": {
+ **log_meta,
+ "elapsed_ms": elapsed,
+ "retry_count": retry_count,
+ }
+ },
)
# Build extra_body for OpenRouter tracing and PostHog analytics
@@ -965,6 +1040,7 @@ async def _stream_chat_chunks(
:128
] # OpenRouter limit
+ api_call_start = time_module.perf_counter()
stream = await client.chat.completions.create(
model=model,
messages=cast(list[ChatCompletionMessageParam], messages),
@@ -974,6 +1050,11 @@ async def _stream_chat_chunks(
stream_options=ChatCompletionStreamOptionsParam(include_usage=True),
extra_body=extra_body,
)
+ api_init_time = (time_module.perf_counter() - api_call_start) * 1000
+ logger.info(
+ f"[TIMING] OpenAI stream object returned in {api_init_time:.1f}ms",
+ extra={"json_fields": {**log_meta, "duration_ms": api_init_time}},
+ )
# Variables to accumulate tool calls
tool_calls: list[dict[str, Any]] = []
@@ -984,10 +1065,13 @@ async def _stream_chat_chunks(
# Track if we've started the text block
text_started = False
+ first_content_chunk = True
+ chunk_count = 0
# Process the stream
chunk: ChatCompletionChunk
async for chunk in stream:
+ chunk_count += 1
if chunk.usage:
yield StreamUsage(
promptTokens=chunk.usage.prompt_tokens,
@@ -1010,6 +1094,23 @@ async def _stream_chat_chunks(
if not text_started and text_block_id:
yield StreamTextStart(id=text_block_id)
text_started = True
+ # Log timing for first content chunk
+ if first_content_chunk:
+ first_content_chunk = False
+ ttfc = (
+ time_module.perf_counter() - api_call_start
+ ) * 1000
+ logger.info(
+ f"[TIMING] FIRST CONTENT CHUNK at {ttfc:.1f}ms "
+ f"(since API call), n_chunks={chunk_count}",
+ extra={
+ "json_fields": {
+ **log_meta,
+ "time_to_first_chunk_ms": ttfc,
+ "n_chunks": chunk_count,
+ }
+ },
+ )
# Stream the text delta
text_response = StreamTextDelta(
id=text_block_id or "",
@@ -1066,7 +1167,21 @@ async def _stream_chat_chunks(
toolName=tool_calls[idx]["function"]["name"],
)
emitted_start_for_idx.add(idx)
- logger.info(f"Stream complete. Finish reason: {finish_reason}")
+ stream_duration = time_module.perf_counter() - api_call_start
+ logger.info(
+ f"[TIMING] OpenAI stream COMPLETE, finish_reason={finish_reason}, "
+ f"duration={stream_duration:.2f}s, "
+ f"n_chunks={chunk_count}, n_tool_calls={len(tool_calls)}",
+ extra={
+ "json_fields": {
+ **log_meta,
+ "stream_duration_ms": stream_duration * 1000,
+ "finish_reason": finish_reason,
+ "n_chunks": chunk_count,
+ "n_tool_calls": len(tool_calls),
+ }
+ },
+ )
# Yield all accumulated tool calls after the stream is complete
# This ensures all tool call arguments have been fully received
@@ -1086,6 +1201,12 @@ async def _stream_chat_chunks(
# Re-raise to trigger retry logic in the parent function
raise
+ total_time = (time_module.perf_counter() - stream_chunks_start) * 1000
+ logger.info(
+ f"[TIMING] _stream_chat_chunks COMPLETED in {total_time/1000:.1f}s; "
+ f"session={session.session_id}, user={session.user_id}",
+ extra={"json_fields": {**log_meta, "total_time_ms": total_time}},
+ )
yield StreamFinish()
return
except Exception as e:
diff --git a/autogpt_platform/backend/backend/api/features/chat/stream_registry.py b/autogpt_platform/backend/backend/api/features/chat/stream_registry.py
index 88a5023e2b..509d20d9f4 100644
--- a/autogpt_platform/backend/backend/api/features/chat/stream_registry.py
+++ b/autogpt_platform/backend/backend/api/features/chat/stream_registry.py
@@ -104,6 +104,24 @@ async def create_task(
Returns:
The created ActiveTask instance (metadata only)
"""
+ import time
+
+ start_time = time.perf_counter()
+
+ # Build log metadata for structured logging
+ log_meta = {
+ "component": "StreamRegistry",
+ "task_id": task_id,
+ "session_id": session_id,
+ }
+ if user_id:
+ log_meta["user_id"] = user_id
+
+ logger.info(
+ f"[TIMING] create_task STARTED, task={task_id}, session={session_id}, user={user_id}",
+ extra={"json_fields": log_meta},
+ )
+
task = ActiveTask(
task_id=task_id,
session_id=session_id,
@@ -114,10 +132,18 @@ async def create_task(
)
# Store metadata in Redis
+ redis_start = time.perf_counter()
redis = await get_redis_async()
+ redis_time = (time.perf_counter() - redis_start) * 1000
+ logger.info(
+ f"[TIMING] get_redis_async took {redis_time:.1f}ms",
+ extra={"json_fields": {**log_meta, "duration_ms": redis_time}},
+ )
+
meta_key = _get_task_meta_key(task_id)
op_key = _get_operation_mapping_key(operation_id)
+ hset_start = time.perf_counter()
await redis.hset( # type: ignore[misc]
meta_key,
mapping={
@@ -131,12 +157,22 @@ async def create_task(
"created_at": task.created_at.isoformat(),
},
)
+ hset_time = (time.perf_counter() - hset_start) * 1000
+ logger.info(
+ f"[TIMING] redis.hset took {hset_time:.1f}ms",
+ extra={"json_fields": {**log_meta, "duration_ms": hset_time}},
+ )
+
await redis.expire(meta_key, config.stream_ttl)
# Create operation_id -> task_id mapping for webhook lookups
await redis.set(op_key, task_id, ex=config.stream_ttl)
- logger.debug(f"Created task {task_id} for session {session_id}")
+ total_time = (time.perf_counter() - start_time) * 1000
+ logger.info(
+ f"[TIMING] create_task COMPLETED in {total_time:.1f}ms; task={task_id}, session={session_id}",
+ extra={"json_fields": {**log_meta, "total_time_ms": total_time}},
+ )
return task
@@ -156,26 +192,60 @@ async def publish_chunk(
Returns:
The Redis Stream message ID
"""
+ import time
+
+ start_time = time.perf_counter()
+ chunk_type = type(chunk).__name__
chunk_json = chunk.model_dump_json()
message_id = "0-0"
+ # Build log metadata
+ log_meta = {
+ "component": "StreamRegistry",
+ "task_id": task_id,
+ "chunk_type": chunk_type,
+ }
+
try:
redis = await get_redis_async()
stream_key = _get_task_stream_key(task_id)
# Write to Redis Stream for persistence and real-time delivery
+ xadd_start = time.perf_counter()
raw_id = await redis.xadd(
stream_key,
{"data": chunk_json},
maxlen=config.stream_max_length,
)
+ xadd_time = (time.perf_counter() - xadd_start) * 1000
message_id = raw_id if isinstance(raw_id, str) else raw_id.decode()
# Set TTL on stream to match task metadata TTL
await redis.expire(stream_key, config.stream_ttl)
+
+ total_time = (time.perf_counter() - start_time) * 1000
+ # Only log timing for significant chunks or slow operations
+ if (
+ chunk_type
+ in ("StreamStart", "StreamFinish", "StreamTextStart", "StreamTextEnd")
+ or total_time > 50
+ ):
+ logger.info(
+ f"[TIMING] publish_chunk {chunk_type} in {total_time:.1f}ms (xadd={xadd_time:.1f}ms)",
+ extra={
+ "json_fields": {
+ **log_meta,
+ "total_time_ms": total_time,
+ "xadd_time_ms": xadd_time,
+ "message_id": message_id,
+ }
+ },
+ )
except Exception as e:
+ elapsed = (time.perf_counter() - start_time) * 1000
logger.error(
- f"Failed to publish chunk for task {task_id}: {e}",
+ f"[TIMING] Failed to publish chunk {chunk_type} after {elapsed:.1f}ms: {e}",
+ extra={"json_fields": {**log_meta, "elapsed_ms": elapsed, "error": str(e)}},
exc_info=True,
)
@@ -200,24 +270,61 @@ async def subscribe_to_task(
An asyncio Queue that will receive stream chunks, or None if task not found
or user doesn't have access
"""
+ import time
+
+ start_time = time.perf_counter()
+
+ # Build log metadata
+ log_meta = {"component": "StreamRegistry", "task_id": task_id}
+ if user_id:
+ log_meta["user_id"] = user_id
+
+ logger.info(
+ f"[TIMING] subscribe_to_task STARTED, task={task_id}, user={user_id}, last_msg={last_message_id}",
+ extra={"json_fields": {**log_meta, "last_message_id": last_message_id}},
+ )
+
+ redis_start = time.perf_counter()
redis = await get_redis_async()
meta_key = _get_task_meta_key(task_id)
meta: dict[Any, Any] = await redis.hgetall(meta_key) # type: ignore[misc]
+ hgetall_time = (time.perf_counter() - redis_start) * 1000
+ logger.info(
+ f"[TIMING] Redis hgetall took {hgetall_time:.1f}ms",
+ extra={"json_fields": {**log_meta, "duration_ms": hgetall_time}},
+ )
if not meta:
- logger.debug(f"Task {task_id} not found in Redis")
+ elapsed = (time.perf_counter() - start_time) * 1000
+ logger.info(
+ f"[TIMING] Task not found in Redis after {elapsed:.1f}ms",
+ extra={
+ "json_fields": {
+ **log_meta,
+ "elapsed_ms": elapsed,
+ "reason": "task_not_found",
+ }
+ },
+ )
return None
# Note: Redis client uses decode_responses=True, so keys are strings
task_status = meta.get("status", "")
task_user_id = meta.get("user_id", "") or None
+ log_meta["session_id"] = meta.get("session_id", "")
# Validate ownership - if task has an owner, requester must match
if task_user_id:
if user_id != task_user_id:
logger.warning(
- f"User {user_id} denied access to task {task_id} "
- f"owned by {task_user_id}"
+ f"[TIMING] Access denied: user {user_id} tried to access task owned by {task_user_id}",
+ extra={
+ "json_fields": {
+ **log_meta,
+ "task_owner": task_user_id,
+ "reason": "access_denied",
+ }
+ },
)
return None
@@ -225,7 +332,19 @@ async def subscribe_to_task(
stream_key = _get_task_stream_key(task_id)
# Step 1: Replay messages from Redis Stream
+ xread_start = time.perf_counter()
messages = await redis.xread({stream_key: last_message_id}, block=0, count=1000)
+ xread_time = (time.perf_counter() - xread_start) * 1000
+ logger.info(
+ f"[TIMING] Redis xread (replay) took {xread_time:.1f}ms, status={task_status}",
+ extra={
+ "json_fields": {
+ **log_meta,
+ "duration_ms": xread_time,
+ "task_status": task_status,
+ }
+ },
+ )
replayed_count = 0
replay_last_id = last_message_id
@@ -244,19 +363,48 @@ async def subscribe_to_task(
except Exception as e:
logger.warning(f"Failed to replay message: {e}")
- logger.debug(f"Task {task_id}: replayed {replayed_count} messages")
+ logger.info(
+ f"[TIMING] Replayed {replayed_count} messages, last_id={replay_last_id}",
+ extra={
+ "json_fields": {
+ **log_meta,
+ "n_messages_replayed": replayed_count,
+ "replay_last_id": replay_last_id,
+ }
+ },
+ )
# Step 2: If task is still running, start stream listener for live updates
if task_status == "running":
+ logger.info(
+ "[TIMING] Task still running, starting _stream_listener",
+ extra={"json_fields": {**log_meta, "task_status": task_status}},
+ )
listener_task = asyncio.create_task(
- _stream_listener(task_id, subscriber_queue, replay_last_id)
+ _stream_listener(task_id, subscriber_queue, replay_last_id, log_meta)
)
# Track listener task for cleanup on unsubscribe
_listener_tasks[id(subscriber_queue)] = (task_id, listener_task)
else:
# Task is completed/failed - add finish marker
+ logger.info(
+ f"[TIMING] Task already {task_status}, adding StreamFinish",
+ extra={"json_fields": {**log_meta, "task_status": task_status}},
+ )
await subscriber_queue.put(StreamFinish())
+ total_time = (time.perf_counter() - start_time) * 1000
+ logger.info(
+ f"[TIMING] subscribe_to_task COMPLETED in {total_time:.1f}ms; task={task_id}, "
+ f"n_messages_replayed={replayed_count}",
+ extra={
+ "json_fields": {
+ **log_meta,
+ "total_time_ms": total_time,
+ "n_messages_replayed": replayed_count,
+ }
+ },
+ )
return subscriber_queue
@@ -264,6 +412,7 @@ async def _stream_listener(
task_id: str,
subscriber_queue: asyncio.Queue[StreamBaseResponse],
last_replayed_id: str,
+ log_meta: dict | None = None,
) -> None:
"""Listen to Redis Stream for new messages using blocking XREAD.
@@ -274,10 +423,27 @@ async def _stream_listener(
task_id: Task ID to listen for
subscriber_queue: Queue to deliver messages to
last_replayed_id: Last message ID from replay (continue from here)
+ log_meta: Structured logging metadata
"""
+ import time
+
+ start_time = time.perf_counter()
+
+ # Use provided log_meta or build minimal one
+ if log_meta is None:
+ log_meta = {"component": "StreamRegistry", "task_id": task_id}
+
+ logger.info(
+ f"[TIMING] _stream_listener STARTED, task={task_id}, last_id={last_replayed_id}",
+ extra={"json_fields": {**log_meta, "last_replayed_id": last_replayed_id}},
+ )
+
queue_id = id(subscriber_queue)
# Track the last successfully delivered message ID for recovery hints
last_delivered_id = last_replayed_id
+ messages_delivered = 0
+ first_message_time = None
+ xread_count = 0
try:
redis = await get_redis_async()
@@ -287,9 +453,39 @@ async def _stream_listener(
while True:
# Block for up to 30 seconds waiting for new messages
# This allows periodic checking if task is still running
+ xread_start = time.perf_counter()
+ xread_count += 1
messages = await redis.xread(
{stream_key: current_id}, block=30000, count=100
)
+ xread_time = (time.perf_counter() - xread_start) * 1000
+
+ if messages:
+ msg_count = sum(len(msgs) for _, msgs in messages)
+ logger.info(
+ f"[TIMING] xread #{xread_count} returned {msg_count} messages in {xread_time:.1f}ms",
+ extra={
+ "json_fields": {
+ **log_meta,
+ "xread_count": xread_count,
+ "n_messages": msg_count,
+ "duration_ms": xread_time,
+ }
+ },
+ )
+ elif xread_time > 1000:
+ # Only log timeouts (30s blocking)
+ logger.info(
+ f"[TIMING] xread #{xread_count} timeout after {xread_time:.1f}ms",
+ extra={
+ "json_fields": {
+ **log_meta,
+ "xread_count": xread_count,
+ "duration_ms": xread_time,
+ "reason": "timeout",
+ }
+ },
+ )
if not messages:
# Timeout - check if task is still running
@@ -326,10 +522,30 @@ async def _stream_listener(
)
# Update last delivered ID on successful delivery
last_delivered_id = current_id
+ messages_delivered += 1
+ if first_message_time is None:
+ first_message_time = time.perf_counter()
+ elapsed = (first_message_time - start_time) * 1000
+ logger.info(
+ f"[TIMING] FIRST live message at {elapsed:.1f}ms, type={type(chunk).__name__}",
+ extra={
+ "json_fields": {
+ **log_meta,
+ "elapsed_ms": elapsed,
+ "chunk_type": type(chunk).__name__,
+ }
+ },
+ )
except asyncio.TimeoutError:
logger.warning(
- f"Subscriber queue full for task {task_id}, "
- f"message delivery timed out after {QUEUE_PUT_TIMEOUT}s"
+ f"[TIMING] Subscriber queue full, delivery timed out after {QUEUE_PUT_TIMEOUT}s",
+ extra={
+ "json_fields": {
+ **log_meta,
+ "timeout_s": QUEUE_PUT_TIMEOUT,
+ "reason": "queue_full",
+ }
+ },
)
# Send overflow error with recovery info
try:
@@ -351,15 +567,44 @@ async def _stream_listener(
# Stop listening on finish
if isinstance(chunk, StreamFinish):
+ total_time = (time.perf_counter() - start_time) * 1000
+ logger.info(
+ f"[TIMING] StreamFinish received in {total_time/1000:.1f}s; delivered={messages_delivered}",
+ extra={
+ "json_fields": {
+ **log_meta,
+ "total_time_ms": total_time,
+ "messages_delivered": messages_delivered,
+ }
+ },
+ )
return
except Exception as e:
- logger.warning(f"Error processing stream message: {e}")
+ logger.warning(
+ f"Error processing stream message: {e}",
+ extra={"json_fields": {**log_meta, "error": str(e)}},
+ )
except asyncio.CancelledError:
- logger.debug(f"Stream listener cancelled for task {task_id}")
+ elapsed = (time.perf_counter() - start_time) * 1000
+ logger.info(
+ f"[TIMING] _stream_listener CANCELLED after {elapsed:.1f}ms, delivered={messages_delivered}",
+ extra={
+ "json_fields": {
+ **log_meta,
+ "elapsed_ms": elapsed,
+ "messages_delivered": messages_delivered,
+ "reason": "cancelled",
+ }
+ },
+ )
raise # Re-raise to propagate cancellation
except Exception as e:
- logger.error(f"Stream listener error for task {task_id}: {e}")
+ elapsed = (time.perf_counter() - start_time) * 1000
+ logger.error(
+ f"[TIMING] _stream_listener ERROR after {elapsed:.1f}ms: {e}",
+ extra={"json_fields": {**log_meta, "elapsed_ms": elapsed, "error": str(e)}},
+ )
# On error, send finish to unblock subscriber
try:
await asyncio.wait_for(
@@ -368,10 +613,24 @@ async def _stream_listener(
)
except (asyncio.TimeoutError, asyncio.QueueFull):
logger.warning(
- f"Could not deliver finish event for task {task_id} after error"
+ "Could not deliver finish event after error",
+ extra={"json_fields": log_meta},
)
finally:
# Clean up listener task mapping on exit
+ total_time = (time.perf_counter() - start_time) * 1000
+ logger.info(
+ f"[TIMING] _stream_listener FINISHED in {total_time/1000:.1f}s; task={task_id}, "
+ f"delivered={messages_delivered}, xread_count={xread_count}",
+ extra={
+ "json_fields": {
+ **log_meta,
+ "total_time_ms": total_time,
+ "messages_delivered": messages_delivered,
+ "xread_count": xread_count,
+ }
+ },
+ )
_listener_tasks.pop(queue_id, None)
From 81f8290f0176eea29be3428c5e6ed21b82dc5da2 Mon Sep 17 00:00:00 2001
From: Otto
Date: Tue, 10 Feb 2026 07:35:13 +0000
Subject: [PATCH 18/18] debug(backend/db): Add diagnostic logging for vector
type errors (#12024)
Adds diagnostic logging when the `type vector does not exist` error
occurs in raw SQL queries.
## Problem
We're seeing intermittent "type vector does not exist" errors on
dev-behave ([Sentry
issue](https://significant-gravitas.sentry.io/issues/7205929979/)). The
pgvector extension should be in the search_path, but occasionally
queries fail to resolve the vector type.
## Solution
When a query fails with this specific error, we now log:
- `SHOW search_path` - what schemas are being searched
- `SELECT current_schema()` - the active schema
- `SELECT current_user, session_user, current_database()` - connection
context
This diagnostic info will help identify why the vector extension isn't
visible in certain cases.
## Changes
- Added `_log_vector_error_diagnostics()` helper function in
`backend/api/features/store/hybrid_search.py`
- Wrapped SQL execution in try/except to catch and diagnose vector type
errors
- Original exception is re-raised after logging (no behavior change)
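Roughly, the wrapping looks like this (a sketch only — `query_raw_with_schema` is stubbed here so the snippet is self-contained; the real diagnostics helper runs the queries listed above):
```python
import asyncio


async def query_raw_with_schema(sql: str, *params):
    # Stub standing in for the platform's raw-query helper; fails the same way
    # the Sentry issue describes so the diagnostics path is exercised.
    raise RuntimeError('type "vector" does not exist')


async def _log_vector_error_diagnostics(error: Exception) -> None:
    # Stub: the real helper runs SHOW search_path, SELECT current_schema(),
    # current_user/session_user/current_database(), and a pg_extension check,
    # rate-limited to once per minute.
    print(f"vector error diagnostics for: {error}")


async def run_search(sql_query: str, *params):
    try:
        return await query_raw_with_schema(sql_query, *params)
    except Exception as e:
        await _log_vector_error_diagnostics(e)
        raise  # original exception propagates unchanged


# asyncio.run(run_search("SELECT 1"))  # logs diagnostics, then re-raises
```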
## Testing
This is observational/diagnostic code. It will be validated by waiting
for the error to occur naturally on dev and checking the logs.
## Rollout
Once we've captured diagnostic logs and identified the root cause, this
logging can be removed or reduced in verbosity.
---
.../api/features/store/hybrid_search.py | 94 ++++++++++++++++++-
1 file changed, 92 insertions(+), 2 deletions(-)
diff --git a/autogpt_platform/backend/backend/api/features/store/hybrid_search.py b/autogpt_platform/backend/backend/api/features/store/hybrid_search.py
index e1b8f402c8..b10bfbcc06 100644
--- a/autogpt_platform/backend/backend/api/features/store/hybrid_search.py
+++ b/autogpt_platform/backend/backend/api/features/store/hybrid_search.py
@@ -8,6 +8,7 @@ Includes BM25 reranking for improved lexical relevance.
import logging
import re
+import time
from dataclasses import dataclass
from typing import Any, Literal
@@ -362,7 +363,11 @@ async def unified_hybrid_search(
LIMIT {limit_param} OFFSET {offset_param}
"""
- results = await query_raw_with_schema(sql_query, *params)
+ try:
+ results = await query_raw_with_schema(sql_query, *params)
+ except Exception as e:
+ await _log_vector_error_diagnostics(e)
+ raise
total = results[0]["total_count"] if results else 0
# Apply BM25 reranking
@@ -686,7 +691,11 @@ async def hybrid_search(
LIMIT {limit_param} OFFSET {offset_param}
"""
- results = await query_raw_with_schema(sql_query, *params)
+ try:
+ results = await query_raw_with_schema(sql_query, *params)
+ except Exception as e:
+ await _log_vector_error_diagnostics(e)
+ raise
total = results[0]["total_count"] if results else 0
@@ -718,6 +727,87 @@ async def hybrid_search_simple(
return await hybrid_search(query=query, page=page, page_size=page_size)
+# ============================================================================
+# Diagnostics
+# ============================================================================
+
+# Rate limit: only log vector error diagnostics once per this interval
+_VECTOR_DIAG_INTERVAL_SECONDS = 60
+_last_vector_diag_time: float = 0
+
+
+async def _log_vector_error_diagnostics(error: Exception) -> None:
+ """Log diagnostic info when 'type vector does not exist' error occurs.
+
+ Note: Diagnostic queries use query_raw_with_schema which may run on a different
+ pooled connection than the one that failed. Session-level search_path can differ,
+ so these diagnostics show cluster-wide state, not necessarily the failed session.
+
+ Includes rate limiting to avoid log spam - only logs once per minute.
+ Caller should re-raise the error after calling this function.
+ """
+ global _last_vector_diag_time
+
+ # Check if this is the vector type error
+ error_str = str(error).lower()
+ if not (
+ "type" in error_str and "vector" in error_str and "does not exist" in error_str
+ ):
+ return
+
+ # Rate limit: only log once per interval
+ now = time.time()
+ if now - _last_vector_diag_time < _VECTOR_DIAG_INTERVAL_SECONDS:
+ return
+ _last_vector_diag_time = now
+
+ try:
+ diagnostics: dict[str, object] = {}
+
+ try:
+ search_path_result = await query_raw_with_schema("SHOW search_path")
+ diagnostics["search_path"] = search_path_result
+ except Exception as e:
+ diagnostics["search_path"] = f"Error: {e}"
+
+ try:
+ schema_result = await query_raw_with_schema("SELECT current_schema()")
+ diagnostics["current_schema"] = schema_result
+ except Exception as e:
+ diagnostics["current_schema"] = f"Error: {e}"
+
+ try:
+ user_result = await query_raw_with_schema(
+ "SELECT current_user, session_user, current_database()"
+ )
+ diagnostics["user_info"] = user_result
+ except Exception as e:
+ diagnostics["user_info"] = f"Error: {e}"
+
+ try:
+ # Check pgvector extension installation (cluster-wide, stable info)
+ ext_result = await query_raw_with_schema(
+ "SELECT extname, extversion, nspname as schema "
+ "FROM pg_extension e "
+ "JOIN pg_namespace n ON e.extnamespace = n.oid "
+ "WHERE extname = 'vector'"
+ )
+ diagnostics["pgvector_extension"] = ext_result
+ except Exception as e:
+ diagnostics["pgvector_extension"] = f"Error: {e}"
+
+ logger.error(
+ f"Vector type error diagnostics:\n"
+ f" Error: {error}\n"
+ f" search_path: {diagnostics.get('search_path')}\n"
+ f" current_schema: {diagnostics.get('current_schema')}\n"
+ f" user_info: {diagnostics.get('user_info')}\n"
+ f" pgvector_extension: {diagnostics.get('pgvector_extension')}"
+ )
+ except Exception as diag_error:
+ logger.error(f"Failed to collect vector error diagnostics: {diag_error}")
+
+
# Backward compatibility alias - HybridSearchWeights maps to StoreAgentSearchWeights
# for existing code that expects the popularity parameter
HybridSearchWeights = StoreAgentSearchWeights