Compare commits

..

6 Commits

Author SHA1 Message Date
Nicholas Tindle
2b8134a711 Merge branch 'dev' into ntindle/google-issues-fix 2026-02-09 13:46:37 -06:00
Nicholas Tindle
90b3b5ba16 fix(backend): Fix misplaced section header in graph_test.py
Move the _reassign_ids section comment to above the actual _reassign_ids
tests, and label the combine() tests correctly.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-08 16:11:47 -06:00
Nicholas Tindle
f4f81bc4fc fix(backend): Remove _credentials_id key on fork instead of setting to None
Setting _credentials_id to None on fork was ambiguous — both "forked,
needs re-auth" and "chained data from upstream" were represented as None.
This caused _acquire_auto_credentials to silently skip credential
acquisition for forked agents, leading to confusing TypeErrors at runtime.

Now the key is deleted entirely, making the three states unambiguous:
- Present with value: user-selected credentials
- Present as None: chained data from upstream block
- Absent: forked/needs re-authentication

Also adds pre-run validation for the missing key case and makes error
messages provider-agnostic.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-06 17:34:16 -06:00
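
A minimal sketch of the three-state handling described in the commit above. The key and helper names `_credentials_id` and `_acquire_auto_credentials` come from the commit; the sentinel, function signature, and error wording below are assumptions for illustration, not the project's actual executor code.

    # Hypothetical sketch: distinguish "key absent" from "key present as None".
    _MISSING = object()

    def resolve_auto_credentials(node_input: dict, acquire):
        """Three-state check; `acquire` stands in for the real credential lookup."""
        value = node_input.get("_credentials_id", _MISSING)
        if value is _MISSING:
            # Key deleted on fork: the cloned agent must be re-authenticated.
            raise ValueError("This agent was forked; reconnect its credentials before running.")
        if value is None:
            # None means chained data from an upstream block; nothing to acquire here.
            return None
        # Present with a value: user-selected credentials.
        return acquire(value)
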
Nicholas Tindle
c5abc01f25 fix(backend): Add error handling for auto-credentials store lookup
Wrap the get_creds_by_id call in try/except in the auto-credentials
validation path to match the error-handling pattern used for regular
credentials.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-06 16:53:29 -06:00
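
A hedged sketch of the pattern this commit describes. `get_creds_by_id` is named in the commit; its argument order, the surrounding function, and the error messages are assumptions, not the project's actual validation code.

    async def validate_auto_credentials(store, user_id: str, credentials_id: str):
        """Illustrative wrapper around the store lookup; details assumed."""
        try:
            creds = await store.get_creds_by_id(user_id, credentials_id)
        except Exception as exc:
            # Surface lookup failures as a validation error instead of letting them propagate raw.
            raise ValueError(f"Credential lookup failed for {credentials_id}: {exc}") from exc
        if creds is None:
            raise ValueError(f"Credentials {credentials_id} are missing; please re-authenticate.")
        return creds
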
Nicholas Tindle
8b7053c1de merge: Resolve conflicts with dev (PR #11986 graph model refactor)
Adapt auto-credentials filtering to dev's refactored graph model:
- aggregate_credentials_inputs() now returns 3-tuples (field_info, node_pairs, is_required)
- credentials_input_schema moved to GraphModel, builds JSON schema directly
- Update regular/auto_credentials_inputs properties for 3-tuple format
- Update test mocks and assertions for new tuple format and class hierarchy

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-06 16:39:57 -06:00
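
A sketch of consuming the refactored 3-tuple format mentioned above. The tuple contents (field_info, node_pairs, is_required) and the `is_auto_credential` flag come from the commits; the dict shape, function name, and variable names are assumptions.

    def split_credentials_inputs(aggregated: dict) -> tuple[dict, dict]:
        """Assumes the post-refactor shape: {key: (field_info, node_pairs, is_required)}."""
        regular: dict = {}
        auto: dict = {}
        for key, (field_info, node_pairs, is_required) in aggregated.items():
            # Auto-credentials are resolved by the executor, so keep them out of user-facing schemas.
            bucket = auto if getattr(field_info, "is_auto_credential", False) else regular
            bucket[key] = (field_info, node_pairs, is_required)
        return regular, auto
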
Nicholas Tindle
e00c1202ad fix(platform): Fix Google Drive auto-credentials handling across the platform
- Tag auto-credentials with `is_auto_credential` and `input_field_name` on `CredentialsFieldInfo` to distinguish them from regular user-provided credentials
- Add `regular_credentials_inputs` and `auto_credentials_inputs` properties to `Graph` so UI schemas, CoPilot, and library presets only surface regular credentials
- Extract `_acquire_auto_credentials()` helper in executor to resolve embedded `_credentials_id` at execution time with proper lock management
- Validate auto-credentials ownership in `_validate_node_input_credentials()` to catch stale/missing credentials before execution
- Clear `_credentials_id` in `_reassign_ids()` on graph fork so cloned agents require re-authentication
- Propagate `is_auto_credential` through `combine()` and `discriminate()` on `CredentialsFieldInfo`
- Add `referrerPolicy: "no-referrer-when-downgrade"` to Google API script loading to fix Firefox API key validation
- Comprehensive test coverage for all new behavior

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-06 16:08:53 -06:00
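
To make the first two bullets concrete, here is a hedged sketch of how the tagging and filtering could look. Only `is_auto_credential`, `input_field_name`, `regular_credentials_inputs`, and `auto_credentials_inputs` are taken from the commit; every other field, type, and default below is an assumption rather than the project's actual model.

    # Illustrative Pydantic-style model; the real CredentialsFieldInfo has more fields.
    from pydantic import BaseModel

    class CredentialsFieldInfo(BaseModel):
        provider: str
        is_auto_credential: bool = False      # True for credentials embedded by blocks (e.g. a file picker)
        input_field_name: str | None = None   # which block input the auto-credential belongs to

    class Graph(BaseModel):
        credentials_fields: dict[str, CredentialsFieldInfo] = {}

        @property
        def regular_credentials_inputs(self) -> dict[str, CredentialsFieldInfo]:
            # Only these are surfaced in UI schemas, CoPilot, and library presets.
            return {k: v for k, v in self.credentials_fields.items() if not v.is_auto_credential}

        @property
        def auto_credentials_inputs(self) -> dict[str, CredentialsFieldInfo]:
            # Resolved by the executor at run time instead of being requested from the user.
            return {k: v for k, v in self.credentials_fields.items() if v.is_auto_credential}
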
271 changed files with 22295 additions and 12298 deletions

View File

@@ -22,7 +22,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v6
uses: actions/checkout@v4
with:
ref: ${{ github.event.workflow_run.head_branch }}
fetch-depth: 0

View File

@@ -30,7 +30,7 @@ jobs:
actions: read # Required for CI access
steps:
- name: Checkout code
uses: actions/checkout@v6
uses: actions/checkout@v4
with:
fetch-depth: 1

View File

@@ -40,7 +40,7 @@ jobs:
actions: read # Required for CI access
steps:
- name: Checkout code
uses: actions/checkout@v6
uses: actions/checkout@v4
with:
fetch-depth: 1

View File

@@ -58,7 +58,7 @@ jobs:
# your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages
steps:
- name: Checkout repository
uses: actions/checkout@v6
uses: actions/checkout@v4
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL

View File

@@ -27,7 +27,7 @@ jobs:
# If you do not check out your code, Copilot will do this for you.
steps:
- name: Checkout code
uses: actions/checkout@v6
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true

View File

@@ -23,7 +23,7 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v6
uses: actions/checkout@v4
with:
fetch-depth: 1

View File

@@ -23,7 +23,7 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v6
uses: actions/checkout@v4
with:
fetch-depth: 0

View File

@@ -28,7 +28,7 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v6
uses: actions/checkout@v4
with:
fetch-depth: 1

View File

@@ -25,7 +25,7 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v6
uses: actions/checkout@v4
with:
ref: ${{ github.event.inputs.git_ref || github.ref_name }}
@@ -52,7 +52,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Trigger deploy workflow
uses: peter-evans/repository-dispatch@v4
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ secrets.DEPLOY_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure

View File

@@ -17,7 +17,7 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v6
uses: actions/checkout@v4
with:
ref: ${{ github.ref_name || 'master' }}
@@ -45,7 +45,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Trigger deploy workflow
uses: peter-evans/repository-dispatch@v4
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ secrets.DEPLOY_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure

View File

@@ -68,7 +68,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v6
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true

View File

@@ -82,7 +82,7 @@ jobs:
- name: Dispatch Deploy Event
if: steps.check_status.outputs.should_deploy == 'true'
uses: peter-evans/repository-dispatch@v4
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ secrets.DISPATCH_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
@@ -110,7 +110,7 @@ jobs:
- name: Dispatch Undeploy Event (from comment)
if: steps.check_status.outputs.should_undeploy == 'true'
uses: peter-evans/repository-dispatch@v4
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ secrets.DISPATCH_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
@@ -168,7 +168,7 @@ jobs:
github.event_name == 'pull_request' &&
github.event.action == 'closed' &&
steps.check_pr_close.outputs.should_undeploy == 'true'
uses: peter-evans/repository-dispatch@v4
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ secrets.DISPATCH_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure

View File

@@ -31,7 +31,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v6
uses: actions/checkout@v4
- name: Check for component changes
uses: dorny/paths-filter@v3
@@ -71,7 +71,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v6
uses: actions/checkout@v4
- name: Set up Node.js
uses: actions/setup-node@v6
@@ -107,7 +107,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v6
uses: actions/checkout@v4
with:
fetch-depth: 0
@@ -148,7 +148,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v6
uses: actions/checkout@v4
with:
submodules: recursive
@@ -277,7 +277,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v6
uses: actions/checkout@v4
with:
submodules: recursive

View File

@@ -29,7 +29,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v6
uses: actions/checkout@v4
- name: Set up Node.js
uses: actions/setup-node@v6
@@ -56,14 +56,14 @@ jobs:
run: pnpm install --frozen-lockfile
types:
runs-on: big-boi
runs-on: ubuntu-latest
needs: setup
strategy:
fail-fast: false
steps:
- name: Checkout repository
uses: actions/checkout@v6
uses: actions/checkout@v4
with:
submodules: recursive
@@ -85,7 +85,7 @@ jobs:
- name: Run docker compose
run: |
docker compose -f ../docker-compose.yml --profile local up -d deps_backend
docker compose -f ../docker-compose.yml --profile local --profile deps_backend up -d
- name: Restore dependencies cache
uses: actions/cache@v5

View File

@@ -11,7 +11,7 @@ jobs:
steps:
# - name: Wait some time for all actions to start
# run: sleep 30
- uses: actions/checkout@v6
- uses: actions/checkout@v4
# with:
# fetch-depth: 0
- name: Set up Python

View File

@@ -1,4 +1,4 @@
# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand.
# This file is automatically @generated by Poetry 2.2.1 and should not be changed by hand.
[[package]]
name = "annotated-doc"
@@ -67,7 +67,7 @@ description = "Backport of asyncio.Runner, a context manager that controls event
optional = false
python-versions = "<3.11,>=3.8"
groups = ["dev"]
markers = "python_version < \"3.11\""
markers = "python_version == \"3.10\""
files = [
{file = "backports_asyncio_runner-1.2.0-py3-none-any.whl", hash = "sha256:0da0a936a8aeb554eccb426dc55af3ba63bcdc69fa1a600b5bb305413a4477b5"},
{file = "backports_asyncio_runner-1.2.0.tar.gz", hash = "sha256:a5aa7b2b7d8f8bfcaa2b57313f70792df84e32a2a746f585213373f900b42162"},
@@ -326,118 +326,100 @@ files = [
[[package]]
name = "coverage"
version = "7.13.4"
version = "7.10.5"
description = "Code coverage measurement for Python"
optional = false
python-versions = ">=3.10"
python-versions = ">=3.9"
groups = ["dev"]
files = [
{file = "coverage-7.13.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0fc31c787a84f8cd6027eba44010517020e0d18487064cd3d8968941856d1415"},
{file = "coverage-7.13.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a32ebc02a1805adf637fc8dec324b5cdacd2e493515424f70ee33799573d661b"},
{file = "coverage-7.13.4-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:e24f9156097ff9dc286f2f913df3a7f63c0e333dcafa3c196f2c18b4175ca09a"},
{file = "coverage-7.13.4-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:8041b6c5bfdc03257666e9881d33b1abc88daccaf73f7b6340fb7946655cd10f"},
{file = "coverage-7.13.4-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2a09cfa6a5862bc2fc6ca7c3def5b2926194a56b8ab78ffcf617d28911123012"},
{file = "coverage-7.13.4-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:296f8b0af861d3970c2a4d8c91d48eb4dd4771bcef9baedec6a9b515d7de3def"},
{file = "coverage-7.13.4-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:e101609bcbbfb04605ea1027b10dc3735c094d12d40826a60f897b98b1c30256"},
{file = "coverage-7.13.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:aa3feb8db2e87ff5e6d00d7e1480ae241876286691265657b500886c98f38bda"},
{file = "coverage-7.13.4-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:4fc7fa81bbaf5a02801b65346c8b3e657f1d93763e58c0abdf7c992addd81a92"},
{file = "coverage-7.13.4-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:33901f604424145c6e9c2398684b92e176c0b12df77d52db81c20abd48c3794c"},
{file = "coverage-7.13.4-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:bb28c0f2cf2782508a40cec377935829d5fcc3ad9a3681375af4e84eb34b6b58"},
{file = "coverage-7.13.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:9d107aff57a83222ddbd8d9ee705ede2af2cc926608b57abed8ef96b50b7e8f9"},
{file = "coverage-7.13.4-cp310-cp310-win32.whl", hash = "sha256:a6f94a7d00eb18f1b6d403c91a88fd58cfc92d4b16080dfdb774afc8294469bf"},
{file = "coverage-7.13.4-cp310-cp310-win_amd64.whl", hash = "sha256:2cb0f1e000ebc419632bbe04366a8990b6e32c4e0b51543a6484ffe15eaeda95"},
{file = "coverage-7.13.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d490ba50c3f35dd7c17953c68f3270e7ccd1c6642e2d2afe2d8e720b98f5a053"},
{file = "coverage-7.13.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:19bc3c88078789f8ef36acb014d7241961dbf883fd2533d18cb1e7a5b4e28b11"},
{file = "coverage-7.13.4-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:3998e5a32e62fdf410c0dbd3115df86297995d6e3429af80b8798aad894ca7aa"},
{file = "coverage-7.13.4-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:8e264226ec98e01a8e1054314af91ee6cde0eacac4f465cc93b03dbe0bce2fd7"},
{file = "coverage-7.13.4-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a3aa4e7b9e416774b21797365b358a6e827ffadaaca81b69ee02946852449f00"},
{file = "coverage-7.13.4-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:71ca20079dd8f27fcf808817e281e90220475cd75115162218d0e27549f95fef"},
{file = "coverage-7.13.4-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:e2f25215f1a359ab17320b47bcdaca3e6e6356652e8256f2441e4ef972052903"},
{file = "coverage-7.13.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d65b2d373032411e86960604dc4edac91fdfb5dca539461cf2cbe78327d1e64f"},
{file = "coverage-7.13.4-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:94eb63f9b363180aff17de3e7c8760c3ba94664ea2695c52f10111244d16a299"},
{file = "coverage-7.13.4-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:e856bf6616714c3a9fbc270ab54103f4e685ba236fa98c054e8f87f266c93505"},
{file = "coverage-7.13.4-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:65dfcbe305c3dfe658492df2d85259e0d79ead4177f9ae724b6fb245198f55d6"},
{file = "coverage-7.13.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b507778ae8a4c915436ed5c2e05b4a6cecfa70f734e19c22a005152a11c7b6a9"},
{file = "coverage-7.13.4-cp311-cp311-win32.whl", hash = "sha256:784fc3cf8be001197b652d51d3fd259b1e2262888693a4636e18879f613a62a9"},
{file = "coverage-7.13.4-cp311-cp311-win_amd64.whl", hash = "sha256:2421d591f8ca05b308cf0092807308b2facbefe54af7c02ac22548b88b95c98f"},
{file = "coverage-7.13.4-cp311-cp311-win_arm64.whl", hash = "sha256:79e73a76b854d9c6088fe5d8b2ebe745f8681c55f7397c3c0a016192d681045f"},
{file = "coverage-7.13.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:02231499b08dabbe2b96612993e5fc34217cdae907a51b906ac7fca8027a4459"},
{file = "coverage-7.13.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40aa8808140e55dc022b15d8aa7f651b6b3d68b365ea0398f1441e0b04d859c3"},
{file = "coverage-7.13.4-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:5b856a8ccf749480024ff3bd7310adaef57bf31fd17e1bfc404b7940b6986634"},
{file = "coverage-7.13.4-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:2c048ea43875fbf8b45d476ad79f179809c590ec7b79e2035c662e7afa3192e3"},
{file = "coverage-7.13.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b7b38448866e83176e28086674fe7368ab8590e4610fb662b44e345b86d63ffa"},
{file = "coverage-7.13.4-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:de6defc1c9badbf8b9e67ae90fd00519186d6ab64e5cc5f3d21359c2a9b2c1d3"},
{file = "coverage-7.13.4-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:7eda778067ad7ffccd23ecffce537dface96212576a07924cbf0d8799d2ded5a"},
{file = "coverage-7.13.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e87f6c587c3f34356c3759f0420693e35e7eb0e2e41e4c011cb6ec6ecbbf1db7"},
{file = "coverage-7.13.4-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:8248977c2e33aecb2ced42fef99f2d319e9904a36e55a8a68b69207fb7e43edc"},
{file = "coverage-7.13.4-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:25381386e80ae727608e662474db537d4df1ecd42379b5ba33c84633a2b36d47"},
{file = "coverage-7.13.4-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:ee756f00726693e5ba94d6df2bdfd64d4852d23b09bb0bc700e3b30e6f333985"},
{file = "coverage-7.13.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fdfc1e28e7c7cdce44985b3043bc13bbd9c747520f94a4d7164af8260b3d91f0"},
{file = "coverage-7.13.4-cp312-cp312-win32.whl", hash = "sha256:01d4cbc3c283a17fc1e42d614a119f7f438eabb593391283adca8dc86eff1246"},
{file = "coverage-7.13.4-cp312-cp312-win_amd64.whl", hash = "sha256:9401ebc7ef522f01d01d45532c68c5ac40fb27113019b6b7d8b208f6e9baa126"},
{file = "coverage-7.13.4-cp312-cp312-win_arm64.whl", hash = "sha256:b1ec7b6b6e93255f952e27ab58fbc68dcc468844b16ecbee881aeb29b6ab4d8d"},
{file = "coverage-7.13.4-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b66a2da594b6068b48b2692f043f35d4d3693fb639d5ea8b39533c2ad9ac3ab9"},
{file = "coverage-7.13.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3599eb3992d814d23b35c536c28df1a882caa950f8f507cef23d1cbf334995ac"},
{file = "coverage-7.13.4-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:93550784d9281e374fb5a12bf1324cc8a963fd63b2d2f223503ef0fd4aa339ea"},
{file = "coverage-7.13.4-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:b720ce6a88a2755f7c697c23268ddc47a571b88052e6b155224347389fdf6a3b"},
{file = "coverage-7.13.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7b322db1284a2ed3aa28ffd8ebe3db91c929b7a333c0820abec3d838ef5b3525"},
{file = "coverage-7.13.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f4594c67d8a7c89cf922d9df0438c7c7bb022ad506eddb0fdb2863359ff78242"},
{file = "coverage-7.13.4-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:53d133df809c743eb8bce33b24bcababb371f4441340578cd406e084d94a6148"},
{file = "coverage-7.13.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:76451d1978b95ba6507a039090ba076105c87cc76fc3efd5d35d72093964d49a"},
{file = "coverage-7.13.4-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:7f57b33491e281e962021de110b451ab8a24182589be17e12a22c79047935e23"},
{file = "coverage-7.13.4-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:1731dc33dc276dafc410a885cbf5992f1ff171393e48a21453b78727d090de80"},
{file = "coverage-7.13.4-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:bd60d4fe2f6fa7dff9223ca1bbc9f05d2b6697bc5961072e5d3b952d46e1b1ea"},
{file = "coverage-7.13.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9181a3ccead280b828fae232df12b16652702b49d41e99d657f46cc7b1f6ec7a"},
{file = "coverage-7.13.4-cp313-cp313-win32.whl", hash = "sha256:f53d492307962561ac7de4cd1de3e363589b000ab69617c6156a16ba7237998d"},
{file = "coverage-7.13.4-cp313-cp313-win_amd64.whl", hash = "sha256:e6f70dec1cc557e52df5306d051ef56003f74d56e9c4dd7ddb07e07ef32a84dd"},
{file = "coverage-7.13.4-cp313-cp313-win_arm64.whl", hash = "sha256:fb07dc5da7e849e2ad31a5d74e9bece81f30ecf5a42909d0a695f8bd1874d6af"},
{file = "coverage-7.13.4-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:40d74da8e6c4b9ac18b15331c4b5ebc35a17069410cad462ad4f40dcd2d50c0d"},
{file = "coverage-7.13.4-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:4223b4230a376138939a9173f1bdd6521994f2aff8047fae100d6d94d50c5a12"},
{file = "coverage-7.13.4-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:1d4be36a5114c499f9f1f9195e95ebf979460dbe2d88e6816ea202010ba1c34b"},
{file = "coverage-7.13.4-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:200dea7d1e8095cc6e98cdabe3fd1d21ab17d3cee6dab00cadbb2fe35d9c15b9"},
{file = "coverage-7.13.4-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b8eb931ee8e6d8243e253e5ed7336deea6904369d2fd8ae6e43f68abbf167092"},
{file = "coverage-7.13.4-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:75eab1ebe4f2f64d9509b984f9314d4aa788540368218b858dad56dc8f3e5eb9"},
{file = "coverage-7.13.4-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:c35eb28c1d085eb7d8c9b3296567a1bebe03ce72962e932431b9a61f28facf26"},
{file = "coverage-7.13.4-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:eb88b316ec33760714a4720feb2816a3a59180fd58c1985012054fa7aebee4c2"},
{file = "coverage-7.13.4-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:7d41eead3cc673cbd38a4417deb7fd0b4ca26954ff7dc6078e33f6ff97bed940"},
{file = "coverage-7.13.4-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:fb26a934946a6afe0e326aebe0730cdff393a8bc0bbb65a2f41e30feddca399c"},
{file = "coverage-7.13.4-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:dae88bc0fc77edaa65c14be099bd57ee140cf507e6bfdeea7938457ab387efb0"},
{file = "coverage-7.13.4-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:845f352911777a8e722bfce168958214951e07e47e5d5d9744109fa5fe77f79b"},
{file = "coverage-7.13.4-cp313-cp313t-win32.whl", hash = "sha256:2fa8d5f8de70688a28240de9e139fa16b153cc3cbb01c5f16d88d6505ebdadf9"},
{file = "coverage-7.13.4-cp313-cp313t-win_amd64.whl", hash = "sha256:9351229c8c8407645840edcc277f4a2d44814d1bc34a2128c11c2a031d45a5dd"},
{file = "coverage-7.13.4-cp313-cp313t-win_arm64.whl", hash = "sha256:30b8d0512f2dc8c8747557e8fb459d6176a2c9e5731e2b74d311c03b78451997"},
{file = "coverage-7.13.4-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:300deaee342f90696ed186e3a00c71b5b3d27bffe9e827677954f4ee56969601"},
{file = "coverage-7.13.4-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:29e3220258d682b6226a9b0925bc563ed9a1ebcff3cad30f043eceea7eaf2689"},
{file = "coverage-7.13.4-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:391ee8f19bef69210978363ca930f7328081c6a0152f1166c91f0b5fdd2a773c"},
{file = "coverage-7.13.4-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:0dd7ab8278f0d58a0128ba2fca25824321f05d059c1441800e934ff2efa52129"},
{file = "coverage-7.13.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:78cdf0d578b15148b009ccf18c686aa4f719d887e76e6b40c38ffb61d264a552"},
{file = "coverage-7.13.4-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:48685fee12c2eb3b27c62f2658e7ea21e9c3239cba5a8a242801a0a3f6a8c62a"},
{file = "coverage-7.13.4-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:4e83efc079eb39480e6346a15a1bcb3e9b04759c5202d157e1dd4303cd619356"},
{file = "coverage-7.13.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:ecae9737b72408d6a950f7e525f30aca12d4bd8dd95e37342e5beb3a2a8c4f71"},
{file = "coverage-7.13.4-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:ae4578f8528569d3cf303fef2ea569c7f4c4059a38c8667ccef15c6e1f118aa5"},
{file = "coverage-7.13.4-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:6fdef321fdfbb30a197efa02d48fcd9981f0d8ad2ae8903ac318adc653f5df98"},
{file = "coverage-7.13.4-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:2b0f6ccf3dbe577170bebfce1318707d0e8c3650003cb4b3a9dd744575daa8b5"},
{file = "coverage-7.13.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:75fcd519f2a5765db3f0e391eb3b7d150cce1a771bf4c9f861aeab86c767a3c0"},
{file = "coverage-7.13.4-cp314-cp314-win32.whl", hash = "sha256:8e798c266c378da2bd819b0677df41ab46d78065fb2a399558f3f6cae78b2fbb"},
{file = "coverage-7.13.4-cp314-cp314-win_amd64.whl", hash = "sha256:245e37f664d89861cf2329c9afa2c1fe9e6d4e1a09d872c947e70718aeeac505"},
{file = "coverage-7.13.4-cp314-cp314-win_arm64.whl", hash = "sha256:ad27098a189e5838900ce4c2a99f2fe42a0bf0c2093c17c69b45a71579e8d4a2"},
{file = "coverage-7.13.4-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:85480adfb35ffc32d40918aad81b89c69c9cc5661a9b8a81476d3e645321a056"},
{file = "coverage-7.13.4-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:79be69cf7f3bf9b0deeeb062eab7ac7f36cd4cc4c4dd694bd28921ba4d8596cc"},
{file = "coverage-7.13.4-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:caa421e2684e382c5d8973ac55e4f36bed6821a9bad5c953494de960c74595c9"},
{file = "coverage-7.13.4-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:14375934243ee05f56c45393fe2ce81fe5cc503c07cee2bdf1725fb8bef3ffaf"},
{file = "coverage-7.13.4-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:25a41c3104d08edb094d9db0d905ca54d0cd41c928bb6be3c4c799a54753af55"},
{file = "coverage-7.13.4-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:6f01afcff62bf9a08fb32b2c1d6e924236c0383c02c790732b6537269e466a72"},
{file = "coverage-7.13.4-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:eb9078108fbf0bcdde37c3f4779303673c2fa1fe8f7956e68d447d0dd426d38a"},
{file = "coverage-7.13.4-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:0e086334e8537ddd17e5f16a344777c1ab8194986ec533711cbe6c41cde841b6"},
{file = "coverage-7.13.4-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:725d985c5ab621268b2edb8e50dfe57633dc69bda071abc470fed55a14935fd3"},
{file = "coverage-7.13.4-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:3c06f0f1337c667b971ca2f975523347e63ec5e500b9aa5882d91931cd3ef750"},
{file = "coverage-7.13.4-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:590c0ed4bf8e85f745e6b805b2e1c457b2e33d5255dd9729743165253bc9ad39"},
{file = "coverage-7.13.4-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:eb30bf180de3f632cd043322dad5751390e5385108b2807368997d1a92a509d0"},
{file = "coverage-7.13.4-cp314-cp314t-win32.whl", hash = "sha256:c4240e7eded42d131a2d2c4dec70374b781b043ddc79a9de4d55ca71f8e98aea"},
{file = "coverage-7.13.4-cp314-cp314t-win_amd64.whl", hash = "sha256:4c7d3cc01e7350f2f0f6f7036caaf5673fb56b6998889ccfe9e1c1fe75a9c932"},
{file = "coverage-7.13.4-cp314-cp314t-win_arm64.whl", hash = "sha256:23e3f687cf945070d1c90f85db66d11e3025665d8dafa831301a0e0038f3db9b"},
{file = "coverage-7.13.4-py3-none-any.whl", hash = "sha256:1af1641e57cf7ba1bd67d677c9abdbcd6cc2ab7da3bca7fa1e2b7e50e65f2ad0"},
{file = "coverage-7.13.4.tar.gz", hash = "sha256:e5c8f6ed1e61a8b2dcdf31eb0b9bbf0130750ca79c1c49eb898e2ad86f5ccc91"},
{file = "coverage-7.10.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c6a5c3414bfc7451b879141ce772c546985163cf553f08e0f135f0699a911801"},
{file = "coverage-7.10.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:bc8e4d99ce82f1710cc3c125adc30fd1487d3cf6c2cd4994d78d68a47b16989a"},
{file = "coverage-7.10.5-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:02252dc1216e512a9311f596b3169fad54abcb13827a8d76d5630c798a50a754"},
{file = "coverage-7.10.5-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:73269df37883e02d460bee0cc16be90509faea1e3bd105d77360b512d5bb9c33"},
{file = "coverage-7.10.5-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1f8a81b0614642f91c9effd53eec284f965577591f51f547a1cbeb32035b4c2f"},
{file = "coverage-7.10.5-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:6a29f8e0adb7f8c2b95fa2d4566a1d6e6722e0a637634c6563cb1ab844427dd9"},
{file = "coverage-7.10.5-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:fcf6ab569436b4a647d4e91accba12509ad9f2554bc93d3aee23cc596e7f99c3"},
{file = "coverage-7.10.5-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:90dc3d6fb222b194a5de60af8d190bedeeddcbc7add317e4a3cd333ee6b7c879"},
{file = "coverage-7.10.5-cp310-cp310-win32.whl", hash = "sha256:414a568cd545f9dc75f0686a0049393de8098414b58ea071e03395505b73d7a8"},
{file = "coverage-7.10.5-cp310-cp310-win_amd64.whl", hash = "sha256:e551f9d03347196271935fd3c0c165f0e8c049220280c1120de0084d65e9c7ff"},
{file = "coverage-7.10.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c177e6ffe2ebc7c410785307758ee21258aa8e8092b44d09a2da767834f075f2"},
{file = "coverage-7.10.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:14d6071c51ad0f703d6440827eaa46386169b5fdced42631d5a5ac419616046f"},
{file = "coverage-7.10.5-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:61f78c7c3bc272a410c5ae3fde7792b4ffb4acc03d35a7df73ca8978826bb7ab"},
{file = "coverage-7.10.5-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:f39071caa126f69d63f99b324fb08c7b1da2ec28cbb1fe7b5b1799926492f65c"},
{file = "coverage-7.10.5-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:343a023193f04d46edc46b2616cdbee68c94dd10208ecd3adc56fcc54ef2baa1"},
{file = "coverage-7.10.5-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:585ffe93ae5894d1ebdee69fc0b0d4b7c75d8007983692fb300ac98eed146f78"},
{file = "coverage-7.10.5-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:b0ef4e66f006ed181df29b59921bd8fc7ed7cd6a9289295cd8b2824b49b570df"},
{file = "coverage-7.10.5-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:eb7b0bbf7cc1d0453b843eca7b5fa017874735bef9bfdfa4121373d2cc885ed6"},
{file = "coverage-7.10.5-cp311-cp311-win32.whl", hash = "sha256:1d043a8a06987cc0c98516e57c4d3fc2c1591364831e9deb59c9e1b4937e8caf"},
{file = "coverage-7.10.5-cp311-cp311-win_amd64.whl", hash = "sha256:fefafcca09c3ac56372ef64a40f5fe17c5592fab906e0fdffd09543f3012ba50"},
{file = "coverage-7.10.5-cp311-cp311-win_arm64.whl", hash = "sha256:7e78b767da8b5fc5b2faa69bb001edafcd6f3995b42a331c53ef9572c55ceb82"},
{file = "coverage-7.10.5-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:c2d05c7e73c60a4cecc7d9b60dbfd603b4ebc0adafaef371445b47d0f805c8a9"},
{file = "coverage-7.10.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:32ddaa3b2c509778ed5373b177eb2bf5662405493baeff52278a0b4f9415188b"},
{file = "coverage-7.10.5-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:dd382410039fe062097aa0292ab6335a3f1e7af7bba2ef8d27dcda484918f20c"},
{file = "coverage-7.10.5-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:7fa22800f3908df31cea6fb230f20ac49e343515d968cc3a42b30d5c3ebf9b5a"},
{file = "coverage-7.10.5-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f366a57ac81f5e12797136552f5b7502fa053c861a009b91b80ed51f2ce651c6"},
{file = "coverage-7.10.5-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5f1dc8f1980a272ad4a6c84cba7981792344dad33bf5869361576b7aef42733a"},
{file = "coverage-7.10.5-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:2285c04ee8676f7938b02b4936d9b9b672064daab3187c20f73a55f3d70e6b4a"},
{file = "coverage-7.10.5-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c2492e4dd9daab63f5f56286f8a04c51323d237631eb98505d87e4c4ff19ec34"},
{file = "coverage-7.10.5-cp312-cp312-win32.whl", hash = "sha256:38a9109c4ee8135d5df5505384fc2f20287a47ccbe0b3f04c53c9a1989c2bbaf"},
{file = "coverage-7.10.5-cp312-cp312-win_amd64.whl", hash = "sha256:6b87f1ad60b30bc3c43c66afa7db6b22a3109902e28c5094957626a0143a001f"},
{file = "coverage-7.10.5-cp312-cp312-win_arm64.whl", hash = "sha256:672a6c1da5aea6c629819a0e1461e89d244f78d7b60c424ecf4f1f2556c041d8"},
{file = "coverage-7.10.5-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ef3b83594d933020f54cf65ea1f4405d1f4e41a009c46df629dd964fcb6e907c"},
{file = "coverage-7.10.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:2b96bfdf7c0ea9faebce088a3ecb2382819da4fbc05c7b80040dbc428df6af44"},
{file = "coverage-7.10.5-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:63df1fdaffa42d914d5c4d293e838937638bf75c794cf20bee12978fc8c4e3bc"},
{file = "coverage-7.10.5-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:8002dc6a049aac0e81ecec97abfb08c01ef0c1fbf962d0c98da3950ace89b869"},
{file = "coverage-7.10.5-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:63d4bb2966d6f5f705a6b0c6784c8969c468dbc4bcf9d9ded8bff1c7e092451f"},
{file = "coverage-7.10.5-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1f672efc0731a6846b157389b6e6d5d5e9e59d1d1a23a5c66a99fd58339914d5"},
{file = "coverage-7.10.5-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:3f39cef43d08049e8afc1fde4a5da8510fc6be843f8dea350ee46e2a26b2f54c"},
{file = "coverage-7.10.5-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:2968647e3ed5a6c019a419264386b013979ff1fb67dd11f5c9886c43d6a31fc2"},
{file = "coverage-7.10.5-cp313-cp313-win32.whl", hash = "sha256:0d511dda38595b2b6934c2b730a1fd57a3635c6aa2a04cb74714cdfdd53846f4"},
{file = "coverage-7.10.5-cp313-cp313-win_amd64.whl", hash = "sha256:9a86281794a393513cf117177fd39c796b3f8e3759bb2764259a2abba5cce54b"},
{file = "coverage-7.10.5-cp313-cp313-win_arm64.whl", hash = "sha256:cebd8e906eb98bb09c10d1feed16096700b1198d482267f8bf0474e63a7b8d84"},
{file = "coverage-7.10.5-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0520dff502da5e09d0d20781df74d8189ab334a1e40d5bafe2efaa4158e2d9e7"},
{file = "coverage-7.10.5-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:d9cd64aca68f503ed3f1f18c7c9174cbb797baba02ca8ab5112f9d1c0328cd4b"},
{file = "coverage-7.10.5-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:0913dd1613a33b13c4f84aa6e3f4198c1a21ee28ccb4f674985c1f22109f0aae"},
{file = "coverage-7.10.5-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:1b7181c0feeb06ed8a02da02792f42f829a7b29990fef52eff257fef0885d760"},
{file = "coverage-7.10.5-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:36d42b7396b605f774d4372dd9c49bed71cbabce4ae1ccd074d155709dd8f235"},
{file = "coverage-7.10.5-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:b4fdc777e05c4940b297bf47bf7eedd56a39a61dc23ba798e4b830d585486ca5"},
{file = "coverage-7.10.5-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:42144e8e346de44a6f1dbd0a56575dd8ab8dfa7e9007da02ea5b1c30ab33a7db"},
{file = "coverage-7.10.5-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:66c644cbd7aed8fe266d5917e2c9f65458a51cfe5eeff9c05f15b335f697066e"},
{file = "coverage-7.10.5-cp313-cp313t-win32.whl", hash = "sha256:2d1b73023854068c44b0c554578a4e1ef1b050ed07cf8b431549e624a29a66ee"},
{file = "coverage-7.10.5-cp313-cp313t-win_amd64.whl", hash = "sha256:54a1532c8a642d8cc0bd5a9a51f5a9dcc440294fd06e9dda55e743c5ec1a8f14"},
{file = "coverage-7.10.5-cp313-cp313t-win_arm64.whl", hash = "sha256:74d5b63fe3f5f5d372253a4ef92492c11a4305f3550631beaa432fc9df16fcff"},
{file = "coverage-7.10.5-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:68c5e0bc5f44f68053369fa0d94459c84548a77660a5f2561c5e5f1e3bed7031"},
{file = "coverage-7.10.5-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:cf33134ffae93865e32e1e37df043bef15a5e857d8caebc0099d225c579b0fa3"},
{file = "coverage-7.10.5-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:ad8fa9d5193bafcf668231294241302b5e683a0518bf1e33a9a0dfb142ec3031"},
{file = "coverage-7.10.5-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:146fa1531973d38ab4b689bc764592fe6c2f913e7e80a39e7eeafd11f0ef6db2"},
{file = "coverage-7.10.5-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6013a37b8a4854c478d3219ee8bc2392dea51602dd0803a12d6f6182a0061762"},
{file = "coverage-7.10.5-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:eb90fe20db9c3d930fa2ad7a308207ab5b86bf6a76f54ab6a40be4012d88fcae"},
{file = "coverage-7.10.5-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:384b34482272e960c438703cafe63316dfbea124ac62006a455c8410bf2a2262"},
{file = "coverage-7.10.5-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:467dc74bd0a1a7de2bedf8deaf6811f43602cb532bd34d81ffd6038d6d8abe99"},
{file = "coverage-7.10.5-cp314-cp314-win32.whl", hash = "sha256:556d23d4e6393ca898b2e63a5bca91e9ac2d5fb13299ec286cd69a09a7187fde"},
{file = "coverage-7.10.5-cp314-cp314-win_amd64.whl", hash = "sha256:f4446a9547681533c8fa3e3c6cf62121eeee616e6a92bd9201c6edd91beffe13"},
{file = "coverage-7.10.5-cp314-cp314-win_arm64.whl", hash = "sha256:5e78bd9cf65da4c303bf663de0d73bf69f81e878bf72a94e9af67137c69b9fe9"},
{file = "coverage-7.10.5-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:5661bf987d91ec756a47c7e5df4fbcb949f39e32f9334ccd3f43233bbb65e508"},
{file = "coverage-7.10.5-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:a46473129244db42a720439a26984f8c6f834762fc4573616c1f37f13994b357"},
{file = "coverage-7.10.5-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:1f64b8d3415d60f24b058b58d859e9512624bdfa57a2d1f8aff93c1ec45c429b"},
{file = "coverage-7.10.5-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:44d43de99a9d90b20e0163f9770542357f58860a26e24dc1d924643bd6aa7cb4"},
{file = "coverage-7.10.5-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a931a87e5ddb6b6404e65443b742cb1c14959622777f2a4efd81fba84f5d91ba"},
{file = "coverage-7.10.5-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:f9559b906a100029274448f4c8b8b0a127daa4dade5661dfd821b8c188058842"},
{file = "coverage-7.10.5-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:b08801e25e3b4526ef9ced1aa29344131a8f5213c60c03c18fe4c6170ffa2874"},
{file = "coverage-7.10.5-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:ed9749bb8eda35f8b636fb7632f1c62f735a236a5d4edadd8bbcc5ea0542e732"},
{file = "coverage-7.10.5-cp314-cp314t-win32.whl", hash = "sha256:609b60d123fc2cc63ccee6d17e4676699075db72d14ac3c107cc4976d516f2df"},
{file = "coverage-7.10.5-cp314-cp314t-win_amd64.whl", hash = "sha256:0666cf3d2c1626b5a3463fd5b05f5e21f99e6aec40a3192eee4d07a15970b07f"},
{file = "coverage-7.10.5-cp314-cp314t-win_arm64.whl", hash = "sha256:bc85eb2d35e760120540afddd3044a5bf69118a91a296a8b3940dfc4fdcfe1e2"},
{file = "coverage-7.10.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:62835c1b00c4a4ace24c1a88561a5a59b612fbb83a525d1c70ff5720c97c0610"},
{file = "coverage-7.10.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5255b3bbcc1d32a4069d6403820ac8e6dbcc1d68cb28a60a1ebf17e47028e898"},
{file = "coverage-7.10.5-cp39-cp39-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:3876385722e335d6e991c430302c24251ef9c2a9701b2b390f5473199b1b8ebf"},
{file = "coverage-7.10.5-cp39-cp39-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:8048ce4b149c93447a55d279078c8ae98b08a6951a3c4d2d7e87f4efc7bfe100"},
{file = "coverage-7.10.5-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4028e7558e268dd8bcf4d9484aad393cafa654c24b4885f6f9474bf53183a82a"},
{file = "coverage-7.10.5-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:03f47dc870eec0367fcdd603ca6a01517d2504e83dc18dbfafae37faec66129a"},
{file = "coverage-7.10.5-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:2d488d7d42b6ded7ea0704884f89dcabd2619505457de8fc9a6011c62106f6e5"},
{file = "coverage-7.10.5-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:b3dcf2ead47fa8be14224ee817dfc1df98043af568fe120a22f81c0eb3c34ad2"},
{file = "coverage-7.10.5-cp39-cp39-win32.whl", hash = "sha256:02650a11324b80057b8c9c29487020073d5e98a498f1857f37e3f9b6ea1b2426"},
{file = "coverage-7.10.5-cp39-cp39-win_amd64.whl", hash = "sha256:b45264dd450a10f9e03237b41a9a24e85cbb1e278e5a32adb1a303f58f0017f3"},
{file = "coverage-7.10.5-py3-none-any.whl", hash = "sha256:0be24d35e4db1d23d0db5c0f6a74a962e2ec83c426b5cac09f4234aadef38e4a"},
{file = "coverage-7.10.5.tar.gz", hash = "sha256:f2e57716a78bc3ae80b2207be0709a3b2b63b9f2dcf9740ee6ac03588a2015b6"},
]
[package.dependencies]
@@ -541,7 +523,7 @@ description = "Backport of PEP 654 (exception groups)"
optional = false
python-versions = ">=3.7"
groups = ["main", "dev"]
markers = "python_version < \"3.11\""
markers = "python_version == \"3.10\""
files = [
{file = "exceptiongroup-1.3.0-py3-none-any.whl", hash = "sha256:4d111e6e0c13d0644cad6ddaa7ed0261a0b36971f6d23e7ec9b4b9097da78a10"},
{file = "exceptiongroup-1.3.0.tar.gz", hash = "sha256:b241f5885f560bc56a59ee63ca4c6a8bfa46ae4ad651af316d4e81817bb9fd88"},
@@ -2180,23 +2162,23 @@ testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"]
[[package]]
name = "pytest-cov"
version = "7.0.0"
version = "6.2.1"
description = "Pytest plugin for measuring coverage."
optional = false
python-versions = ">=3.9"
groups = ["dev"]
files = [
{file = "pytest_cov-7.0.0-py3-none-any.whl", hash = "sha256:3b8e9558b16cc1479da72058bdecf8073661c7f57f7d3c5f22a1c23507f2d861"},
{file = "pytest_cov-7.0.0.tar.gz", hash = "sha256:33c97eda2e049a0c5298e91f519302a1334c26ac65c1a483d6206fd458361af1"},
{file = "pytest_cov-6.2.1-py3-none-any.whl", hash = "sha256:f5bc4c23f42f1cdd23c70b1dab1bbaef4fc505ba950d53e0081d0730dd7e86d5"},
{file = "pytest_cov-6.2.1.tar.gz", hash = "sha256:25cc6cc0a5358204b8108ecedc51a9b57b34cc6b8c967cc2c01a4e00d8a67da2"},
]
[package.dependencies]
coverage = {version = ">=7.10.6", extras = ["toml"]}
coverage = {version = ">=7.5", extras = ["toml"]}
pluggy = ">=1.2"
pytest = ">=7"
pytest = ">=6.2.5"
[package.extras]
testing = ["process-tests", "pytest-xdist", "virtualenv"]
testing = ["fields", "hunter", "process-tests", "pytest-xdist", "virtualenv"]
[[package]]
name = "pytest-mock"
@@ -2563,7 +2545,7 @@ description = "A lil' TOML parser"
optional = false
python-versions = ">=3.8"
groups = ["dev"]
markers = "python_version < \"3.11\""
markers = "python_version == \"3.10\""
files = [
{file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"},
{file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"},
@@ -2911,4 +2893,4 @@ type = ["pytest-mypy"]
[metadata]
lock-version = "2.1"
python-versions = ">=3.10,<4.0"
content-hash = "40eae94995dc0a388fa832ed4af9b6137f28d5b5ced3aaea70d5f91d4d9a179d"
content-hash = "b7ac335a86aa44c3d7d2802298818b389a6f1286e3e9b7b0edb2ff06377cecaf"

View File

@@ -26,7 +26,7 @@ pyright = "^1.1.408"
pytest = "^8.4.1"
pytest-asyncio = "^1.3.0"
pytest-mock = "^3.15.1"
pytest-cov = "^7.0.0"
pytest-cov = "^6.2.1"
ruff = "^0.15.0"
[build-system]

View File

@@ -93,12 +93,6 @@ class ChatConfig(BaseSettings):
description="Name of the prompt in Langfuse to fetch",
)
# Extended thinking configuration for Claude models
thinking_enabled: bool = Field(
default=True,
description="Enable adaptive thinking for Claude models via OpenRouter",
)
@field_validator("api_key", mode="before")
@classmethod
def get_api_key(cls, v):

View File

@@ -2,7 +2,7 @@ import asyncio
import logging
import uuid
from datetime import UTC, datetime
from typing import Any, cast
from typing import Any
from weakref import WeakValueDictionary
from openai.types.chat import (
@@ -104,26 +104,6 @@ class ChatSession(BaseModel):
successful_agent_runs: dict[str, int] = {}
successful_agent_schedules: dict[str, int] = {}
def add_tool_call_to_current_turn(self, tool_call: dict) -> None:
"""Attach a tool_call to the current turn's assistant message.
Searches backwards for the most recent assistant message (stopping at
any user message boundary). If found, appends the tool_call to it.
Otherwise creates a new assistant message with the tool_call.
"""
for msg in reversed(self.messages):
if msg.role == "user":
break
if msg.role == "assistant":
if not msg.tool_calls:
msg.tool_calls = []
msg.tool_calls.append(tool_call)
return
self.messages.append(
ChatMessage(role="assistant", content="", tool_calls=[tool_call])
)
@staticmethod
def new(user_id: str) -> "ChatSession":
return ChatSession(
@@ -192,47 +172,6 @@ class ChatSession(BaseModel):
successful_agent_schedules=successful_agent_schedules,
)
@staticmethod
def _merge_consecutive_assistant_messages(
messages: list[ChatCompletionMessageParam],
) -> list[ChatCompletionMessageParam]:
"""Merge consecutive assistant messages into single messages.
Long-running tool flows can create split assistant messages: one with
text content and another with tool_calls. Anthropic's API requires
tool_result blocks to reference a tool_use in the immediately preceding
assistant message, so these splits cause 400 errors via OpenRouter.
"""
if len(messages) < 2:
return messages
result: list[ChatCompletionMessageParam] = [messages[0]]
for msg in messages[1:]:
prev = result[-1]
if prev.get("role") != "assistant" or msg.get("role") != "assistant":
result.append(msg)
continue
prev = cast(ChatCompletionAssistantMessageParam, prev)
curr = cast(ChatCompletionAssistantMessageParam, msg)
curr_content = curr.get("content") or ""
if curr_content:
prev_content = prev.get("content") or ""
prev["content"] = (
f"{prev_content}\n{curr_content}" if prev_content else curr_content
)
curr_tool_calls = curr.get("tool_calls")
if curr_tool_calls:
prev_tool_calls = prev.get("tool_calls")
prev["tool_calls"] = (
list(prev_tool_calls) + list(curr_tool_calls)
if prev_tool_calls
else list(curr_tool_calls)
)
return result
def to_openai_messages(self) -> list[ChatCompletionMessageParam]:
messages = []
for message in self.messages:
@@ -319,7 +258,7 @@ class ChatSession(BaseModel):
name=message.name or "",
)
)
return self._merge_consecutive_assistant_messages(messages)
return messages
async def _get_session_from_cache(session_id: str) -> ChatSession | None:

View File

@@ -1,16 +1,4 @@
from typing import cast
import pytest
from openai.types.chat import (
ChatCompletionAssistantMessageParam,
ChatCompletionMessageParam,
ChatCompletionToolMessageParam,
ChatCompletionUserMessageParam,
)
from openai.types.chat.chat_completion_message_tool_call_param import (
ChatCompletionMessageToolCallParam,
Function,
)
from .model import (
ChatMessage,
@@ -129,205 +117,3 @@ async def test_chatsession_db_storage(setup_test_user, test_user_id):
loaded.tool_calls is not None
), f"Tool calls missing for {orig.role} message"
assert len(orig.tool_calls) == len(loaded.tool_calls)
# --------------------------------------------------------------------------- #
# _merge_consecutive_assistant_messages #
# --------------------------------------------------------------------------- #
_tc = ChatCompletionMessageToolCallParam(
id="tc1", type="function", function=Function(name="do_stuff", arguments="{}")
)
_tc2 = ChatCompletionMessageToolCallParam(
id="tc2", type="function", function=Function(name="other", arguments="{}")
)
def test_merge_noop_when_no_consecutive_assistants():
"""Messages without consecutive assistants are returned unchanged."""
msgs = [
ChatCompletionUserMessageParam(role="user", content="hi"),
ChatCompletionAssistantMessageParam(role="assistant", content="hello"),
ChatCompletionUserMessageParam(role="user", content="bye"),
]
merged = ChatSession._merge_consecutive_assistant_messages(msgs)
assert len(merged) == 3
assert [m["role"] for m in merged] == ["user", "assistant", "user"]
def test_merge_splits_text_and_tool_calls():
"""The exact bug scenario: text-only assistant followed by tool_calls-only assistant."""
msgs = [
ChatCompletionUserMessageParam(role="user", content="build agent"),
ChatCompletionAssistantMessageParam(
role="assistant", content="Let me build that"
),
ChatCompletionAssistantMessageParam(
role="assistant", content="", tool_calls=[_tc]
),
ChatCompletionToolMessageParam(role="tool", content="ok", tool_call_id="tc1"),
]
merged = ChatSession._merge_consecutive_assistant_messages(msgs)
assert len(merged) == 3
assert merged[0]["role"] == "user"
assert merged[2]["role"] == "tool"
a = cast(ChatCompletionAssistantMessageParam, merged[1])
assert a["role"] == "assistant"
assert a.get("content") == "Let me build that"
assert a.get("tool_calls") == [_tc]
def test_merge_combines_tool_calls_from_both():
"""Both consecutive assistants have tool_calls — they get merged."""
msgs: list[ChatCompletionAssistantMessageParam] = [
ChatCompletionAssistantMessageParam(
role="assistant", content="text", tool_calls=[_tc]
),
ChatCompletionAssistantMessageParam(
role="assistant", content="", tool_calls=[_tc2]
),
]
merged = ChatSession._merge_consecutive_assistant_messages(msgs) # type: ignore[arg-type]
assert len(merged) == 1
a = cast(ChatCompletionAssistantMessageParam, merged[0])
assert a.get("tool_calls") == [_tc, _tc2]
assert a.get("content") == "text"
def test_merge_three_consecutive_assistants():
"""Three consecutive assistants collapse into one."""
msgs: list[ChatCompletionAssistantMessageParam] = [
ChatCompletionAssistantMessageParam(role="assistant", content="a"),
ChatCompletionAssistantMessageParam(role="assistant", content="b"),
ChatCompletionAssistantMessageParam(
role="assistant", content="", tool_calls=[_tc]
),
]
merged = ChatSession._merge_consecutive_assistant_messages(msgs) # type: ignore[arg-type]
assert len(merged) == 1
a = cast(ChatCompletionAssistantMessageParam, merged[0])
assert a.get("content") == "a\nb"
assert a.get("tool_calls") == [_tc]
def test_merge_empty_and_single_message():
"""Edge cases: empty list and single message."""
assert ChatSession._merge_consecutive_assistant_messages([]) == []
single: list[ChatCompletionMessageParam] = [
ChatCompletionUserMessageParam(role="user", content="hi")
]
assert ChatSession._merge_consecutive_assistant_messages(single) == single
# --------------------------------------------------------------------------- #
# add_tool_call_to_current_turn #
# --------------------------------------------------------------------------- #
_raw_tc = {
"id": "tc1",
"type": "function",
"function": {"name": "f", "arguments": "{}"},
}
_raw_tc2 = {
"id": "tc2",
"type": "function",
"function": {"name": "g", "arguments": "{}"},
}
def test_add_tool_call_appends_to_existing_assistant():
"""When the last assistant is from the current turn, tool_call is added to it."""
session = ChatSession.new(user_id="u")
session.messages = [
ChatMessage(role="user", content="hi"),
ChatMessage(role="assistant", content="working on it"),
]
session.add_tool_call_to_current_turn(_raw_tc)
assert len(session.messages) == 2 # no new message created
assert session.messages[1].tool_calls == [_raw_tc]
def test_add_tool_call_creates_assistant_when_none_exists():
"""When there's no current-turn assistant, a new one is created."""
session = ChatSession.new(user_id="u")
session.messages = [
ChatMessage(role="user", content="hi"),
]
session.add_tool_call_to_current_turn(_raw_tc)
assert len(session.messages) == 2
assert session.messages[1].role == "assistant"
assert session.messages[1].tool_calls == [_raw_tc]
def test_add_tool_call_does_not_cross_user_boundary():
"""A user message acts as a boundary — previous assistant is not modified."""
session = ChatSession.new(user_id="u")
session.messages = [
ChatMessage(role="assistant", content="old turn"),
ChatMessage(role="user", content="new message"),
]
session.add_tool_call_to_current_turn(_raw_tc)
assert len(session.messages) == 3 # new assistant was created
assert session.messages[0].tool_calls is None # old assistant untouched
assert session.messages[2].role == "assistant"
assert session.messages[2].tool_calls == [_raw_tc]
def test_add_tool_call_multiple_times():
"""Multiple long-running tool calls accumulate on the same assistant."""
session = ChatSession.new(user_id="u")
session.messages = [
ChatMessage(role="user", content="hi"),
ChatMessage(role="assistant", content="doing stuff"),
]
session.add_tool_call_to_current_turn(_raw_tc)
# Simulate a pending tool result in between (like _yield_tool_call does)
session.messages.append(
ChatMessage(role="tool", content="pending", tool_call_id="tc1")
)
session.add_tool_call_to_current_turn(_raw_tc2)
assert len(session.messages) == 3 # user, assistant, tool — no extra assistant
assert session.messages[1].tool_calls == [_raw_tc, _raw_tc2]
def test_to_openai_messages_merges_split_assistants():
"""End-to-end: session with split assistants produces valid OpenAI messages."""
session = ChatSession.new(user_id="u")
session.messages = [
ChatMessage(role="user", content="build agent"),
ChatMessage(role="assistant", content="Let me build that"),
ChatMessage(
role="assistant",
content="",
tool_calls=[
{
"id": "tc1",
"type": "function",
"function": {"name": "create_agent", "arguments": "{}"},
}
],
),
ChatMessage(role="tool", content="done", tool_call_id="tc1"),
ChatMessage(role="assistant", content="Saved!"),
ChatMessage(role="user", content="show me an example run"),
]
openai_msgs = session.to_openai_messages()
# The two consecutive assistants at index 1,2 should be merged
roles = [m["role"] for m in openai_msgs]
assert roles == ["user", "assistant", "tool", "assistant", "user"]
# The merged assistant should have both content and tool_calls
merged = cast(ChatCompletionAssistantMessageParam, openai_msgs[1])
assert merged.get("content") == "Let me build that"
tc_list = merged.get("tool_calls")
assert tc_list is not None and len(list(tc_list)) == 1
assert list(tc_list)[0]["id"] == "tc1"

View File

@@ -10,8 +10,6 @@ from typing import Any
from pydantic import BaseModel, Field
from backend.util.json import dumps as json_dumps
class ResponseType(str, Enum):
"""Types of streaming responses following AI SDK protocol."""
@@ -20,10 +18,6 @@ class ResponseType(str, Enum):
START = "start"
FINISH = "finish"
# Step lifecycle (one LLM API call within a message)
START_STEP = "start-step"
FINISH_STEP = "finish-step"
# Text streaming
TEXT_START = "text-start"
TEXT_DELTA = "text-delta"
@@ -63,16 +57,6 @@ class StreamStart(StreamBaseResponse):
description="Task ID for SSE reconnection. Clients can reconnect using GET /tasks/{taskId}/stream",
)
def to_sse(self) -> str:
"""Convert to SSE format, excluding non-protocol fields like taskId."""
import json
data: dict[str, Any] = {
"type": self.type.value,
"messageId": self.messageId,
}
return f"data: {json.dumps(data)}\n\n"
class StreamFinish(StreamBaseResponse):
"""End of message/stream."""
@@ -80,26 +64,6 @@ class StreamFinish(StreamBaseResponse):
type: ResponseType = ResponseType.FINISH
class StreamStartStep(StreamBaseResponse):
"""Start of a step (one LLM API call within a message).
The AI SDK uses this to add a step-start boundary to message.parts,
enabling visual separation between multiple LLM calls in a single message.
"""
type: ResponseType = ResponseType.START_STEP
class StreamFinishStep(StreamBaseResponse):
"""End of a step (one LLM API call within a message).
The AI SDK uses this to reset activeTextParts and activeReasoningParts,
so the next LLM call in a tool-call continuation starts with clean state.
"""
type: ResponseType = ResponseType.FINISH_STEP
# ========== Text Streaming ==========
@@ -153,7 +117,7 @@ class StreamToolOutputAvailable(StreamBaseResponse):
type: ResponseType = ResponseType.TOOL_OUTPUT_AVAILABLE
toolCallId: str = Field(..., description="Tool call ID this responds to")
output: str | dict[str, Any] = Field(..., description="Tool execution output")
# Keep these for internal backend use
# Additional fields for internal use (not part of AI SDK spec but useful)
toolName: str | None = Field(
default=None, description="Name of the tool that was executed"
)
@@ -161,17 +125,6 @@ class StreamToolOutputAvailable(StreamBaseResponse):
default=True, description="Whether the tool execution succeeded"
)
def to_sse(self) -> str:
"""Convert to SSE format, excluding non-spec fields."""
import json
data = {
"type": self.type.value,
"toolCallId": self.toolCallId,
"output": self.output,
}
return f"data: {json.dumps(data)}\n\n"
# ========== Other ==========
@@ -195,18 +148,6 @@ class StreamError(StreamBaseResponse):
default=None, description="Additional error details"
)
def to_sse(self) -> str:
"""Convert to SSE format, only emitting fields required by AI SDK protocol.
The AI SDK uses z.strictObject({type, errorText}) which rejects
any extra fields like `code` or `details`.
"""
data = {
"type": self.type.value,
"errorText": self.errorText,
}
return f"data: {json_dumps(data)}\n\n"
class StreamHeartbeat(StreamBaseResponse):
"""Heartbeat to keep SSE connection alive during long-running operations.

View File

@@ -6,7 +6,7 @@ from collections.abc import AsyncGenerator
from typing import Annotated
from autogpt_libs import auth
from fastapi import APIRouter, Depends, Header, HTTPException, Query, Response, Security
from fastapi import APIRouter, Depends, Header, HTTPException, Query, Security
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
@@ -17,29 +17,7 @@ from . import stream_registry
from .completion_handler import process_operation_failure, process_operation_success
from .config import ChatConfig
from .model import ChatSession, create_chat_session, get_chat_session, get_user_sessions
from .response_model import StreamFinish, StreamHeartbeat
from .tools.models import (
AgentDetailsResponse,
AgentOutputResponse,
AgentPreviewResponse,
AgentSavedResponse,
AgentsFoundResponse,
BlockListResponse,
BlockOutputResponse,
ClarificationNeededResponse,
DocPageResponse,
DocSearchResultsResponse,
ErrorResponse,
ExecutionStartedResponse,
InputValidationErrorResponse,
NeedLoginResponse,
NoResultsResponse,
OperationInProgressResponse,
OperationPendingResponse,
OperationStartedResponse,
SetupRequirementsResponse,
UnderstandingUpdatedResponse,
)
from .response_model import StreamFinish, StreamHeartbeat, StreamStart
config = ChatConfig()
@@ -291,6 +269,8 @@ async def stream_chat_post(
import time
stream_start_time = time.perf_counter()
# Base log metadata (task_id added after creation)
log_meta = {"component": "ChatStream", "session_id": session_id}
if user_id:
log_meta["user_id"] = user_id
@@ -348,6 +328,24 @@ async def stream_chat_post(
first_chunk_time, ttfc = None, None
chunk_count = 0
try:
# Emit a start event with task_id for reconnection
start_chunk = StreamStart(messageId=task_id, taskId=task_id)
await stream_registry.publish_chunk(task_id, start_chunk)
logger.info(
f"[TIMING] StreamStart published at {(time_module.perf_counter() - gen_start_time)*1000:.1f}ms",
extra={
"json_fields": {
**log_meta,
"elapsed_ms": (time_module.perf_counter() - gen_start_time)
* 1000,
}
},
)
logger.info(
"[TIMING] Calling stream_chat_completion",
extra={"json_fields": log_meta},
)
async for chunk in chat_service.stream_chat_completion(
session_id,
request.message,
@@ -355,7 +353,6 @@ async def stream_chat_post(
user_id=user_id,
session=session, # Pass pre-fetched session to avoid double-fetch
context=request.context,
_task_id=task_id, # Pass task_id so service emits start with taskId for reconnection
):
chunk_count += 1
if first_chunk_time is None:
@@ -391,6 +388,7 @@ async def stream_chat_post(
}
},
)
await stream_registry.mark_task_completed(task_id, "completed")
except Exception as e:
elapsed = time_module.perf_counter() - gen_start_time
@@ -430,13 +428,34 @@ async def stream_chat_post(
chunks_yielded = 0
try:
# Subscribe to the task stream (this replays existing messages + live updates)
subscribe_start = time_module.perf_counter()
logger.info(
"[TIMING] Calling subscribe_to_task",
extra={"json_fields": log_meta},
)
subscriber_queue = await stream_registry.subscribe_to_task(
task_id=task_id,
user_id=user_id,
last_message_id="0-0", # Get all messages from the beginning
)
subscribe_time = (time_module.perf_counter() - subscribe_start) * 1000
logger.info(
f"[TIMING] subscribe_to_task completed in {subscribe_time:.1f}ms, "
f"queue_ok={subscriber_queue is not None}",
extra={
"json_fields": {
**log_meta,
"duration_ms": subscribe_time,
"queue_obtained": subscriber_queue is not None,
}
},
)
if subscriber_queue is None:
logger.info(
"[TIMING] subscriber_queue is None, yielding finish",
extra={"json_fields": log_meta},
)
yield StreamFinish().to_sse()
yield "data: [DONE]\n\n"
return
@@ -448,7 +467,11 @@ async def stream_chat_post(
)
while True:
try:
queue_wait_start = time_module.perf_counter()
chunk = await asyncio.wait_for(subscriber_queue.get(), timeout=30.0)
queue_wait_time = (
time_module.perf_counter() - queue_wait_start
) * 1000
chunks_yielded += 1
if not first_chunk_yielded:
@@ -456,12 +479,26 @@ async def stream_chat_post(
elapsed = time_module.perf_counter() - event_gen_start
logger.info(
f"[TIMING] FIRST CHUNK from queue at {elapsed:.2f}s, "
f"type={type(chunk).__name__}",
f"type={type(chunk).__name__}, "
f"wait={queue_wait_time:.1f}ms",
extra={
"json_fields": {
**log_meta,
"chunk_type": type(chunk).__name__,
"elapsed_ms": elapsed * 1000,
"queue_wait_ms": queue_wait_time,
}
},
)
elif chunks_yielded % 50 == 0:
logger.info(
f"[TIMING] Chunk #{chunks_yielded}, "
f"type={type(chunk).__name__}",
extra={
"json_fields": {
**log_meta,
"chunk_number": chunks_yielded,
"chunk_type": type(chunk).__name__,
}
},
)
@@ -484,6 +521,13 @@ async def stream_chat_post(
)
break
except asyncio.TimeoutError:
# Send heartbeat to keep connection alive
logger.info(
f"[TIMING] Heartbeat timeout, chunks_so_far={chunks_yielded}",
extra={
"json_fields": {**log_meta, "chunks_so_far": chunks_yielded}
},
)
yield StreamHeartbeat().to_sse()
except GeneratorExit:
@@ -548,90 +592,63 @@ async def stream_chat_post(
@router.get(
"/sessions/{session_id}/stream",
)
async def resume_session_stream(
async def stream_chat_get(
session_id: str,
message: Annotated[str, Query(min_length=1, max_length=10000)],
user_id: str | None = Depends(auth.get_user_id),
is_user_message: bool = Query(default=True),
):
"""
Resume an active stream for a session.
Stream chat responses for a session (GET - legacy endpoint).
Called by the AI SDK's ``useChat(resume: true)`` on page load.
Checks for an active (in-progress) task on the session and either replays
the full SSE stream or returns 204 No Content if nothing is running.
Streams the AI/completion responses in real time over Server-Sent Events (SSE), including:
- Text fragments as they are generated
- Tool call UI elements (if invoked)
- Tool execution results
Args:
session_id: The chat session identifier.
session_id: The chat session identifier to associate with the streamed messages.
message: The user's new message to process.
user_id: Optional authenticated user ID.
is_user_message: Whether the message is a user message.
Returns:
StreamingResponse (SSE) when an active stream exists,
or 204 No Content when there is nothing to resume.
StreamingResponse: SSE-formatted response chunks.
"""
import asyncio
active_task, _last_id = await stream_registry.get_active_task_for_session(
session_id, user_id
)
if not active_task:
return Response(status_code=204)
subscriber_queue = await stream_registry.subscribe_to_task(
task_id=active_task.task_id,
user_id=user_id,
last_message_id="0-0", # Full replay so useChat rebuilds the message
)
if subscriber_queue is None:
return Response(status_code=204)
session = await _validate_and_get_session(session_id, user_id)
async def event_generator() -> AsyncGenerator[str, None]:
chunk_count = 0
first_chunk_type: str | None = None
try:
while True:
try:
chunk = await asyncio.wait_for(subscriber_queue.get(), timeout=30.0)
if chunk_count < 3:
logger.info(
"Resume stream chunk",
extra={
"session_id": session_id,
"chunk_type": str(chunk.type),
},
)
if not first_chunk_type:
first_chunk_type = str(chunk.type)
chunk_count += 1
yield chunk.to_sse()
if isinstance(chunk, StreamFinish):
break
except asyncio.TimeoutError:
yield StreamHeartbeat().to_sse()
except GeneratorExit:
pass
except Exception as e:
logger.error(f"Error in resume stream for session {session_id}: {e}")
finally:
try:
await stream_registry.unsubscribe_from_task(
active_task.task_id, subscriber_queue
async for chunk in chat_service.stream_chat_completion(
session_id,
message,
is_user_message=is_user_message,
user_id=user_id,
session=session, # Pass pre-fetched session to avoid double-fetch
):
if chunk_count < 3:
logger.info(
"Chat stream chunk",
extra={
"session_id": session_id,
"chunk_type": str(chunk.type),
},
)
except Exception as unsub_err:
logger.error(
f"Error unsubscribing from task {active_task.task_id}: {unsub_err}",
exc_info=True,
)
logger.info(
"Resume stream completed",
extra={
"session_id": session_id,
"n_chunks": chunk_count,
"first_chunk_type": first_chunk_type,
},
)
yield "data: [DONE]\n\n"
if not first_chunk_type:
first_chunk_type = str(chunk.type)
chunk_count += 1
yield chunk.to_sse()
logger.info(
"Chat stream completed",
extra={
"session_id": session_id,
"n_chunks": chunk_count,
"first_chunk_type": first_chunk_type,
},
)
# AI SDK protocol termination
yield "data: [DONE]\n\n"
return StreamingResponse(
event_generator(),
@@ -639,8 +656,8 @@ async def resume_session_stream(
headers={
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"X-Accel-Buffering": "no",
"x-vercel-ai-ui-message-stream": "v1",
"X-Accel-Buffering": "no", # Disable nginx buffering
"x-vercel-ai-ui-message-stream": "v1", # AI SDK protocol header
},
)
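For reference, a client of either streaming endpoint sees plain SSE: JSON events prefixed with "data: ", periodic heartbeats while the backend is idle, and a final "data: [DONE]" terminator. A rough client-side sketch, not part of the diff; the URL, request body, and the heartbeat's serialized type value are assumptions.
import json
import requests
url = "http://localhost:8000/api/chat/sessions/<session_id>/stream"  # hypothetical path
with requests.post(url, json={"message": "hello"}, stream=True) as resp:
    for line in resp.iter_lines(decode_unicode=True):
        if not line or not line.startswith("data: "):
            continue                        # blank keep-alive lines between events
        payload = line[len("data: "):]
        if payload == "[DONE]":
            break                           # protocol terminator emitted by the server
        event = json.loads(payload)
        if event.get("type") == "heartbeat":   # assumed type value for StreamHeartbeat
            continue
        print(event["type"], event)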
@@ -952,42 +969,3 @@ async def health_check() -> dict:
"service": "chat",
"version": "0.1.0",
}
# ========== Schema Export (for OpenAPI / Orval codegen) ==========
ToolResponseUnion = (
AgentsFoundResponse
| NoResultsResponse
| AgentDetailsResponse
| SetupRequirementsResponse
| ExecutionStartedResponse
| NeedLoginResponse
| ErrorResponse
| InputValidationErrorResponse
| AgentOutputResponse
| UnderstandingUpdatedResponse
| AgentPreviewResponse
| AgentSavedResponse
| ClarificationNeededResponse
| BlockListResponse
| BlockOutputResponse
| DocSearchResultsResponse
| DocPageResponse
| OperationStartedResponse
| OperationPendingResponse
| OperationInProgressResponse
)
@router.get(
"/schema/tool-responses",
response_model=ToolResponseUnion,
include_in_schema=True,
summary="[Dummy] Tool response type export for codegen",
description="This endpoint is not meant to be called. It exists solely to "
"expose tool response models in the OpenAPI schema for frontend codegen.",
)
async def _tool_response_schema() -> ToolResponseUnion: # type: ignore[return]
"""Never called at runtime. Exists only so Orval generates TS types."""
raise HTTPException(status_code=501, detail="Schema-only endpoint")

View File

@@ -52,10 +52,8 @@ from .response_model import (
StreamBaseResponse,
StreamError,
StreamFinish,
StreamFinishStep,
StreamHeartbeat,
StreamStart,
StreamStartStep,
StreamTextDelta,
StreamTextEnd,
StreamTextStart,
@@ -353,10 +351,6 @@ async def stream_chat_completion(
retry_count: int = 0,
session: ChatSession | None = None,
context: dict[str, str] | None = None, # {url: str, content: str}
_continuation_message_id: (
str | None
) = None, # Internal: reuse message ID for tool call continuations
_task_id: str | None = None, # Internal: task ID for SSE reconnection support
) -> AsyncGenerator[StreamBaseResponse, None]:
"""Main entry point for streaming chat completions with database handling.
@@ -523,21 +517,16 @@ async def stream_chat_completion(
# Generate unique IDs for AI SDK protocol
import uuid as uuid_module
is_continuation = _continuation_message_id is not None
message_id = _continuation_message_id or str(uuid_module.uuid4())
message_id = str(uuid_module.uuid4())
text_block_id = str(uuid_module.uuid4())
# Only yield message start for the initial call, not for continuations.
# Yield message start
setup_time = (time.monotonic() - completion_start) * 1000
logger.info(
f"[TIMING] Setup complete, yielding StreamStart at {setup_time:.1f}ms",
extra={"json_fields": {**log_meta, "setup_time_ms": setup_time}},
)
if not is_continuation:
yield StreamStart(messageId=message_id, taskId=_task_id)
# Emit start-step before each LLM call (AI SDK uses this to add step boundaries)
yield StreamStartStep()
yield StreamStart(messageId=message_id)
try:
logger.info(
@@ -643,10 +632,6 @@ async def stream_chat_completion(
)
yield chunk
elif isinstance(chunk, StreamFinish):
if has_done_tool_call:
# Tool calls happened — close the step but don't send message-level finish.
# The continuation will open a new step, and finish will come at the end.
yield StreamFinishStep()
if not has_done_tool_call:
# Emit text-end before finish if we received text but haven't closed it
if has_received_text and not text_streaming_ended:
@@ -678,8 +663,6 @@ async def stream_chat_completion(
has_saved_assistant_message = True
has_yielded_end = True
# Emit finish-step before finish (resets AI SDK text/reasoning state)
yield StreamFinishStep()
yield chunk
elif isinstance(chunk, StreamError):
has_yielded_error = True
@@ -729,10 +712,6 @@ async def stream_chat_completion(
logger.info(
f"Retryable error encountered. Attempt {retry_count + 1}/{config.max_retries}"
)
# Close the current step before retrying so the recursive call's
# StreamStartStep doesn't produce unbalanced step events.
if not has_yielded_end:
yield StreamFinishStep()
should_retry = True
else:
# Non-retryable error or max retries exceeded
@@ -768,7 +747,6 @@ async def stream_chat_completion(
error_response = StreamError(errorText=error_message)
yield error_response
if not has_yielded_end:
yield StreamFinishStep()
yield StreamFinish()
return
@@ -783,8 +761,6 @@ async def stream_chat_completion(
retry_count=retry_count + 1,
session=session,
context=context,
_continuation_message_id=message_id, # Reuse message ID since start was already sent
_task_id=_task_id,
):
yield chunk
return # Exit after retry to avoid double-saving in finally block
@@ -800,13 +776,9 @@ async def stream_chat_completion(
# Build the messages list in the correct order
messages_to_save: list[ChatMessage] = []
# Add assistant message with tool_calls if any.
# Use extend (not assign) to preserve tool_calls already added by
# _yield_tool_call for long-running tools.
# Add assistant message with tool_calls if any
if accumulated_tool_calls:
if not assistant_response.tool_calls:
assistant_response.tool_calls = []
assistant_response.tool_calls.extend(accumulated_tool_calls)
assistant_response.tool_calls = accumulated_tool_calls
logger.info(
f"Added {len(accumulated_tool_calls)} tool calls to assistant message"
)
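A toy illustration, not from the diff, of the difference the "extend (not assign)" comment above is pointing at: assignment replaces whatever _yield_tool_call already attached, while extend() keeps it.
already_attached = ["call_from_yield_tool_call"]   # added earlier for a long-running tool
accumulated = ["call_from_stream"]
tool_calls = list(already_attached)
tool_calls = accumulated                 # assign: the earlier call is lost
assert tool_calls == ["call_from_stream"]
tool_calls = list(already_attached)
tool_calls.extend(accumulated)           # extend: both calls survive
assert tool_calls == ["call_from_yield_tool_call", "call_from_stream"]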
@@ -858,8 +830,6 @@ async def stream_chat_completion(
session=session, # Pass session object to avoid Redis refetch
context=context,
tool_call_response=str(tool_response_messages),
_continuation_message_id=message_id, # Reuse message ID to avoid duplicates
_task_id=_task_id,
):
yield chunk
@@ -1070,10 +1040,6 @@ async def _stream_chat_chunks(
:128
] # OpenRouter limit
# Enable adaptive thinking for Anthropic models via OpenRouter
if config.thinking_enabled and "anthropic" in model.lower():
extra_body["reasoning"] = {"enabled": True}
api_call_start = time_module.perf_counter()
stream = await client.chat.completions.create(
model=model,
@@ -1408,9 +1374,13 @@ async def _yield_tool_call(
operation_id=operation_id,
)
# Attach the tool_call to the current turn's assistant message
# (or create one if this is a tool-only response with no text).
session.add_tool_call_to_current_turn(tool_calls[yield_idx])
# Save assistant message with tool_call FIRST (required by LLM)
assistant_message = ChatMessage(
role="assistant",
content="",
tool_calls=[tool_calls[yield_idx]],
)
session.messages.append(assistant_message)
# Then save pending tool result
pending_message = ChatMessage(
@@ -1716,7 +1686,6 @@ async def _execute_long_running_tool_with_streaming(
task_id,
StreamError(errorText=str(e)),
)
await stream_registry.publish_chunk(task_id, StreamFinishStep())
await stream_registry.publish_chunk(task_id, StreamFinish())
await _update_pending_operation(
@@ -1833,10 +1802,6 @@ async def _generate_llm_continuation(
if session_id:
extra_body["session_id"] = session_id[:128]
# Enable adaptive thinking for Anthropic models via OpenRouter
if config.thinking_enabled and "anthropic" in config.model.lower():
extra_body["reasoning"] = {"enabled": True}
retry_count = 0
last_error: Exception | None = None
response = None
@@ -1967,10 +1932,6 @@ async def _generate_llm_continuation_with_streaming(
if session_id:
extra_body["session_id"] = session_id[:128]
# Enable adaptive thinking for Anthropic models via OpenRouter
if config.thinking_enabled and "anthropic" in config.model.lower():
extra_body["reasoning"] = {"enabled": True}
# Make streaming LLM call (no tools - just text response)
from typing import cast
@@ -1982,7 +1943,6 @@ async def _generate_llm_continuation_with_streaming(
# Publish start event
await stream_registry.publish_chunk(task_id, StreamStart(messageId=message_id))
await stream_registry.publish_chunk(task_id, StreamStartStep())
await stream_registry.publish_chunk(task_id, StreamTextStart(id=text_block_id))
# Stream the response
@@ -2006,7 +1966,6 @@ async def _generate_llm_continuation_with_streaming(
# Publish end events
await stream_registry.publish_chunk(task_id, StreamTextEnd(id=text_block_id))
await stream_registry.publish_chunk(task_id, StreamFinishStep())
if assistant_content:
# Reload session from DB to avoid race condition with user messages
@@ -2048,5 +2007,4 @@ async def _generate_llm_continuation_with_streaming(
task_id,
StreamError(errorText=f"Failed to generate response: {e}"),
)
await stream_registry.publish_chunk(task_id, StreamFinishStep())
await stream_registry.publish_chunk(task_id, StreamFinish())

View File

@@ -857,10 +857,8 @@ def _reconstruct_chunk(chunk_data: dict) -> StreamBaseResponse | None:
ResponseType,
StreamError,
StreamFinish,
StreamFinishStep,
StreamHeartbeat,
StreamStart,
StreamStartStep,
StreamTextDelta,
StreamTextEnd,
StreamTextStart,
@@ -874,8 +872,6 @@ def _reconstruct_chunk(chunk_data: dict) -> StreamBaseResponse | None:
type_to_class: dict[str, type[StreamBaseResponse]] = {
ResponseType.START.value: StreamStart,
ResponseType.FINISH.value: StreamFinish,
ResponseType.START_STEP.value: StreamStartStep,
ResponseType.FINISH_STEP.value: StreamFinishStep,
ResponseType.TEXT_START.value: StreamTextStart,
ResponseType.TEXT_DELTA.value: StreamTextDelta,
ResponseType.TEXT_END.value: StreamTextEnd,
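A minimal sketch, an assumption rather than the function's actual body, of how a serialized chunk would round-trip through the type_to_class map above once the step events are removed from it:
def reconstruct(chunk_data: dict, type_to_class: dict):
    cls = type_to_class.get(chunk_data.get("type"))
    if cls is None:
        return None                          # unknown (e.g., removed step) event types are dropped
    return cls.model_validate(chunk_data)    # pydantic re-hydration into the concrete model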

View File

@@ -118,7 +118,7 @@ def build_missing_credentials_from_graph(
preserving all supported credential types for each field.
"""
matched_keys = set(matched_credentials.keys()) if matched_credentials else set()
aggregated_fields = graph.aggregate_credentials_inputs()
aggregated_fields = graph.regular_credentials_inputs
return {
field_key: _serialize_missing_credential(field_key, field_info)
@@ -338,7 +338,7 @@ async def match_user_credentials_to_graph(
missing_creds: list[str] = []
# Get aggregated credentials requirements from the graph
aggregated_creds = graph.aggregate_credentials_inputs()
aggregated_creds = graph.regular_credentials_inputs
logger.debug(
f"Matching credentials for graph {graph.id}: {len(aggregated_creds)} required"
)

View File

@@ -0,0 +1,78 @@
"""Tests for chat tools utility functions."""
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from backend.data.model import CredentialsFieldInfo
def _make_regular_field() -> CredentialsFieldInfo:
return CredentialsFieldInfo.model_validate(
{
"credentials_provider": ["github"],
"credentials_types": ["api_key"],
"is_auto_credential": False,
},
by_alias=True,
)
def test_build_missing_credentials_excludes_auto_creds():
"""
build_missing_credentials_from_graph() should use regular_credentials_inputs
and thus exclude auto_credentials from the "missing" set.
"""
from backend.api.features.chat.tools.utils import (
build_missing_credentials_from_graph,
)
regular_field = _make_regular_field()
mock_graph = MagicMock()
# regular_credentials_inputs should only return the non-auto field
mock_graph.regular_credentials_inputs = {
"github_api_key": (regular_field, {("node-1", "credentials")}, True),
}
result = build_missing_credentials_from_graph(mock_graph, matched_credentials=None)
# Should include the regular credential
assert "github_api_key" in result
# Should NOT include the auto_credential (not in regular_credentials_inputs)
assert "google_oauth2" not in result
@pytest.mark.asyncio
async def test_match_user_credentials_excludes_auto_creds():
"""
match_user_credentials_to_graph() should use regular_credentials_inputs
and thus exclude auto_credentials from matching.
"""
from backend.api.features.chat.tools.utils import match_user_credentials_to_graph
regular_field = _make_regular_field()
mock_graph = MagicMock()
mock_graph.id = "test-graph"
# regular_credentials_inputs returns only non-auto fields
mock_graph.regular_credentials_inputs = {
"github_api_key": (regular_field, {("node-1", "credentials")}, True),
}
# Mock the credentials manager to return no credentials
with patch(
"backend.api.features.chat.tools.utils.IntegrationCredentialsManager"
) as MockCredsMgr:
mock_store = AsyncMock()
mock_store.get_all_creds.return_value = []
MockCredsMgr.return_value.store = mock_store
matched, missing = await match_user_credentials_to_graph(
user_id="test-user", graph=mock_graph
)
# No credentials available, so github should be missing
assert len(matched) == 0
assert len(missing) == 1
assert "github_api_key" in missing[0]

View File

@@ -1103,7 +1103,7 @@ async def create_preset_from_graph_execution(
raise NotFoundError(
f"Graph #{graph_execution.graph_id} not found or accessible"
)
elif len(graph.aggregate_credentials_inputs()) > 0:
elif len(graph.regular_credentials_inputs) > 0:
raise ValueError(
f"Graph execution #{graph_exec_id} can't be turned into a preset "
"because it was run before this feature existed "

View File

@@ -8,7 +8,6 @@ Includes BM25 reranking for improved lexical relevance.
import logging
import re
import time
from dataclasses import dataclass
from typing import Any, Literal
@@ -363,11 +362,7 @@ async def unified_hybrid_search(
LIMIT {limit_param} OFFSET {offset_param}
"""
try:
results = await query_raw_with_schema(sql_query, *params)
except Exception as e:
await _log_vector_error_diagnostics(e)
raise
results = await query_raw_with_schema(sql_query, *params)
total = results[0]["total_count"] if results else 0
# Apply BM25 reranking
@@ -691,11 +686,7 @@ async def hybrid_search(
LIMIT {limit_param} OFFSET {offset_param}
"""
try:
results = await query_raw_with_schema(sql_query, *params)
except Exception as e:
await _log_vector_error_diagnostics(e)
raise
results = await query_raw_with_schema(sql_query, *params)
total = results[0]["total_count"] if results else 0
@@ -727,87 +718,6 @@ async def hybrid_search_simple(
return await hybrid_search(query=query, page=page, page_size=page_size)
# ============================================================================
# Diagnostics
# ============================================================================
# Rate limit: only log vector error diagnostics once per this interval
_VECTOR_DIAG_INTERVAL_SECONDS = 60
_last_vector_diag_time: float = 0
async def _log_vector_error_diagnostics(error: Exception) -> None:
"""Log diagnostic info when 'type vector does not exist' error occurs.
Note: Diagnostic queries use query_raw_with_schema which may run on a different
pooled connection than the one that failed. Session-level search_path can differ,
so these diagnostics show cluster-wide state, not necessarily the failed session.
Includes rate limiting to avoid log spam - only logs once per minute.
Caller should re-raise the error after calling this function.
"""
global _last_vector_diag_time
# Check if this is the vector type error
error_str = str(error).lower()
if not (
"type" in error_str and "vector" in error_str and "does not exist" in error_str
):
return
# Rate limit: only log once per interval
now = time.time()
if now - _last_vector_diag_time < _VECTOR_DIAG_INTERVAL_SECONDS:
return
_last_vector_diag_time = now
try:
diagnostics: dict[str, object] = {}
try:
search_path_result = await query_raw_with_schema("SHOW search_path")
diagnostics["search_path"] = search_path_result
except Exception as e:
diagnostics["search_path"] = f"Error: {e}"
try:
schema_result = await query_raw_with_schema("SELECT current_schema()")
diagnostics["current_schema"] = schema_result
except Exception as e:
diagnostics["current_schema"] = f"Error: {e}"
try:
user_result = await query_raw_with_schema(
"SELECT current_user, session_user, current_database()"
)
diagnostics["user_info"] = user_result
except Exception as e:
diagnostics["user_info"] = f"Error: {e}"
try:
# Check pgvector extension installation (cluster-wide, stable info)
ext_result = await query_raw_with_schema(
"SELECT extname, extversion, nspname as schema "
"FROM pg_extension e "
"JOIN pg_namespace n ON e.extnamespace = n.oid "
"WHERE extname = 'vector'"
)
diagnostics["pgvector_extension"] = ext_result
except Exception as e:
diagnostics["pgvector_extension"] = f"Error: {e}"
logger.error(
f"Vector type error diagnostics:\n"
f" Error: {error}\n"
f" search_path: {diagnostics.get('search_path')}\n"
f" current_schema: {diagnostics.get('current_schema')}\n"
f" user_info: {diagnostics.get('user_info')}\n"
f" pgvector_extension: {diagnostics.get('pgvector_extension')}"
)
except Exception as diag_error:
logger.error(f"Failed to collect vector error diagnostics: {diag_error}")
# Backward compatibility alias - HybridSearchWeights maps to StoreAgentSearchWeights
# for existing code that expects the popularity parameter
HybridSearchWeights = StoreAgentSearchWeights

View File

@@ -21,71 +21,43 @@ logger = logging.getLogger(__name__)
class HumanInTheLoopBlock(Block):
"""
Pauses execution and waits for human approval or rejection of the data.
This block pauses execution and waits for human approval or modification of the data.
When executed, this block creates a pending review entry and sets the node execution
status to REVIEW. The execution remains paused until a human user either approves
or rejects the data.
When executed, it creates a pending review entry and sets the node execution status
to REVIEW. The execution will remain paused until a human user either:
- Approves the data (with or without modifications)
- Rejects the data
**How it works:**
- The input data is presented to a human reviewer
- The reviewer can approve or reject (and optionally modify the data if editable)
- On approval: the data flows out through the `approved_data` output pin
- On rejection: the data flows out through the `rejected_data` output pin
**Important:** The output pins yield the actual data itself, NOT status strings.
The approval/rejection decision determines WHICH output pin fires, not the value.
You do NOT need to compare the output to "APPROVED" or "REJECTED" - simply connect
downstream blocks to the appropriate output pin for each case.
**Example usage:**
- Connect `approved_data` → next step in your workflow (data was approved)
- Connect `rejected_data` → error handling or notification (data was rejected)
This is useful for workflows that require human validation or intervention before
proceeding to the next steps.
"""
class Input(BlockSchemaInput):
data: Any = SchemaField(
description="The data to be reviewed by a human user. "
"This exact data will be passed through to either approved_data or "
"rejected_data output based on the reviewer's decision."
)
data: Any = SchemaField(description="The data to be reviewed by a human user")
name: str = SchemaField(
description="A descriptive name for what this data represents. "
"This helps the reviewer understand what they are reviewing.",
description="A descriptive name for what this data represents",
)
editable: bool = SchemaField(
description="Whether the human reviewer can edit the data before "
"approving or rejecting it",
description="Whether the human reviewer can edit the data",
default=True,
advanced=True,
)
class Output(BlockSchemaOutput):
approved_data: Any = SchemaField(
description="Outputs the input data when the reviewer APPROVES it. "
"The value is the actual data itself (not a status string like 'APPROVED'). "
"If the reviewer edited the data, this contains the modified version. "
"Connect downstream blocks here for the 'approved' workflow path."
description="The data when approved (may be modified by reviewer)"
)
rejected_data: Any = SchemaField(
description="Outputs the input data when the reviewer REJECTS it. "
"The value is the actual data itself (not a status string like 'REJECTED'). "
"If the reviewer edited the data, this contains the modified version. "
"Connect downstream blocks here for the 'rejected' workflow path."
description="The data when rejected (may be modified by reviewer)"
)
review_message: str = SchemaField(
description="Optional message provided by the reviewer explaining their "
"decision. Only outputs when the reviewer provides a message; "
"this pin does not fire if no message was given.",
default="",
description="Any message provided by the reviewer", default=""
)
def __init__(self):
super().__init__(
id="8b2a7b3c-6e9d-4a5f-8c1b-2e3f4a5b6c7d",
description="Pause execution for human review. Data flows through "
"approved_data or rejected_data output based on the reviewer's decision. "
"Outputs contain the actual data, not status strings.",
description="Pause execution and wait for human approval or modification of data",
categories={BlockCategory.BASIC},
input_schema=HumanInTheLoopBlock.Input,
output_schema=HumanInTheLoopBlock.Output,
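A toy sketch, not from the diff, of the routing semantics the docstring describes: the reviewer's decision selects which output pin fires, and the pin carries the (possibly edited) data itself rather than a status string.
def route_review(decision: str, data, reviewer_message: str = ""):
    outputs = []
    if decision == "approved":
        outputs.append(("approved_data", data))      # the data itself, not the string "APPROVED"
    else:
        outputs.append(("rejected_data", data))      # the data itself, not the string "REJECTED"
    if reviewer_message:
        outputs.append(("review_message", reviewer_message))  # only fires when a message was given
    return outputs
assert route_review("approved", {"row": 1}) == [("approved_data", {"row": 1})]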

View File

@@ -319,6 +319,8 @@ class BlockSchema(BaseModel):
"credentials_provider": [config.get("provider", "google")],
"credentials_types": [config.get("type", "oauth2")],
"credentials_scopes": config.get("scopes"),
"is_auto_credential": True,
"input_field_name": info["field_name"],
}
result[kwarg_name] = CredentialsFieldInfo.model_validate(
auto_schema, by_alias=True

View File

@@ -447,8 +447,7 @@ class GraphModel(Graph, GraphMeta):
@computed_field
@property
def credentials_input_schema(self) -> dict[str, Any]:
graph_credentials_inputs = self.aggregate_credentials_inputs()
graph_credentials_inputs = self.regular_credentials_inputs
logger.debug(
f"Combined credentials input fields for graph #{self.id} ({self.name}): "
f"{graph_credentials_inputs}"
@@ -604,6 +603,28 @@ class GraphModel(Graph, GraphMeta):
for key, (field_info, node_field_pairs) in combined.items()
}
@property
def regular_credentials_inputs(
self,
) -> dict[str, tuple[CredentialsFieldInfo, set[tuple[str, str]], bool]]:
"""Credentials that need explicit user mapping (CredentialsMetaInput fields)."""
return {
k: v
for k, v in self.aggregate_credentials_inputs().items()
if not v[0].is_auto_credential
}
@property
def auto_credentials_inputs(
self,
) -> dict[str, tuple[CredentialsFieldInfo, set[tuple[str, str]], bool]]:
"""Credentials embedded in file fields (_credentials_id), resolved at execution time."""
return {
k: v
for k, v in self.aggregate_credentials_inputs().items()
if v[0].is_auto_credential
}
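A runnable toy illustration, with all names and values invented, of how these two properties partition the (field_info, node_field_pairs, is_required) 3-tuples coming out of aggregate_credentials_inputs():
from collections import namedtuple
FieldInfo = namedtuple("FieldInfo", "is_auto_credential")   # stand-in for CredentialsFieldInfo
aggregated = {
    "github_credentials": (FieldInfo(False), {("node-1", "credentials")}, True),
    "google_credentials": (FieldInfo(True), {("node-2", "credentials")}, True),
}
regular = {k: v for k, v in aggregated.items() if not v[0].is_auto_credential}
auto = {k: v for k, v in aggregated.items() if v[0].is_auto_credential}
assert set(regular) == {"github_credentials"}   # surfaced in UI schemas, CoPilot, presets
assert set(auto) == {"google_credentials"}      # resolved from _credentials_id at execution time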
def reassign_ids(self, user_id: str, reassign_graph_id: bool = False):
"""
Reassigns all IDs in the graph to new UUIDs.
@@ -654,6 +675,16 @@ class GraphModel(Graph, GraphMeta):
) and graph_id in graph_id_map:
node.input_default["graph_id"] = graph_id_map[graph_id]
# Clear auto-credentials references (e.g., _credentials_id in
# GoogleDriveFile fields) so the new user must re-authenticate
# with their own account
for node in graph.nodes:
if not node.input_default:
continue
for key, value in node.input_default.items():
if isinstance(value, dict) and "_credentials_id" in value:
del value["_credentials_id"]
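The effect on a forked node's input_default, in miniature (values invented): the key is deleted rather than set to None, so downstream code can tell "needs re-auth" apart from "chained data".
field = {"_credentials_id": "cred-123", "id": "file-1", "name": "a.xlsx"}
if isinstance(field, dict) and "_credentials_id" in field:
    del field["_credentials_id"]
assert field == {"id": "file-1", "name": "a.xlsx"}   # file metadata preserved, credential ref gone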
def validate_graph(
self,
for_run: bool = False,
@@ -743,11 +774,6 @@ class GraphModel(Graph, GraphMeta):
# For invalid blocks, we still raise immediately as this is a structural issue
raise ValueError(f"Invalid block {node.block_id} for node #{node.id}")
if block.disabled:
raise ValueError(
f"Block {node.block_id} is disabled and cannot be used in graphs"
)
node_input_mask = (
nodes_input_masks.get(node.id, {}) if nodes_input_masks else {}
)

View File

@@ -463,3 +463,329 @@ def test_node_credentials_optional_with_other_metadata():
assert node.credentials_optional is True
assert node.metadata["position"] == {"x": 100, "y": 200}
assert node.metadata["customized_name"] == "My Custom Node"
# ============================================================================
# Tests for CredentialsFieldInfo.combine() field propagation
def test_combine_preserves_is_auto_credential_flag():
"""
CredentialsFieldInfo.combine() must propagate is_auto_credential and
input_field_name to the combined result. Regression test for reviewer
finding that combine() dropped these fields.
"""
from backend.data.model import CredentialsFieldInfo
auto_field = CredentialsFieldInfo.model_validate(
{
"credentials_provider": ["google"],
"credentials_types": ["oauth2"],
"credentials_scopes": ["drive.readonly"],
"is_auto_credential": True,
"input_field_name": "spreadsheet",
},
by_alias=True,
)
# combine() takes *args of (field_info, key) tuples
combined = CredentialsFieldInfo.combine(
(auto_field, ("node-1", "credentials")),
(auto_field, ("node-2", "credentials")),
)
assert len(combined) == 1
group_key = next(iter(combined))
combined_info, combined_keys = combined[group_key]
assert combined_info.is_auto_credential is True
assert combined_info.input_field_name == "spreadsheet"
assert combined_keys == {("node-1", "credentials"), ("node-2", "credentials")}
def test_combine_preserves_regular_credential_defaults():
"""Regular credentials should have is_auto_credential=False after combine()."""
from backend.data.model import CredentialsFieldInfo
regular_field = CredentialsFieldInfo.model_validate(
{
"credentials_provider": ["github"],
"credentials_types": ["api_key"],
"is_auto_credential": False,
},
by_alias=True,
)
combined = CredentialsFieldInfo.combine(
(regular_field, ("node-1", "credentials")),
)
group_key = next(iter(combined))
combined_info, _ = combined[group_key]
assert combined_info.is_auto_credential is False
assert combined_info.input_field_name is None
# ============================================================================
# Tests for _reassign_ids credential clearing (Fix 3: SECRT-1772)
def test_reassign_ids_clears_credentials_id():
"""
[SECRT-1772] _reassign_ids should clear _credentials_id from
GoogleDriveFile-style input_default fields so forked agents
don't retain the original creator's credential references.
"""
from backend.data.graph import GraphModel
node = Node(
id="node-1",
block_id=StoreValueBlock().id,
input_default={
"spreadsheet": {
"_credentials_id": "original-cred-id",
"id": "file-123",
"name": "test.xlsx",
"mimeType": "application/vnd.google-apps.spreadsheet",
"url": "https://docs.google.com/spreadsheets/d/file-123",
},
},
)
graph = Graph(
id="test-graph",
name="Test",
description="Test",
nodes=[node],
links=[],
)
GraphModel._reassign_ids(graph, user_id="new-user", graph_id_map={})
# _credentials_id key should be removed (not set to None) so that
# _acquire_auto_credentials correctly errors instead of treating it as chained data
assert "_credentials_id" not in graph.nodes[0].input_default["spreadsheet"]
def test_reassign_ids_preserves_non_credential_fields():
"""
Regression guard: _reassign_ids should NOT modify non-credential fields
like name, mimeType, id, url.
"""
from backend.data.graph import GraphModel
node = Node(
id="node-1",
block_id=StoreValueBlock().id,
input_default={
"spreadsheet": {
"_credentials_id": "cred-abc",
"id": "file-123",
"name": "test.xlsx",
"mimeType": "application/vnd.google-apps.spreadsheet",
"url": "https://docs.google.com/spreadsheets/d/file-123",
},
},
)
graph = Graph(
id="test-graph",
name="Test",
description="Test",
nodes=[node],
links=[],
)
GraphModel._reassign_ids(graph, user_id="new-user", graph_id_map={})
field = graph.nodes[0].input_default["spreadsheet"]
assert field["id"] == "file-123"
assert field["name"] == "test.xlsx"
assert field["mimeType"] == "application/vnd.google-apps.spreadsheet"
assert field["url"] == "https://docs.google.com/spreadsheets/d/file-123"
def test_reassign_ids_handles_no_credentials():
"""
Regression guard: _reassign_ids should not error when input_default
has no dict fields with _credentials_id.
"""
from backend.data.graph import GraphModel
node = Node(
id="node-1",
block_id=StoreValueBlock().id,
input_default={
"input": "some value",
"another_input": 42,
},
)
graph = Graph(
id="test-graph",
name="Test",
description="Test",
nodes=[node],
links=[],
)
GraphModel._reassign_ids(graph, user_id="new-user", graph_id_map={})
# Should not error, fields unchanged
assert graph.nodes[0].input_default["input"] == "some value"
assert graph.nodes[0].input_default["another_input"] == 42
def test_reassign_ids_handles_multiple_credential_fields():
"""
[SECRT-1772] When a node has multiple dict fields with _credentials_id,
ALL of them should be cleared.
"""
from backend.data.graph import GraphModel
node = Node(
id="node-1",
block_id=StoreValueBlock().id,
input_default={
"spreadsheet": {
"_credentials_id": "cred-1",
"id": "file-1",
"name": "file1.xlsx",
},
"doc_file": {
"_credentials_id": "cred-2",
"id": "file-2",
"name": "file2.docx",
},
"plain_input": "not a dict",
},
)
graph = Graph(
id="test-graph",
name="Test",
description="Test",
nodes=[node],
links=[],
)
GraphModel._reassign_ids(graph, user_id="new-user", graph_id_map={})
assert "_credentials_id" not in graph.nodes[0].input_default["spreadsheet"]
assert "_credentials_id" not in graph.nodes[0].input_default["doc_file"]
assert graph.nodes[0].input_default["plain_input"] == "not a dict"
# ============================================================================
# Tests for discriminate() field propagation
def test_discriminate_preserves_is_auto_credential_flag():
"""
CredentialsFieldInfo.discriminate() must propagate is_auto_credential and
input_field_name to the discriminated result. Regression test for
discriminate() dropping these fields (same class of bug as combine()).
"""
from backend.data.model import CredentialsFieldInfo
auto_field = CredentialsFieldInfo.model_validate(
{
"credentials_provider": ["google", "openai"],
"credentials_types": ["oauth2"],
"credentials_scopes": ["drive.readonly"],
"is_auto_credential": True,
"input_field_name": "spreadsheet",
"discriminator": "model",
"discriminator_mapping": {"gpt-4": "openai", "gemini": "google"},
},
by_alias=True,
)
discriminated = auto_field.discriminate("gemini")
assert discriminated.is_auto_credential is True
assert discriminated.input_field_name == "spreadsheet"
assert discriminated.provider == frozenset(["google"])
def test_discriminate_preserves_regular_credential_defaults():
"""Regular credentials should have is_auto_credential=False after discriminate()."""
from backend.data.model import CredentialsFieldInfo
regular_field = CredentialsFieldInfo.model_validate(
{
"credentials_provider": ["google", "openai"],
"credentials_types": ["api_key"],
"is_auto_credential": False,
"discriminator": "model",
"discriminator_mapping": {"gpt-4": "openai", "gemini": "google"},
},
by_alias=True,
)
discriminated = regular_field.discriminate("gpt-4")
assert discriminated.is_auto_credential is False
assert discriminated.input_field_name is None
assert discriminated.provider == frozenset(["openai"])
# ============================================================================
# Tests for credentials_input_schema excluding auto_credentials
def test_credentials_input_schema_excludes_auto_creds():
"""
GraphModel.credentials_input_schema should exclude auto_credentials
(is_auto_credential=True) from the schema. Auto_credentials are
transparently resolved at execution time via file picker data.
"""
from datetime import datetime, timezone
from unittest.mock import PropertyMock, patch
from backend.data.graph import GraphModel, NodeModel
from backend.data.model import CredentialsFieldInfo
regular_field_info = CredentialsFieldInfo.model_validate(
{
"credentials_provider": ["github"],
"credentials_types": ["api_key"],
"is_auto_credential": False,
},
by_alias=True,
)
graph = GraphModel(
id="test-graph",
version=1,
name="Test",
description="Test",
user_id="test-user",
created_at=datetime.now(timezone.utc),
nodes=[
NodeModel(
id="node-1",
block_id=StoreValueBlock().id,
input_default={},
graph_id="test-graph",
graph_version=1,
),
],
links=[],
)
# Mock regular_credentials_inputs to return only the non-auto field (3-tuple)
regular_only = {
"github_credentials": (
regular_field_info,
{("node-1", "credentials")},
True,
),
}
with patch.object(
type(graph),
"regular_credentials_inputs",
new_callable=PropertyMock,
return_value=regular_only,
):
schema = graph.credentials_input_schema
field_names = set(schema.get("properties", {}).keys())
# Should include regular credential but NOT auto_credential
assert "github_credentials" in field_names
assert "google_credentials" not in field_names

View File

@@ -571,6 +571,8 @@ class CredentialsFieldInfo(BaseModel, Generic[CP, CT]):
discriminator: Optional[str] = None
discriminator_mapping: Optional[dict[str, CP]] = None
discriminator_values: set[Any] = Field(default_factory=set)
is_auto_credential: bool = False
input_field_name: Optional[str] = None
@classmethod
def combine(
@@ -651,6 +653,9 @@ class CredentialsFieldInfo(BaseModel, Generic[CP, CT]):
+ "_credentials"
)
# Propagate is_auto_credential from the combined field.
# All fields in a group should share the same is_auto_credential
# value since auto and regular credentials serve different purposes.
result[group_key] = (
CredentialsFieldInfo[CP, CT](
credentials_provider=combined.provider,
@@ -659,6 +664,8 @@ class CredentialsFieldInfo(BaseModel, Generic[CP, CT]):
discriminator=combined.discriminator,
discriminator_mapping=combined.discriminator_mapping,
discriminator_values=set(all_discriminator_values),
is_auto_credential=combined.is_auto_credential,
input_field_name=combined.input_field_name,
),
combined_keys,
)
@@ -684,6 +691,8 @@ class CredentialsFieldInfo(BaseModel, Generic[CP, CT]):
discriminator=self.discriminator,
discriminator_mapping=self.discriminator_mapping,
discriminator_values=self.discriminator_values,
is_auto_credential=self.is_auto_credential,
input_field_name=self.input_field_name,
)

View File

@@ -172,6 +172,81 @@ def execute_graph(
T = TypeVar("T")
async def _acquire_auto_credentials(
input_model: type[BlockSchema],
input_data: dict[str, Any],
creds_manager: "IntegrationCredentialsManager",
user_id: str,
) -> tuple[dict[str, Any], list[AsyncRedisLock]]:
"""
Resolve auto_credentials from GoogleDriveFileField-style inputs.
Returns:
(extra_exec_kwargs, locks): kwargs to inject into block execution, and
credential locks to release after execution completes.
"""
extra_exec_kwargs: dict[str, Any] = {}
locks: list[AsyncRedisLock] = []
# NOTE: If a block ever has multiple auto-credential fields, a ValueError
# on a later field will strand locks acquired for earlier fields. They'll
# auto-expire via Redis TTL, but add a try/except to release partial locks
# if that becomes a real scenario.
for kwarg_name, info in input_model.get_auto_credentials_fields().items():
field_name = info["field_name"]
field_data = input_data.get(field_name)
if field_data and isinstance(field_data, dict):
# Check if _credentials_id key exists in the field data
if "_credentials_id" in field_data:
cred_id = field_data["_credentials_id"]
if cred_id:
# Credential ID provided - acquire credentials
provider = info.get("config", {}).get(
"provider", "external service"
)
file_name = field_data.get("name", "selected file")
try:
credentials, lock = await creds_manager.acquire(
user_id, cred_id
)
locks.append(lock)
extra_exec_kwargs[kwarg_name] = credentials
except ValueError:
raise ValueError(
f"{provider.capitalize()} credentials for "
f"'{file_name}' in field '{field_name}' are not "
f"available in your account. "
f"This can happen if the agent was created by another "
f"user or the credentials were deleted. "
f"Please open the agent in the builder and re-select "
f"the file to authenticate with your own account."
)
# else: _credentials_id is explicitly None, skip (chained data)
else:
# _credentials_id key missing entirely - this is an error
provider = info.get("config", {}).get("provider", "external service")
file_name = field_data.get("name", "selected file")
raise ValueError(
f"Authentication missing for '{file_name}' in field "
f"'{field_name}'. Please re-select the file to authenticate "
f"with {provider.capitalize()}."
)
elif field_data is None and field_name not in input_data:
# Field not in input_data at all = connected from upstream block, skip
pass
else:
# field_data is None/empty but key IS in input_data = user didn't select
provider = info.get("config", {}).get("provider", "external service")
raise ValueError(
f"No file selected for '{field_name}'. "
f"Please select a file to provide "
f"{provider.capitalize()} authentication."
)
return extra_exec_kwargs, locks
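Summarizing the branch logic above with concrete inputs (a sketch; field and file names invented). The helper distinguishes three states of _credentials_id in the file field data, plus the case where the field key is absent from input_data entirely:
cases = {
    # field value found in input_data                 -> behaviour of _acquire_auto_credentials
    "user_selected": {"_credentials_id": "cred-1", "name": "a.xlsx"},  # acquires credentials + lock
    "chained_data":  {"_credentials_id": None, "name": "a.xlsx"},      # skipped: data chained from upstream
    "forked_agent":  {"name": "a.xlsx"},                               # ValueError: re-authentication required
    "nothing_chosen": None,                                            # ValueError: no file selected
}
# A field key missing from input_data altogether means the value is wired from an
# upstream node, so the helper skips it without raising.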
async def execute_node(
node: Node,
data: NodeExecutionEntry,
@@ -213,9 +288,6 @@ async def execute_node(
block_name=node_block.name,
)
if node_block.disabled:
raise ValueError(f"Block {node_block.id} is disabled and cannot be executed")
# Sanity check: validate the execution input.
input_data, error = validate_exec(node, data.inputs, resolve_input=False)
if input_data is None:
@@ -274,41 +346,14 @@ async def execute_node(
extra_exec_kwargs[field_name] = credentials
# Handle auto-generated credentials (e.g., from GoogleDriveFileInput)
for kwarg_name, info in input_model.get_auto_credentials_fields().items():
field_name = info["field_name"]
field_data = input_data.get(field_name)
if field_data and isinstance(field_data, dict):
# Check if _credentials_id key exists in the field data
if "_credentials_id" in field_data:
cred_id = field_data["_credentials_id"]
if cred_id:
# Credential ID provided - acquire credentials
provider = info.get("config", {}).get(
"provider", "external service"
)
file_name = field_data.get("name", "selected file")
try:
credentials, lock = await creds_manager.acquire(
user_id, cred_id
)
creds_locks.append(lock)
extra_exec_kwargs[kwarg_name] = credentials
except ValueError:
# Credential was deleted or doesn't exist
raise ValueError(
f"Authentication expired for '{file_name}' in field '{field_name}'. "
f"The saved {provider.capitalize()} credentials no longer exist. "
f"Please re-select the file to re-authenticate."
)
# else: _credentials_id is explicitly None, skip credentials (for chained data)
else:
# _credentials_id key missing entirely - this is an error
provider = info.get("config", {}).get("provider", "external service")
file_name = field_data.get("name", "selected file")
raise ValueError(
f"Authentication missing for '{file_name}' in field '{field_name}'. "
f"Please re-select the file to authenticate with {provider.capitalize()}."
)
auto_extra_kwargs, auto_locks = await _acquire_auto_credentials(
input_model=input_model,
input_data=input_data,
creds_manager=creds_manager,
user_id=user_id,
)
extra_exec_kwargs.update(auto_extra_kwargs)
creds_locks.extend(auto_locks)
output_size = 0

View File

@@ -0,0 +1,320 @@
"""
Tests for auto_credentials handling in execute_node().
These test the _acquire_auto_credentials() helper function extracted from
execute_node() (manager.py lines 273-308).
"""
import pytest
from pytest_mock import MockerFixture
@pytest.fixture
def google_drive_file_data():
return {
"valid": {
"_credentials_id": "cred-id-123",
"id": "file-123",
"name": "test.xlsx",
"mimeType": "application/vnd.google-apps.spreadsheet",
},
"chained": {
"_credentials_id": None,
"id": "file-456",
"name": "chained.xlsx",
"mimeType": "application/vnd.google-apps.spreadsheet",
},
"missing_key": {
"id": "file-789",
"name": "bad.xlsx",
"mimeType": "application/vnd.google-apps.spreadsheet",
},
}
@pytest.fixture
def mock_input_model(mocker: MockerFixture):
"""Create a mock input model with get_auto_credentials_fields() returning one field."""
input_model = mocker.MagicMock()
input_model.get_auto_credentials_fields.return_value = {
"credentials": {
"field_name": "spreadsheet",
"config": {
"provider": "google",
"type": "oauth2",
"scopes": ["https://www.googleapis.com/auth/drive.readonly"],
},
}
}
return input_model
@pytest.fixture
def mock_creds_manager(mocker: MockerFixture):
manager = mocker.AsyncMock()
mock_lock = mocker.AsyncMock()
mock_creds = mocker.MagicMock()
mock_creds.id = "cred-id-123"
mock_creds.provider = "google"
manager.acquire.return_value = (mock_creds, mock_lock)
return manager, mock_creds, mock_lock
@pytest.mark.asyncio
async def test_auto_credentials_happy_path(
mocker: MockerFixture,
google_drive_file_data,
mock_input_model,
mock_creds_manager,
):
"""When field_data has a valid _credentials_id, credentials should be acquired."""
from backend.executor.manager import _acquire_auto_credentials
manager, mock_creds, mock_lock = mock_creds_manager
input_data = {"spreadsheet": google_drive_file_data["valid"]}
extra_kwargs, locks = await _acquire_auto_credentials(
input_model=mock_input_model,
input_data=input_data,
creds_manager=manager,
user_id="user-1",
)
manager.acquire.assert_called_once_with("user-1", "cred-id-123")
assert extra_kwargs["credentials"] == mock_creds
assert mock_lock in locks
@pytest.mark.asyncio
async def test_auto_credentials_field_none_static_raises(
mocker: MockerFixture,
mock_input_model,
mock_creds_manager,
):
"""
[THE BUG FIX TEST — OPEN-2895]
When field_data is None and the key IS in input_data (user didn't select a file),
should raise ValueError instead of silently skipping.
"""
from backend.executor.manager import _acquire_auto_credentials
manager, _, _ = mock_creds_manager
# Key is present but value is None = user didn't select a file
input_data = {"spreadsheet": None}
with pytest.raises(ValueError, match="No file selected"):
await _acquire_auto_credentials(
input_model=mock_input_model,
input_data=input_data,
creds_manager=manager,
user_id="user-1",
)
@pytest.mark.asyncio
async def test_auto_credentials_field_absent_skips(
mocker: MockerFixture,
mock_input_model,
mock_creds_manager,
):
"""
When the field key is NOT in input_data at all (upstream connection),
should skip without error.
"""
from backend.executor.manager import _acquire_auto_credentials
manager, _, _ = mock_creds_manager
# Key not present = connected from upstream block
input_data = {}
extra_kwargs, locks = await _acquire_auto_credentials(
input_model=mock_input_model,
input_data=input_data,
creds_manager=manager,
user_id="user-1",
)
manager.acquire.assert_not_called()
assert "credentials" not in extra_kwargs
assert locks == []
@pytest.mark.asyncio
async def test_auto_credentials_chained_cred_id_none(
mocker: MockerFixture,
google_drive_file_data,
mock_input_model,
mock_creds_manager,
):
"""
When _credentials_id is explicitly None (chained data from upstream),
should skip credential acquisition.
"""
from backend.executor.manager import _acquire_auto_credentials
manager, _, _ = mock_creds_manager
input_data = {"spreadsheet": google_drive_file_data["chained"]}
extra_kwargs, locks = await _acquire_auto_credentials(
input_model=mock_input_model,
input_data=input_data,
creds_manager=manager,
user_id="user-1",
)
manager.acquire.assert_not_called()
assert "credentials" not in extra_kwargs
@pytest.mark.asyncio
async def test_auto_credentials_missing_cred_id_key_raises(
mocker: MockerFixture,
google_drive_file_data,
mock_input_model,
mock_creds_manager,
):
"""
When _credentials_id key is missing entirely from field_data dict,
should raise ValueError.
"""
from backend.executor.manager import _acquire_auto_credentials
manager, _, _ = mock_creds_manager
input_data = {"spreadsheet": google_drive_file_data["missing_key"]}
with pytest.raises(ValueError, match="Authentication missing"):
await _acquire_auto_credentials(
input_model=mock_input_model,
input_data=input_data,
creds_manager=manager,
user_id="user-1",
)
@pytest.mark.asyncio
async def test_auto_credentials_ownership_mismatch_error(
mocker: MockerFixture,
google_drive_file_data,
mock_input_model,
mock_creds_manager,
):
"""
[SECRT-1772] When acquire() raises ValueError (credential belongs to another user),
the error message should mention 'not available' (not 'expired').
"""
from backend.executor.manager import _acquire_auto_credentials
manager, _, _ = mock_creds_manager
manager.acquire.side_effect = ValueError(
"Credentials #cred-id-123 for user #user-2 not found"
)
input_data = {"spreadsheet": google_drive_file_data["valid"]}
with pytest.raises(ValueError, match="not available in your account"):
await _acquire_auto_credentials(
input_model=mock_input_model,
input_data=input_data,
creds_manager=manager,
user_id="user-2",
)
@pytest.mark.asyncio
async def test_auto_credentials_deleted_credential_error(
mocker: MockerFixture,
google_drive_file_data,
mock_input_model,
mock_creds_manager,
):
"""
[SECRT-1772] When acquire() raises ValueError (credential was deleted),
the error message should mention 'not available' (not 'expired').
"""
from backend.executor.manager import _acquire_auto_credentials
manager, _, _ = mock_creds_manager
manager.acquire.side_effect = ValueError(
"Credentials #cred-id-123 for user #user-1 not found"
)
input_data = {"spreadsheet": google_drive_file_data["valid"]}
with pytest.raises(ValueError, match="not available in your account"):
await _acquire_auto_credentials(
input_model=mock_input_model,
input_data=input_data,
creds_manager=manager,
user_id="user-1",
)
@pytest.mark.asyncio
async def test_auto_credentials_lock_appended(
mocker: MockerFixture,
google_drive_file_data,
mock_input_model,
mock_creds_manager,
):
"""Lock from acquire() should be included in returned locks list."""
from backend.executor.manager import _acquire_auto_credentials
manager, _, mock_lock = mock_creds_manager
input_data = {"spreadsheet": google_drive_file_data["valid"]}
extra_kwargs, locks = await _acquire_auto_credentials(
input_model=mock_input_model,
input_data=input_data,
creds_manager=manager,
user_id="user-1",
)
assert len(locks) == 1
assert locks[0] is mock_lock
@pytest.mark.asyncio
async def test_auto_credentials_multiple_fields(
mocker: MockerFixture,
mock_creds_manager,
):
"""When there are multiple auto_credentials fields, only valid ones should acquire."""
from backend.executor.manager import _acquire_auto_credentials
manager, mock_creds, mock_lock = mock_creds_manager
input_model = mocker.MagicMock()
input_model.get_auto_credentials_fields.return_value = {
"credentials": {
"field_name": "spreadsheet",
"config": {"provider": "google", "type": "oauth2"},
},
"credentials2": {
"field_name": "doc_file",
"config": {"provider": "google", "type": "oauth2"},
},
}
input_data = {
"spreadsheet": {
"_credentials_id": "cred-id-123",
"id": "file-1",
"name": "file1.xlsx",
},
"doc_file": {
"_credentials_id": None,
"id": "file-2",
"name": "chained.doc",
},
}
extra_kwargs, locks = await _acquire_auto_credentials(
input_model=input_model,
input_data=input_data,
creds_manager=manager,
user_id="user-1",
)
# Only the first field should have acquired credentials
manager.acquire.assert_called_once_with("user-1", "cred-id-123")
assert "credentials" in extra_kwargs
assert "credentials2" not in extra_kwargs
assert len(locks) == 1

View File

@@ -259,7 +259,8 @@ async def _validate_node_input_credentials(
# Find any fields of type CredentialsMetaInput
credentials_fields = block.input_schema.get_credentials_fields()
if not credentials_fields:
auto_credentials_fields = block.input_schema.get_auto_credentials_fields()
if not credentials_fields and not auto_credentials_fields:
continue
# Track if any credential field is missing for this node
@@ -339,6 +340,47 @@ async def _validate_node_input_credentials(
] = "Invalid credentials: type/provider mismatch"
continue
# Validate auto-credentials (GoogleDriveFileField-based)
# These have _credentials_id embedded in the file field data
if auto_credentials_fields:
for _kwarg_name, info in auto_credentials_fields.items():
field_name = info["field_name"]
# Check input_default and nodes_input_masks for the field value
field_value = node.input_default.get(field_name)
if nodes_input_masks and node.id in nodes_input_masks:
field_value = nodes_input_masks[node.id].get(
field_name, field_value
)
if field_value and isinstance(field_value, dict):
if "_credentials_id" not in field_value:
# Key removed (e.g., on fork) — needs re-auth
has_missing_credentials = True
credential_errors[node.id][field_name] = (
"Authentication missing for the selected file. "
"Please re-select the file to authenticate with "
"your own account."
)
continue
cred_id = field_value.get("_credentials_id")
if cred_id and isinstance(cred_id, str):
try:
creds_store = get_integration_credentials_store()
creds = await creds_store.get_creds_by_id(user_id, cred_id)
except Exception as e:
has_missing_credentials = True
credential_errors[node.id][
field_name
] = f"Credentials not available: {e}"
continue
if not creds:
has_missing_credentials = True
credential_errors[node.id][field_name] = (
"The saved credentials are not available "
"for your account. Please re-select the file to "
"authenticate with your own account."
)
# If node has optional credentials and any are missing, mark for skipping
# But only if there are no other errors for this node
if (
@@ -370,8 +412,9 @@ def make_node_credentials_input_map(
"""
result: dict[str, dict[str, JsonValue]] = {}
# Get aggregated credentials fields for the graph
graph_cred_inputs = graph.aggregate_credentials_inputs()
# Only map regular credentials (not auto_credentials, which are resolved
# at execution time from _credentials_id in file field data)
graph_cred_inputs = graph.regular_credentials_inputs
for graph_input_name, (_, compatible_node_fields, _) in graph_cred_inputs.items():
# Best-effort map: skip missing items

View File

@@ -907,3 +907,335 @@ async def test_stop_graph_execution_cascades_to_child_with_reviews(
# Verify both parent and child status updates
assert mock_execution_db.update_graph_execution_stats.call_count >= 1
# ============================================================================
# Tests for auto_credentials validation in _validate_node_input_credentials
# (Fix 3: SECRT-1772 + Fix 4: Path 4)
# ============================================================================
@pytest.mark.asyncio
async def test_validate_node_input_credentials_auto_creds_valid(
mocker: MockerFixture,
):
"""
[SECRT-1772] When a node has auto_credentials with a valid _credentials_id
that exists in the store, validation should pass without errors.
"""
from backend.executor.utils import _validate_node_input_credentials
mock_node = mocker.MagicMock()
mock_node.id = "node-with-auto-creds"
mock_node.credentials_optional = False
mock_node.input_default = {
"spreadsheet": {
"_credentials_id": "valid-cred-id",
"id": "file-123",
"name": "test.xlsx",
}
}
mock_block = mocker.MagicMock()
# No regular credentials fields
mock_block.input_schema.get_credentials_fields.return_value = {}
# Has auto_credentials fields
mock_block.input_schema.get_auto_credentials_fields.return_value = {
"credentials": {
"field_name": "spreadsheet",
"config": {"provider": "google", "type": "oauth2"},
}
}
mock_node.block = mock_block
mock_graph = mocker.MagicMock()
mock_graph.nodes = [mock_node]
# Mock the credentials store to return valid credentials
mock_store = mocker.MagicMock()
mock_creds = mocker.MagicMock()
mock_creds.id = "valid-cred-id"
mock_store.get_creds_by_id = mocker.AsyncMock(return_value=mock_creds)
mocker.patch(
"backend.executor.utils.get_integration_credentials_store",
return_value=mock_store,
)
errors, nodes_to_skip = await _validate_node_input_credentials(
graph=mock_graph,
user_id="test-user",
nodes_input_masks=None,
)
assert mock_node.id not in errors
assert mock_node.id not in nodes_to_skip
@pytest.mark.asyncio
async def test_validate_node_input_credentials_auto_creds_missing(
mocker: MockerFixture,
):
"""
[SECRT-1772] When a node has auto_credentials with a _credentials_id
that doesn't exist for the current user, validation should report an error.
"""
from backend.executor.utils import _validate_node_input_credentials
mock_node = mocker.MagicMock()
mock_node.id = "node-with-bad-auto-creds"
mock_node.credentials_optional = False
mock_node.input_default = {
"spreadsheet": {
"_credentials_id": "other-users-cred-id",
"id": "file-123",
"name": "test.xlsx",
}
}
mock_block = mocker.MagicMock()
mock_block.input_schema.get_credentials_fields.return_value = {}
mock_block.input_schema.get_auto_credentials_fields.return_value = {
"credentials": {
"field_name": "spreadsheet",
"config": {"provider": "google", "type": "oauth2"},
}
}
mock_node.block = mock_block
mock_graph = mocker.MagicMock()
mock_graph.nodes = [mock_node]
# Mock the credentials store to return None (cred not found for this user)
mock_store = mocker.MagicMock()
mock_store.get_creds_by_id = mocker.AsyncMock(return_value=None)
mocker.patch(
"backend.executor.utils.get_integration_credentials_store",
return_value=mock_store,
)
errors, nodes_to_skip = await _validate_node_input_credentials(
graph=mock_graph,
user_id="different-user",
nodes_input_masks=None,
)
assert mock_node.id in errors
assert "spreadsheet" in errors[mock_node.id]
assert "not available" in errors[mock_node.id]["spreadsheet"].lower()
@pytest.mark.asyncio
async def test_validate_node_input_credentials_both_regular_and_auto(
mocker: MockerFixture,
):
"""
[SECRT-1772] A node that has BOTH regular credentials AND auto_credentials
should have both validated.
"""
from backend.executor.utils import _validate_node_input_credentials
mock_node = mocker.MagicMock()
mock_node.id = "node-with-both-creds"
mock_node.credentials_optional = False
mock_node.input_default = {
"credentials": {
"id": "regular-cred-id",
"provider": "github",
"type": "api_key",
},
"spreadsheet": {
"_credentials_id": "auto-cred-id",
"id": "file-123",
"name": "test.xlsx",
},
}
mock_credentials_field_type = mocker.MagicMock()
mock_credentials_meta = mocker.MagicMock()
mock_credentials_meta.id = "regular-cred-id"
mock_credentials_meta.provider = "github"
mock_credentials_meta.type = "api_key"
mock_credentials_field_type.model_validate.return_value = mock_credentials_meta
mock_block = mocker.MagicMock()
# Regular credentials field
mock_block.input_schema.get_credentials_fields.return_value = {
"credentials": mock_credentials_field_type,
}
# Auto-credentials field
mock_block.input_schema.get_auto_credentials_fields.return_value = {
"auto_credentials": {
"field_name": "spreadsheet",
"config": {"provider": "google", "type": "oauth2"},
}
}
mock_node.block = mock_block
mock_graph = mocker.MagicMock()
mock_graph.nodes = [mock_node]
# Mock the credentials store to return valid credentials for both
mock_store = mocker.MagicMock()
mock_regular_creds = mocker.MagicMock()
mock_regular_creds.id = "regular-cred-id"
mock_regular_creds.provider = "github"
mock_regular_creds.type = "api_key"
mock_auto_creds = mocker.MagicMock()
mock_auto_creds.id = "auto-cred-id"
def get_creds_side_effect(user_id, cred_id):
if cred_id == "regular-cred-id":
return mock_regular_creds
elif cred_id == "auto-cred-id":
return mock_auto_creds
return None
mock_store.get_creds_by_id = mocker.AsyncMock(side_effect=get_creds_side_effect)
mocker.patch(
"backend.executor.utils.get_integration_credentials_store",
return_value=mock_store,
)
errors, nodes_to_skip = await _validate_node_input_credentials(
graph=mock_graph,
user_id="test-user",
nodes_input_masks=None,
)
# Both should validate successfully - no errors
assert mock_node.id not in errors
assert mock_node.id not in nodes_to_skip
@pytest.mark.asyncio
async def test_validate_node_input_credentials_auto_creds_skipped_when_none(
mocker: MockerFixture,
):
"""
When a node has auto_credentials but the field value has _credentials_id=None
(e.g., from upstream connection), validation should skip it without error.
"""
from backend.executor.utils import _validate_node_input_credentials
mock_node = mocker.MagicMock()
mock_node.id = "node-with-chained-auto-creds"
mock_node.credentials_optional = False
mock_node.input_default = {
"spreadsheet": {
"_credentials_id": None,
"id": "file-123",
"name": "test.xlsx",
}
}
mock_block = mocker.MagicMock()
mock_block.input_schema.get_credentials_fields.return_value = {}
mock_block.input_schema.get_auto_credentials_fields.return_value = {
"credentials": {
"field_name": "spreadsheet",
"config": {"provider": "google", "type": "oauth2"},
}
}
mock_node.block = mock_block
mock_graph = mocker.MagicMock()
mock_graph.nodes = [mock_node]
errors, nodes_to_skip = await _validate_node_input_credentials(
graph=mock_graph,
user_id="test-user",
nodes_input_masks=None,
)
# No error - chained data with None cred_id is valid
assert mock_node.id not in errors
# ============================================================================
# Tests for CredentialsFieldInfo auto_credential tag (Fix 4: Path 4)
# ============================================================================
def test_credentials_field_info_auto_credential_tag():
"""
[Path 4] CredentialsFieldInfo should support is_auto_credential and
input_field_name fields for distinguishing auto from regular credentials.
"""
from backend.data.model import CredentialsFieldInfo
# Regular credential should have is_auto_credential=False by default
regular = CredentialsFieldInfo.model_validate(
{
"credentials_provider": ["github"],
"credentials_types": ["api_key"],
},
by_alias=True,
)
assert regular.is_auto_credential is False
assert regular.input_field_name is None
# Auto credential should have is_auto_credential=True
auto = CredentialsFieldInfo.model_validate(
{
"credentials_provider": ["google"],
"credentials_types": ["oauth2"],
"is_auto_credential": True,
"input_field_name": "spreadsheet",
},
by_alias=True,
)
assert auto.is_auto_credential is True
assert auto.input_field_name == "spreadsheet"
def test_make_node_credentials_input_map_excludes_auto_creds(
mocker: MockerFixture,
):
"""
[Path 4] make_node_credentials_input_map should only include regular credentials,
not auto_credentials (which are resolved at execution time).
"""
from backend.data.model import CredentialsFieldInfo, CredentialsMetaInput
from backend.executor.utils import make_node_credentials_input_map
from backend.integrations.providers import ProviderName
# Create a mock graph whose credentials inputs contain only a regular credential;
# auto credentials are excluded via the regular_credentials_inputs property
mock_graph = mocker.MagicMock()
regular_field_info = CredentialsFieldInfo.model_validate(
{
"credentials_provider": ["github"],
"credentials_types": ["api_key"],
"is_auto_credential": False,
},
by_alias=True,
)
# Mock regular_credentials_inputs property (auto_credentials are excluded)
mock_graph.regular_credentials_inputs = {
"github_creds": (regular_field_info, {("node-1", "credentials")}, True),
}
graph_credentials_input = {
"github_creds": CredentialsMetaInput(
id="cred-123",
provider=ProviderName("github"),
type="api_key",
),
}
result = make_node_credentials_input_map(mock_graph, graph_credentials_input)
# Regular credentials should be mapped
assert "node-1" in result
assert "credentials" in result["node-1"]
# Auto credentials should NOT appear in the result
# (they would have been mapped to the kwarg_name "credentials" not "spreadsheet")
for node_id, fields in result.items():
for field_name, value in fields.items():
# Verify no auto-credential phantom entries
if isinstance(value, dict):
assert "_credentials_id" not in value

View File

@@ -364,44 +364,6 @@ def _remove_orphan_tool_responses(
return result
def validate_and_remove_orphan_tool_responses(
messages: list[dict],
log_warning: bool = True,
) -> list[dict]:
"""
Validate tool_call/tool_response pairs and remove orphaned responses.
Scans messages in order, tracking all tool_call IDs. Any tool response
referencing an ID not seen in a preceding message is considered orphaned
and removed. This prevents API errors like Anthropic's "unexpected tool_use_id".
Args:
messages: List of messages to validate (OpenAI or Anthropic format)
log_warning: Whether to log a warning when orphans are found
Returns:
A new list with orphaned tool responses removed
"""
available_ids: set[str] = set()
orphan_ids: set[str] = set()
for msg in messages:
available_ids |= _extract_tool_call_ids_from_message(msg)
for resp_id in _extract_tool_response_ids_from_message(msg):
if resp_id not in available_ids:
orphan_ids.add(resp_id)
if not orphan_ids:
return messages
if log_warning:
logger.warning(
f"Removing {len(orphan_ids)} orphan tool response(s): {orphan_ids}"
)
return _remove_orphan_tool_responses(messages, orphan_ids)
def _ensure_tool_pairs_intact(
recent_messages: list[dict],
all_messages: list[dict],
@@ -761,13 +723,6 @@ async def compress_context(
# Filter out any None values that may have been introduced
final_msgs: list[dict] = [m for m in msgs if m is not None]
# ---- STEP 6: Final tool-pair validation ---------------------------------
# After all compression steps, verify that every tool response has a
# matching tool_call in a preceding assistant message. Remove orphans
# to prevent API errors (e.g., Anthropic's "unexpected tool_use_id").
final_msgs = validate_and_remove_orphan_tool_responses(final_msgs)
final_count = sum(_msg_tokens(m, enc) for m in final_msgs)
error = None
if final_count + reserve > target_tokens:
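
For reference, the orphan scan described in the removed docstring can be illustrated with a small standalone example. Message shapes are simplified to an OpenAI-style format; the real helpers extract IDs for both OpenAI and Anthropic messages.

# Simplified illustration of the removed orphan scan; not the actual helper.
def drop_orphan_tool_responses(messages: list[dict]) -> list[dict]:
    seen_call_ids: set[str] = set()
    kept: list[dict] = []
    for msg in messages:
        for call in msg.get("tool_calls") or []:
            seen_call_ids.add(call["id"])
        if msg.get("role") == "tool" and msg.get("tool_call_id") not in seen_call_ids:
            continue  # orphan response: no preceding tool_call with this ID
        kept.append(msg)
    return kept

msgs = [
    {"role": "assistant", "tool_calls": [{"id": "call_1"}]},
    {"role": "tool", "tool_call_id": "call_1", "content": "ok"},
    {"role": "tool", "tool_call_id": "call_9", "content": "orphan"},  # gets dropped
]
assert [m.get("tool_call_id") for m in drop_orphan_tool_responses(msgs)] == [None, "call_1"]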

View File

@@ -46,14 +46,14 @@ pycares = ">=4.9.0,<5"
[[package]]
name = "aiofiles"
version = "25.1.0"
version = "24.1.0"
description = "File support for asyncio."
optional = false
python-versions = ">=3.9"
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "aiofiles-25.1.0-py3-none-any.whl", hash = "sha256:abe311e527c862958650f9438e859c1fa7568a141b22abcd015e120e86a85695"},
{file = "aiofiles-25.1.0.tar.gz", hash = "sha256:a8d728f0a29de45dc521f18f07297428d56992a742f0cd2701ba86e44d23d5b2"},
{file = "aiofiles-24.1.0-py3-none-any.whl", hash = "sha256:b4ec55f4195e3eb5d7abd1bf7e061763e864dd4954231fb8539a0ef8bb8260e5"},
{file = "aiofiles-24.1.0.tar.gz", hash = "sha256:22a075c9e5a3810f0c2e48f3008c94d68c65d763b9b03857924c99e57355166c"},
]
[[package]]
@@ -1382,14 +1382,14 @@ tzdata = "*"
[[package]]
name = "fastapi"
version = "0.128.6"
version = "0.128.5"
description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
{file = "fastapi-0.128.6-py3-none-any.whl", hash = "sha256:bb1c1ef87d6086a7132d0ab60869d6f1ee67283b20fbf84ec0003bd335099509"},
{file = "fastapi-0.128.6.tar.gz", hash = "sha256:0cb3946557e792d731b26a42b04912f16367e3c3135ea8290f620e234f2b604f"},
{file = "fastapi-0.128.5-py3-none-any.whl", hash = "sha256:bceec0de8aa6564599c5bcc0593b0d287703562c848271fca8546fd2c87bf4dd"},
{file = "fastapi-0.128.5.tar.gz", hash = "sha256:a7173579fc162d6471e3c6fbd9a4b7610c7a3b367bcacf6c4f90d5d022cab711"},
]
[package.dependencies]
@@ -3078,14 +3078,14 @@ type = ["pygobject-stubs", "pytest-mypy (>=1.0.1)", "shtab", "types-pywin32"]
[[package]]
name = "langfuse"
version = "3.14.1"
version = "3.13.0"
description = "A client library for accessing langfuse"
optional = false
python-versions = "<4.0,>=3.10"
groups = ["main"]
files = [
{file = "langfuse-3.14.1-py3-none-any.whl", hash = "sha256:17bed605dbfc9947cbd1738a715f6d27c1b80b6da9f2946586171958fa5820d0"},
{file = "langfuse-3.14.1.tar.gz", hash = "sha256:404a6104cd29353d7829aa417ec46565b04917e5599afdda96c5b0865f4bc991"},
{file = "langfuse-3.13.0-py3-none-any.whl", hash = "sha256:71912ddac1cc831a65df895eae538a556f564c094ae51473e747426e9ded1a9d"},
{file = "langfuse-3.13.0.tar.gz", hash = "sha256:dacea8111ca4442e97dbfec4f8d676cf9709b35357a26e468f8887b95de0012f"},
]
[package.dependencies]
@@ -8440,4 +8440,4 @@ cffi = ["cffi (>=1.17,<2.0) ; platform_python_implementation != \"PyPy\" and pyt
[metadata]
lock-version = "2.1"
python-versions = ">=3.10,<3.14"
content-hash = "c06e96ad49388ba7a46786e9ea55ea2c1a57408e15613237b4bee40a592a12af"
content-hash = "14686ee0e2dc446a75d0db145b08dc410dc31c357e25085bb0f9b0174711c4b1"

View File

@@ -21,7 +21,7 @@ cryptography = "^46.0"
discord-py = "^2.5.2"
e2b-code-interpreter = "^1.5.2"
elevenlabs = "^1.50.0"
fastapi = "^0.128.6"
fastapi = "^0.128.5"
feedparser = "^6.0.11"
flake8 = "^7.3.0"
google-api-python-client = "^2.177.0"
@@ -34,7 +34,7 @@ html2text = "^2024.2.26"
jinja2 = "^3.1.6"
jsonref = "^1.1.0"
jsonschema = "^4.25.0"
langfuse = "^3.14.1"
langfuse = "^3.11.0"
launchdarkly-server-sdk = "^9.14.1"
mem0ai = "^0.1.115"
moviepy = "^2.1.2"
@@ -76,7 +76,7 @@ yt-dlp = "2025.12.08"
zerobouncesdk = "^1.1.2"
# NOTE: please insert new dependencies in their alphabetical location
pytest-snapshot = "^0.9.0"
aiofiles = "^25.1.0"
aiofiles = "^24.1.0"
tiktoken = "^0.12.0"
aioclamd = "^1.0.0"
setuptools = "^80.9.0"

View File

@@ -25,12 +25,8 @@ RUN if [ -f .env.production ]; then \
cp .env.default .env; \
fi
RUN pnpm run generate:api
# Disable source-map generation in Docker builds to halve webpack memory usage.
# Source maps are only useful when SENTRY_AUTH_TOKEN is set (Vercel deploys);
# the Docker image never uploads them, so generating them just wastes RAM.
ENV NEXT_PUBLIC_SOURCEMAPS="false"
# In CI, we want NEXT_PUBLIC_PW_TEST=true during build so Next.js inlines it
RUN if [ "$NEXT_PUBLIC_PW_TEST" = "true" ]; then NEXT_PUBLIC_PW_TEST=true NODE_OPTIONS="--max-old-space-size=8192" pnpm build; else NODE_OPTIONS="--max-old-space-size=8192" pnpm build; fi
RUN if [ "$NEXT_PUBLIC_PW_TEST" = "true" ]; then NEXT_PUBLIC_PW_TEST=true NODE_OPTIONS="--max-old-space-size=4096" pnpm build; else NODE_OPTIONS="--max-old-space-size=4096" pnpm build; fi
# Prod stage - based on NextJS reference Dockerfile https://github.com/vercel/next.js/blob/64271354533ed16da51be5dce85f0dbd15f17517/examples/with-docker/Dockerfile
FROM node:21-alpine AS prod

View File

@@ -1,12 +1,8 @@
import { withSentryConfig } from "@sentry/nextjs";
// Allow Docker builds to skip source-map generation (halves memory usage).
// Defaults to true so Vercel/local builds are unaffected.
const enableSourceMaps = process.env.NEXT_PUBLIC_SOURCEMAPS !== "false";
/** @type {import('next').NextConfig} */
const nextConfig = {
productionBrowserSourceMaps: enableSourceMaps,
productionBrowserSourceMaps: true,
// Externalize OpenTelemetry packages to fix Turbopack HMR issues
serverExternalPackages: [
"@opentelemetry/instrumentation",
@@ -18,37 +14,9 @@ const nextConfig = {
serverActions: {
bodySizeLimit: "256mb",
},
// Increase body size limit for API routes (file uploads) - 256MB to match backend limit
proxyClientMaxBodySize: "256mb",
middlewareClientMaxBodySize: "256mb",
// Limit parallel webpack workers to reduce peak memory during builds.
cpus: 2,
},
// Work around cssnano "Invalid array length" bug in Next.js's bundled
// cssnano-simple comment parser when processing very large CSS chunks.
// CSS is still bundled correctly; gzip handles most of the size savings anyway.
webpack: (config, { dev }) => {
if (!dev) {
// Next.js adds CssMinimizerPlugin internally (after user config), so we
// can't filter it from config.plugins. Instead, intercept the webpack
// compilation hooks and replace the buggy plugin's tap with a no-op.
config.plugins.push({
apply(compiler) {
compiler.hooks.compilation.tap(
"DisableCssMinimizer",
(compilation) => {
compilation.hooks.processAssets.intercept({
register: (tap) => {
if (tap.name === "CssMinimizerPlugin") {
return { ...tap, fn: async () => {} };
}
return tap;
},
});
},
);
},
});
}
return config;
},
images: {
domains: [
@@ -86,16 +54,9 @@ const nextConfig = {
transpilePackages: ["geist"],
};
// Only run the Sentry webpack plugin when we can actually upload source maps
// (i.e. on Vercel with SENTRY_AUTH_TOKEN set). The Sentry *runtime* SDK
// (imported in app code) still captures errors without the plugin.
// Skipping the plugin saves ~1 GB of peak memory during `next build`.
const skipSentryPlugin =
process.env.NODE_ENV !== "production" ||
!enableSourceMaps ||
!process.env.SENTRY_AUTH_TOKEN;
const isDevelopmentBuild = process.env.NODE_ENV !== "production";
export default skipSentryPlugin
export default isDevelopmentBuild
? nextConfig
: withSentryConfig(nextConfig, {
// For all available options, see:
@@ -135,7 +96,7 @@ export default skipSentryPlugin
// This helps Sentry with sourcemaps... https://docs.sentry.io/platforms/javascript/guides/nextjs/sourcemaps/
sourcemaps: {
disable: !enableSourceMaps,
disable: false,
assets: [".next/**/*.js", ".next/**/*.js.map"],
ignore: ["**/node_modules/**"],
deleteSourcemapsAfterUpload: false, // Source is public anyway :)

View File

@@ -7,7 +7,7 @@
},
"scripts": {
"dev": "pnpm run generate:api:force && next dev --turbo",
"build": "cross-env NODE_OPTIONS=--max-old-space-size=16384 next build",
"build": "next build",
"start": "next start",
"start:standalone": "cd .next/standalone && node server.js",
"lint": "next lint && prettier --check .",
@@ -30,7 +30,6 @@
"defaults"
],
"dependencies": {
"@ai-sdk/react": "3.0.61",
"@faker-js/faker": "10.0.0",
"@hookform/resolvers": "5.2.2",
"@next/third-parties": "15.4.6",
@@ -61,10 +60,6 @@
"@rjsf/utils": "6.1.2",
"@rjsf/validator-ajv8": "6.1.2",
"@sentry/nextjs": "10.27.0",
"@streamdown/cjk": "1.0.1",
"@streamdown/code": "1.0.1",
"@streamdown/math": "1.0.1",
"@streamdown/mermaid": "1.0.1",
"@supabase/ssr": "0.7.0",
"@supabase/supabase-js": "2.78.0",
"@tanstack/react-query": "5.90.6",
@@ -73,7 +68,6 @@
"@vercel/analytics": "1.5.0",
"@vercel/speed-insights": "1.2.0",
"@xyflow/react": "12.9.2",
"ai": "6.0.59",
"boring-avatars": "1.11.2",
"class-variance-authority": "0.7.1",
"clsx": "2.1.1",
@@ -93,6 +87,7 @@
"launchdarkly-react-client-sdk": "3.9.0",
"lodash": "4.17.21",
"lucide-react": "0.552.0",
"moment": "2.30.1",
"next": "15.4.10",
"next-themes": "0.4.6",
"nuqs": "2.7.2",
@@ -117,11 +112,9 @@
"remark-math": "6.0.0",
"shepherd.js": "14.5.1",
"sonner": "2.0.7",
"streamdown": "2.1.0",
"tailwind-merge": "2.6.0",
"tailwind-scrollbar": "3.1.0",
"tailwindcss-animate": "1.0.7",
"use-stick-to-bottom": "1.1.2",
"uuid": "11.1.0",
"vaul": "1.1.2",
"zod": "3.25.76",
@@ -179,8 +172,7 @@
},
"pnpm": {
"overrides": {
"@opentelemetry/instrumentation": "0.209.0",
"lodash-es": "4.17.23"
"@opentelemetry/instrumentation": "0.209.0"
}
},
"packageManager": "pnpm@10.20.0+sha512.cf9998222162dd85864d0a8102e7892e7ba4ceadebbf5a31f9c2fce48dfce317a9c53b9f6464d1ef9042cba2e02ae02a9f7c143a2b438cd93c91840f0192b9dd"

File diff suppressed because it is too large

View File

@@ -0,0 +1,31 @@
"use client";
import { Tabs, TabsList, TabsTrigger } from "@/components/__legacy__/ui/tabs";
export type BuilderView = "old" | "new";
export function BuilderViewTabs({
value,
onChange,
}: {
value: BuilderView;
onChange: (value: BuilderView) => void;
}) {
return (
<div className="pointer-events-auto fixed right-4 top-20 z-50">
<Tabs
value={value}
onValueChange={(v: string) => onChange(v as BuilderView)}
>
<TabsList className="w-fit bg-zinc-900">
<TabsTrigger value="old" className="text-gray-100">
Old
</TabsTrigger>
<TabsTrigger value="new" className="text-gray-100">
New
</TabsTrigger>
</TabsList>
</Tabs>
</div>
);
}

View File

@@ -23,9 +23,6 @@ import { useCopyPaste } from "./useCopyPaste";
import { useFlow } from "./useFlow";
import { useFlowRealtime } from "./useFlowRealtime";
import "@xyflow/react/dist/style.css";
import "./flow.css";
export const Flow = () => {
const [{ flowID, flowExecutionID }] = useQueryStates({
flowID: parseAsString,

View File

@@ -1,9 +0,0 @@
/* Reset default xyflow handle styles so custom Phosphor icon handles render correctly */
.react-flow__handle {
background: transparent;
width: auto;
height: auto;
border: 0;
position: relative;
transform: none;
}

View File

@@ -1,4 +1,4 @@
import debounce from "lodash/debounce";
import { debounce } from "lodash";
import { useCallback, useEffect, useRef, useState } from "react";
import { useBlockMenuStore } from "../../../../stores/blockMenuStore";
import { getQueryClient } from "@/lib/react-query/queryClient";

View File

@@ -70,10 +70,10 @@ export const HorizontalScroll: React.FC<HorizontalScrollAreaProps> = ({
{children}
</div>
{canScrollLeft && (
<div className="pointer-events-none absolute inset-y-0 left-0 w-8 bg-gradient-to-r from-background via-background/80 to-background/0" />
<div className="pointer-events-none absolute inset-y-0 left-0 w-8 bg-gradient-to-r from-white via-white/80 to-white/0" />
)}
{canScrollRight && (
<div className="pointer-events-none absolute inset-y-0 right-0 w-8 bg-gradient-to-l from-background via-background/80 to-background/0" />
<div className="pointer-events-none absolute inset-y-0 right-0 w-8 bg-gradient-to-l from-white via-white/80 to-white/0" />
)}
{canScrollLeft && (
<button

View File

@@ -1,30 +1,100 @@
// import { Separator } from "@/components/__legacy__/ui/separator";
import { cn } from "@/lib/utils";
import React, { memo } from "react";
import { BlockMenu } from "./NewBlockMenu/BlockMenu/BlockMenu";
import { useNewControlPanel } from "./useNewControlPanel";
// import { NewSaveControl } from "../SaveControl/NewSaveControl";
import { GraphExecutionID } from "@/lib/autogpt-server-api";
// import { ControlPanelButton } from "../ControlPanelButton";
// import { GraphSearchMenu } from "../GraphMenu/GraphMenu";
import { Separator } from "@/components/__legacy__/ui/separator";
import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag";
import { CustomNode } from "../FlowEditor/nodes/CustomNode/CustomNode";
import { NewSaveControl } from "./NewSaveControl/NewSaveControl";
import { UndoRedoButtons } from "./UndoRedoButtons";
export const NewControlPanel = memo(() => {
useNewControlPanel({});
export type Control = {
icon: React.ReactNode;
label: string;
disabled?: boolean;
onClick: () => void;
};
return (
<section
className={cn(
"absolute left-4 top-10 z-10 overflow-hidden rounded-[1rem] border-none bg-white p-0 shadow-[0_1px_5px_0_rgba(0,0,0,0.1)]",
)}
>
<div className="flex flex-col items-center justify-center rounded-[1rem] p-0">
<BlockMenu />
<Separator className="text-[#E1E1E1]" />
<NewSaveControl />
<Separator className="text-[#E1E1E1]" />
<UndoRedoButtons />
</div>
</section>
);
});
export type NewControlPanelProps = {
flowExecutionID?: GraphExecutionID | undefined;
visualizeBeads?: "no" | "static" | "animate";
pinSavePopover?: boolean;
pinBlocksPopover?: boolean;
nodes?: CustomNode[];
onNodeSelect?: (nodeId: string) => void;
onNodeHover?: (nodeId: string) => void;
};
export const NewControlPanel = memo(
({
flowExecutionID: _flowExecutionID,
visualizeBeads: _visualizeBeads,
pinSavePopover: _pinSavePopover,
pinBlocksPopover: _pinBlocksPopover,
nodes: _nodes,
onNodeSelect: _onNodeSelect,
onNodeHover: _onNodeHover,
}: NewControlPanelProps) => {
const _isGraphSearchEnabled = useGetFlag(Flag.GRAPH_SEARCH);
const {
// agentDescription,
// setAgentDescription,
// saveAgent,
// agentName,
// setAgentName,
// savedAgent,
// isSaving,
// isRunning,
// isStopping,
} = useNewControlPanel({});
return (
<section
className={cn(
"absolute left-4 top-10 z-10 overflow-hidden rounded-[1rem] border-none bg-white p-0 shadow-[0_1px_5px_0_rgba(0,0,0,0.1)]",
)}
>
<div className="flex flex-col items-center justify-center rounded-[1rem] p-0">
<BlockMenu />
{/* <Separator className="text-[#E1E1E1]" />
{isGraphSearchEnabled && (
<>
<GraphSearchMenu
nodes={nodes}
blockMenuSelected={blockMenuSelected}
setBlockMenuSelected={setBlockMenuSelected}
onNodeSelect={onNodeSelect}
onNodeHover={onNodeHover}
/>
<Separator className="text-[#E1E1E1]" />
</>
)}
{controls.map((control, index) => (
<ControlPanelButton
key={index}
onClick={() => control.onClick()}
data-id={`control-button-${index}`}
data-testid={`blocks-control-${control.label.toLowerCase()}-button`}
disabled={control.disabled || false}
className="rounded-none"
>
{control.icon}
</ControlPanelButton>
))} */}
<Separator className="text-[#E1E1E1]" />
<NewSaveControl />
<Separator className="text-[#E1E1E1]" />
<UndoRedoButtons />
</div>
</section>
);
},
);
export default NewControlPanel;

View File

@@ -1,4 +1,4 @@
import { CustomNode } from "../../../FlowEditor/nodes/CustomNode/CustomNode";
import { CustomNode } from "@/app/(platform)/build/components/legacy-builder/CustomNode/CustomNode";
import {
Popover,
PopoverContent,

View File

@@ -1,5 +1,5 @@
import { useGraphSearch } from "../GraphMenuSearchBar/useGraphMenuSearchBar";
import { CustomNode } from "../../../FlowEditor/nodes/CustomNode/CustomNode";
import { CustomNode } from "@/app/(platform)/build/components/legacy-builder/CustomNode/CustomNode";
interface UseGraphMenuProps {
nodes: CustomNode[];

View File

@@ -1,7 +1,7 @@
import React from "react";
import { Separator } from "@/components/__legacy__/ui/separator";
import { ScrollArea } from "@/components/__legacy__/ui/scroll-area";
import { beautifyString, categoryColorMap } from "@/lib/utils";
import { beautifyString, getPrimaryCategoryColor } from "@/lib/utils";
import { SearchableNode } from "../GraphMenuSearchBar/useGraphMenuSearchBar";
import { TextRenderer } from "@/components/__legacy__/ui/render";
import {
@@ -73,12 +73,14 @@ export const GraphSearchContent: React.FC<GraphSearchContentProps> = ({
}
const nodeTitle =
(node.data?.metadata?.customized_name as string) ||
beautifyString(node.data?.title || "").replace(/ Block$/, "");
const nodeType = beautifyString(node.data?.title || "").replace(
/ Block$/,
"",
);
node.data?.metadata?.customized_name ||
beautifyString(node.data?.blockType || "").replace(
/ Block$/,
"",
);
const nodeType = beautifyString(
node.data?.blockType || "",
).replace(/ Block$/, "");
return (
<TooltipProvider key={node.id}>
@@ -98,13 +100,7 @@ export const GraphSearchContent: React.FC<GraphSearchContentProps> = ({
onMouseLeave={() => onNodeHover?.(null)}
>
<div
className={`h-full w-3 rounded-l-[7px] ${
(node.data?.categories?.[0]?.category &&
categoryColorMap[
node.data.categories[0].category
]) ||
"bg-gray-300 dark:bg-slate-700"
}`}
className={`h-full w-3 rounded-l-[7px] ${getPrimaryCategoryColor(node.data?.categories)}`}
/>
<div className="mx-3 flex flex-1 items-center justify-between">
<div className="mr-2 min-w-0">
@@ -133,10 +129,9 @@ export const GraphSearchContent: React.FC<GraphSearchContentProps> = ({
<div className="font-semibold">
Node Type: {nodeType}
</div>
{!!node.data?.metadata?.customized_name && (
{node.data?.metadata?.customized_name && (
<div className="text-xs text-gray-500">
Custom Name:{" "}
{String(node.data.metadata.customized_name)}
Custom Name: {node.data.metadata.customized_name}
</div>
)}
</div>

View File

@@ -1,5 +1,5 @@
import { useState, useMemo, useDeferredValue } from "react";
import { CustomNode } from "../../../FlowEditor/nodes/CustomNode/CustomNode";
import { CustomNode } from "@/app/(platform)/build/components/legacy-builder/CustomNode/CustomNode";
import { beautifyString } from "@/lib/utils";
import jaro from "jaro-winkler";
@@ -67,10 +67,10 @@ function calculateNodeScore(
const nodeTitle = (node.data?.title || "").toLowerCase(); // This includes the ID
const nodeId = (node.id || "").toLowerCase();
const nodeDescription = (node.data?.description || "").toLowerCase();
const blockType = (node.data?.title || "").toLowerCase();
const blockType = (node.data?.blockType || "").toLowerCase();
const beautifiedBlockType = beautifyString(blockType).toLowerCase();
const customizedName = String(
node.data?.metadata?.customized_name || "",
const customizedName = (
node.data?.metadata?.customized_name || ""
).toLowerCase();
// Get input and output names with defensive checks

View File

@@ -1,18 +1,54 @@
import { GraphID } from "@/lib/autogpt-server-api";
import { useSearchParams } from "next/navigation";
import { useState } from "react";
export interface NewControlPanelProps {
// flowExecutionID: GraphExecutionID | undefined;
visualizeBeads?: "no" | "static" | "animate";
}
export const useNewControlPanel = ({
// flowExecutionID,
visualizeBeads: _visualizeBeads,
}: NewControlPanelProps) => {
const [blockMenuSelected, setBlockMenuSelected] = useState<
"save" | "block" | "search" | ""
>("");
const query = useSearchParams();
const _graphVersion = query.get("flowVersion");
const _graphVersionParsed = _graphVersion
? parseInt(_graphVersion)
: undefined;
const _flowID = (query.get("flowID") as GraphID | null) ?? undefined;
// const {
// agentDescription,
// setAgentDescription,
// saveAgent,
// agentName,
// setAgentName,
// savedAgent,
// isSaving,
// isRunning,
// isStopping,
// } = useAgentGraph(
// flowID,
// graphVersion,
// flowExecutionID,
// visualizeBeads !== "no",
// );
return {
blockMenuSelected,
setBlockMenuSelected,
// agentDescription,
// setAgentDescription,
// saveAgent,
// agentName,
// setAgentName,
// savedAgent,
// isSaving,
// isRunning,
// isStopping,
};
};

View File

@@ -0,0 +1,83 @@
import { useMemo } from "react";
import { Link } from "@/app/api/__generated__/models/link";
import { useEdgeStore } from "../stores/edgeStore";
import { useNodeStore } from "../stores/nodeStore";
import { scrollbarStyles } from "@/components/styles/scrollbars";
import { cn } from "@/lib/utils";
import { customEdgeToLink } from "./helper";
export const RightSidebar = () => {
const edges = useEdgeStore((s) => s.edges);
const nodes = useNodeStore((s) => s.nodes);
const backendLinks: Link[] = useMemo(
() => edges.map(customEdgeToLink),
[edges],
);
return (
<div
className={cn(
"flex h-full w-full flex-col border-l border-slate-200 bg-white p-4 dark:border-slate-700 dark:bg-slate-900",
scrollbarStyles,
)}
>
<div className="mb-4">
<h2 className="text-lg font-semibold text-slate-800 dark:text-slate-200">
Graph Debug Panel
</h2>
</div>
<div className="flex-1 overflow-y-auto">
<h3 className="mb-2 text-sm font-semibold text-slate-700 dark:text-slate-200">
Nodes ({nodes.length})
</h3>
<div className="mb-6 space-y-3">
{nodes.map((n) => (
<div
key={n.id}
className="rounded border p-2 text-xs dark:border-slate-700"
>
<div className="mb-1 font-medium">
#{n.id} {n.data?.title ? ` ${n.data.title}` : ""}
</div>
<div className="text-slate-500 dark:text-slate-400">
hardcodedValues
</div>
<pre className="mt-1 max-h-40 overflow-auto rounded bg-slate-50 p-2 dark:bg-slate-800">
{JSON.stringify(n.data?.hardcodedValues ?? {}, null, 2)}
</pre>
</div>
))}
</div>
<h3 className="mb-2 text-sm font-semibold text-slate-700 dark:text-slate-200">
Links ({backendLinks.length})
</h3>
<div className="mb-6 space-y-3">
{backendLinks.map((l) => (
<div
key={l.id}
className="rounded border p-2 text-xs dark:border-slate-700"
>
<div className="font-medium">
{l.source_id}[{l.source_name}] → {l.sink_id}[{l.sink_name}]
</div>
<div className="mt-1 text-slate-500 dark:text-slate-400">
edge.id: {l.id}
</div>
</div>
))}
</div>
<h4 className="mb-2 text-xs font-semibold text-slate-600 dark:text-slate-300">
Backend Links JSON
</h4>
<pre className="max-h-64 overflow-auto rounded bg-slate-50 p-2 text-[11px] dark:bg-slate-800">
{JSON.stringify(backendLinks, null, 2)}
</pre>
</div>
</div>
);
};

View File

@@ -0,0 +1,443 @@
import React, { useCallback, useMemo, useState, useDeferredValue } from "react";
import { Card, CardContent, CardHeader } from "@/components/__legacy__/ui/card";
import { Label } from "@/components/__legacy__/ui/label";
import { Button } from "@/components/__legacy__/ui/button";
import { Input } from "@/components/__legacy__/ui/input";
import { TextRenderer } from "@/components/__legacy__/ui/render";
import { ScrollArea } from "@/components/__legacy__/ui/scroll-area";
import { CustomNode } from "@/app/(platform)/build/components/legacy-builder/CustomNode/CustomNode";
import { beautifyString } from "@/lib/utils";
import {
Popover,
PopoverContent,
PopoverTrigger,
} from "@/components/__legacy__/ui/popover";
import {
Block,
BlockIORootSchema,
BlockUIType,
GraphInputSchema,
GraphOutputSchema,
SpecialBlockID,
} from "@/lib/autogpt-server-api";
import { MagnifyingGlassIcon, PlusIcon } from "@radix-ui/react-icons";
import { IconToyBrick } from "@/components/__legacy__/ui/icons";
import { getPrimaryCategoryColor } from "@/lib/utils";
import {
Tooltip,
TooltipContent,
TooltipTrigger,
} from "@/components/atoms/Tooltip/BaseTooltip";
import { GraphMeta } from "@/lib/autogpt-server-api";
import jaro from "jaro-winkler";
import { getV1GetSpecificGraph } from "@/app/api/__generated__/endpoints/graphs/graphs";
import { okData } from "@/app/api/helpers";
type _Block = Omit<Block, "inputSchema" | "outputSchema"> & {
uiKey?: string;
inputSchema: BlockIORootSchema | GraphInputSchema;
outputSchema: BlockIORootSchema | GraphOutputSchema;
hardcodedValues?: Record<string, any>;
_cached?: {
blockName: string;
beautifiedName: string;
description: string;
};
};
// Hook to preprocess blocks with cached expensive operations
const useSearchableBlocks = (blocks: _Block[]): _Block[] => {
return useMemo(
() =>
blocks.map((block) => {
if (!block._cached) {
block._cached = {
blockName: block.name.toLowerCase(),
beautifiedName: beautifyString(block.name).toLowerCase(),
description: block.description.toLowerCase(),
};
}
return block;
}),
[blocks],
);
};
interface BlocksControlProps {
blocks: _Block[];
addBlock: (
id: string,
name: string,
hardcodedValues: Record<string, any>,
) => void;
pinBlocksPopover: boolean;
flows: GraphMeta[];
nodes: CustomNode[];
}
/**
* A React functional component that displays a control for managing blocks.
*
* @component
* @param {Object} BlocksControlProps - The properties for the BlocksControl component.
* @param {Block[]} BlocksControlProps.blocks - An array of blocks to be displayed and filtered.
 * @param {(id: string, name: string, hardcodedValues: Record<string, any>) => void} BlocksControlProps.addBlock - A function to call when a block is added.
* @returns The rendered BlocksControl component.
*/
export function BlocksControl({
blocks: _blocks,
addBlock,
pinBlocksPopover,
flows,
nodes,
}: BlocksControlProps) {
const [searchQuery, setSearchQuery] = useState("");
const deferredSearchQuery = useDeferredValue(searchQuery);
const [selectedCategory, setSelectedCategory] = useState<string | null>(null);
const blocks = useSearchableBlocks(_blocks);
const graphHasWebhookNodes = nodes.some((n) =>
[BlockUIType.WEBHOOK, BlockUIType.WEBHOOK_MANUAL].includes(n.data.uiType),
);
const graphHasInputNodes = nodes.some(
(n) => n.data.uiType == BlockUIType.INPUT,
);
const filteredAvailableBlocks = useMemo(() => {
const blockList = blocks
.filter((b) => b.uiType !== BlockUIType.AGENT)
.sort((a, b) => a.name.localeCompare(b.name));
// Agent blocks are created from GraphMeta which doesn't include schemas.
// Schemas will be fetched on-demand when the block is actually added.
const agentBlockList = flows
.map((flow): _Block => {
return {
id: SpecialBlockID.AGENT,
name: flow.name,
description:
`Ver.${flow.version}` +
(flow.description ? ` | ${flow.description}` : ""),
categories: [{ category: "AGENT", description: "" }],
// Empty schemas - will be populated when block is added
inputSchema: { type: "object", properties: {} },
outputSchema: { type: "object", properties: {} },
staticOutput: false,
uiType: BlockUIType.AGENT,
costs: [],
uiKey: flow.id,
hardcodedValues: {
graph_id: flow.id,
graph_version: flow.version,
// Schemas will be fetched on-demand when block is added
},
};
})
.map(
(agentBlock): _Block => ({
...agentBlock,
_cached: {
blockName: agentBlock.name.toLowerCase(),
beautifiedName: beautifyString(agentBlock.name).toLowerCase(),
description: agentBlock.description.toLowerCase(),
},
}),
);
return blockList
.concat(agentBlockList)
.map((block) => ({
block,
score: blockScoreForQuery(block, deferredSearchQuery),
}))
.filter(
({ block, score }) =>
score > 0 &&
(!selectedCategory ||
block.categories.some((cat) => cat.category === selectedCategory)),
)
.sort((a, b) => b.score - a.score)
.map(({ block }) => ({
...block,
notAvailable:
(block.uiType == BlockUIType.WEBHOOK &&
graphHasWebhookNodes &&
"Agents can only have one webhook-triggered block") ||
(block.uiType == BlockUIType.WEBHOOK &&
graphHasInputNodes &&
"Webhook-triggered blocks can't be used together with input blocks") ||
(block.uiType == BlockUIType.INPUT &&
graphHasWebhookNodes &&
"Input blocks can't be used together with a webhook-triggered block") ||
null,
}));
}, [
blocks,
flows,
selectedCategory,
deferredSearchQuery,
graphHasInputNodes,
graphHasWebhookNodes,
]);
const resetFilters = useCallback(() => {
setSearchQuery("");
setSelectedCategory(null);
}, []);
// Handler to add a block, fetching graph data on-demand for agent blocks
const handleAddBlock = useCallback(
async (block: _Block & { notAvailable: string | null }) => {
if (block.notAvailable) return;
// For agent blocks, fetch the full graph to get schemas
if (block.uiType === BlockUIType.AGENT && block.hardcodedValues) {
const graphID = block.hardcodedValues.graph_id as string;
const graphVersion = block.hardcodedValues.graph_version as number;
const graphData = okData(
await getV1GetSpecificGraph(graphID, { version: graphVersion }),
);
if (graphData) {
addBlock(block.id, block.name, {
...block.hardcodedValues,
input_schema: graphData.input_schema,
output_schema: graphData.output_schema,
});
} else {
// Fallback: add without schemas (will be incomplete)
console.error("Failed to fetch graph data for agent block");
addBlock(block.id, block.name, block.hardcodedValues || {});
}
} else {
addBlock(block.id, block.name, block.hardcodedValues || {});
}
},
[addBlock],
);
// Extract unique categories from blocks
const categories = useMemo(() => {
return Array.from(
new Set([
null,
...blocks
.flatMap((block) => block.categories.map((cat) => cat.category))
.sort(),
]),
);
}, [blocks]);
return (
<Popover
open={pinBlocksPopover ? true : undefined}
onOpenChange={(open) => open || resetFilters()}
>
<Tooltip delayDuration={500}>
<TooltipTrigger asChild>
<PopoverTrigger asChild>
<Button
variant="ghost"
size="icon"
data-id="blocks-control-popover-trigger"
data-testid="blocks-control-blocks-button"
name="Blocks"
className="dark:hover:bg-slate-800"
>
<IconToyBrick />
</Button>
</PopoverTrigger>
</TooltipTrigger>
<TooltipContent side="right">Blocks</TooltipContent>
</Tooltip>
<PopoverContent
side="right"
sideOffset={22}
align="start"
className="absolute -top-3 w-[17rem] rounded-xl border-none p-0 shadow-none md:w-[30rem]"
data-id="blocks-control-popover-content"
>
<Card className="p-3 pb-0 dark:bg-slate-900">
<CardHeader className="flex flex-col gap-x-8 gap-y-1 p-3 px-2">
<div className="items-center justify-between">
<Label
htmlFor="search-blocks"
className="whitespace-nowrap text-base font-bold text-black dark:text-white 2xl:text-xl"
data-id="blocks-control-label"
data-testid="blocks-control-blocks-label"
>
Blocks
</Label>
</div>
<div className="relative flex items-center">
<MagnifyingGlassIcon className="absolute m-2 h-5 w-5 text-gray-500 dark:text-gray-400" />
<Input
id="search-blocks"
type="text"
placeholder="Search blocks"
value={searchQuery}
onChange={(e) => setSearchQuery(e.target.value)}
className="rounded-lg px-8 py-5 dark:bg-slate-800 dark:text-white"
data-id="blocks-control-search-input"
autoComplete="off"
/>
</div>
<div
className="mt-2 flex flex-wrap gap-2"
data-testid="blocks-categories-list"
>
{categories.map((category) => {
const color = getPrimaryCategoryColor([
{ category: category || "All", description: "" },
]);
const colorClass =
selectedCategory === category ? `${color}` : "";
return (
<div
key={category}
data-testid="blocks-category"
role="button"
className={`cursor-pointer rounded-xl border px-2 py-2 text-xs font-medium dark:border-slate-700 dark:text-white ${colorClass}`}
onClick={() =>
setSelectedCategory(
selectedCategory === category ? null : category,
)
}
>
{beautifyString((category || "All").toLowerCase())}
</div>
);
})}
</div>
</CardHeader>
<CardContent className="overflow-scroll border-t border-t-gray-200 p-0 dark:border-t-slate-700">
<ScrollArea
className="h-[60vh] w-full"
data-id="blocks-control-scroll-area"
>
{filteredAvailableBlocks.map((block) => (
<Card
key={block.uiKey || block.id}
className={`m-2 my-4 flex h-20 shadow-none dark:border-slate-700 dark:bg-slate-800 dark:text-slate-100 dark:hover:bg-slate-700 ${
block.notAvailable
? "cursor-not-allowed opacity-50"
: "cursor-move hover:shadow-lg"
}`}
data-id={`block-card-${block.id}`}
draggable={!block.notAvailable}
onDragStart={(e) => {
if (block.notAvailable) return;
e.dataTransfer.effectAllowed = "copy";
e.dataTransfer.setData(
"application/reactflow",
JSON.stringify({
blockId: block.id,
blockName: block.name,
hardcodedValues: block?.hardcodedValues || {},
}),
);
}}
onClick={() => handleAddBlock(block)}
title={block.notAvailable ?? undefined}
>
<div
className={`-ml-px h-full w-3 rounded-l-xl ${getPrimaryCategoryColor(block.categories)}`}
></div>
<div className="mx-3 flex flex-1 items-center justify-between">
<div className="mr-2 min-w-0">
<span
className="block truncate pb-1 text-sm font-semibold dark:text-white"
data-id={`block-name-${block.id}`}
data-type={block.uiType}
data-testid={`block-name-${block.id}`}
>
<TextRenderer
value={beautifyString(block.name).replace(
/ Block$/,
"",
)}
truncateLengthLimit={45}
/>
</span>
<span
className="block break-all text-xs font-normal text-gray-500 dark:text-gray-400"
data-testid={`block-description-${block.id}`}
>
<TextRenderer
value={block.description}
truncateLengthLimit={165}
/>
</span>
</div>
<div
className="flex flex-shrink-0 items-center gap-1"
data-id={`block-tooltip-${block.id}`}
data-testid={`block-add`}
>
<PlusIcon className="h-6 w-6 rounded-lg bg-gray-200 stroke-black stroke-[0.5px] p-1 dark:bg-gray-700 dark:stroke-white" />
</div>
</div>
</Card>
))}
</ScrollArea>
</CardContent>
</Card>
</PopoverContent>
</Popover>
);
}
/**
* Evaluates how well a block matches the search query and returns a relevance score.
* The scoring algorithm works as follows:
* - Returns 1 if no query (all blocks match equally)
 * - Normalizes the query for case-insensitive matching before comparison
* - Returns 3 for exact substring matches in block name (highest priority)
* - Returns 2 when all query words appear in the block name (regardless of order)
* - Returns 1.X for blocks with names similar to query using Jaro-Winkler distance (X is similarity score)
* - Returns 0.5 when all query words appear in the block description (lowest priority)
* - Returns 0 for no match
*
* Higher scores will appear first in search results.
*/
function blockScoreForQuery(block: _Block, query: string): number {
if (!query) return 1;
const normalizedQuery = query.toLowerCase().trim();
const queryWords = normalizedQuery.split(/\s+/);
// Use cached values for performance
const { blockName, beautifiedName, description } = block._cached!;
// 1. Exact match in name (highest priority)
if (
blockName.includes(normalizedQuery) ||
beautifiedName.includes(normalizedQuery)
) {
return 3;
}
// 2. All query words in name (regardless of order)
const allWordsInName = queryWords.every(
(word) => blockName.includes(word) || beautifiedName.includes(word),
);
if (allWordsInName) return 2;
// 3. Similarity with name (Jaro-Winkler)
const similarityThreshold = 0.65;
const nameSimilarity = jaro(blockName, normalizedQuery);
const beautifiedSimilarity = jaro(beautifiedName, normalizedQuery);
const maxSimilarity = Math.max(nameSimilarity, beautifiedSimilarity);
if (maxSimilarity > similarityThreshold) {
return 1 + maxSimilarity; // Score between 1 and 2
}
// 4. All query words in description (lower priority)
const allWordsInDescription = queryWords.every((word) =>
description.includes(word),
);
if (allWordsInDescription) return 0.5;
return 0;
}

View File

@@ -0,0 +1,119 @@
import React from "react";
import { cn } from "@/lib/utils";
import { Button } from "@/components/__legacy__/ui/button";
import { LogOut } from "lucide-react";
import { ClockIcon, WarningIcon } from "@phosphor-icons/react";
import { IconPlay, IconSquare } from "@/components/__legacy__/ui/icons";
interface Props {
onClickAgentOutputs?: () => void;
onClickRunAgent?: () => void;
onClickStopRun: () => void;
onClickScheduleButton?: () => void;
isRunning: boolean;
isDisabled: boolean;
className?: string;
resolutionModeActive?: boolean;
}
export const BuildActionBar: React.FC<Props> = ({
onClickAgentOutputs,
onClickRunAgent,
onClickStopRun,
onClickScheduleButton,
isRunning,
isDisabled,
className,
resolutionModeActive = false,
}) => {
const buttonClasses =
"flex items-center gap-2 text-sm font-medium md:text-lg";
// Show resolution mode message instead of action buttons
if (resolutionModeActive) {
return (
<div
className={cn(
"flex w-fit select-none items-center justify-center p-4",
className,
)}
>
<div className="flex items-center gap-3 rounded-lg border border-amber-300 bg-amber-50 px-4 py-3 dark:border-amber-700 dark:bg-amber-900/30">
<WarningIcon className="size-5 text-amber-600 dark:text-amber-400" />
<span className="text-sm font-medium text-amber-800 dark:text-amber-200">
Remove incompatible connections to continue
</span>
</div>
</div>
);
}
return (
<div
className={cn(
"flex w-fit select-none items-center justify-center p-4",
className,
)}
>
<div className="flex gap-1 md:gap-4">
{onClickAgentOutputs && (
<Button
className={buttonClasses}
variant="outline"
size="primary"
onClick={onClickAgentOutputs}
title="View agent outputs"
>
<LogOut className="hidden size-5 md:flex" /> Agent Outputs
</Button>
)}
{!isRunning ? (
<Button
className={cn(
buttonClasses,
onClickRunAgent && isDisabled
? "cursor-default opacity-50 hover:bg-accent"
: "",
)}
variant="accent"
size="primary"
onClick={onClickRunAgent}
disabled={!onClickRunAgent}
title="Run the agent"
aria-label="Run the agent"
data-testid="primary-action-run-agent"
data-tutorial-id="primary-action-run-agent"
>
<IconPlay /> Run
</Button>
) : (
<Button
className={buttonClasses}
variant="destructive"
size="primary"
onClick={onClickStopRun}
title="Stop the agent"
data-id="primary-action-stop-agent"
>
<IconSquare /> Stop
</Button>
)}
{onClickScheduleButton && (
<Button
className={buttonClasses}
variant="outline"
size="primary"
onClick={onClickScheduleButton}
title="Set up a run schedule for the agent"
data-id="primary-action-schedule-agent"
>
<ClockIcon className="hidden h-5 w-5 md:flex" />
Schedule Run
</Button>
)}
</div>
</div>
);
};

View File

@@ -0,0 +1,33 @@
import {
BaseEdge,
ConnectionLineComponentProps,
Node,
getBezierPath,
Position,
} from "@xyflow/react";
export default function ConnectionLine<NodeType extends Node>({
fromPosition,
fromHandle,
fromX,
fromY,
toPosition,
toX,
toY,
}: ConnectionLineComponentProps<NodeType>) {
const sourceX =
fromPosition === Position.Right
? fromX + ((fromHandle?.width ?? 0) / 2 - 5)
: fromX - ((fromHandle?.width ?? 0) / 2 - 5);
const [path] = getBezierPath({
sourceX: sourceX,
sourceY: fromY,
sourcePosition: fromPosition,
targetX: toX,
targetY: toY,
targetPosition: toPosition,
});
return <BaseEdge path={path} style={{ strokeWidth: 2, stroke: "#555" }} />;
}

View File

@@ -0,0 +1,86 @@
import { Card, CardContent } from "@/components/__legacy__/ui/card";
import {
Tooltip,
TooltipContent,
TooltipTrigger,
} from "@/components/atoms/Tooltip/BaseTooltip";
import { Button } from "@/components/__legacy__/ui/button";
import { Separator } from "@/components/__legacy__/ui/separator";
import { cn } from "@/lib/utils";
import React from "react";
/**
* Represents a control element for the ControlPanel Component.
* @type {Object} Control
* @property {React.ReactNode} icon - The icon of the control from lucide-react https://lucide.dev/icons/
* @property {string} label - The label of the control, to be leveraged by ToolTip.
 * @property {() => void} onClick - The function to be executed when the control is clicked.
*/
export type Control = {
icon: React.ReactNode;
label: string;
disabled?: boolean;
onClick: () => void;
};
interface ControlPanelProps {
controls: Control[];
topChildren?: React.ReactNode;
botChildren?: React.ReactNode;
className?: string;
}
/**
 * ControlPanel component displays a vertical panel of icon controls and can render additional children above and below them.
 * @param {Object} ControlPanelProps - The properties of the control panel component.
 * @param {Array} ControlPanelProps.controls - An array of control objects representing actions to be performed.
 * @param {React.ReactNode} ControlPanelProps.topChildren - Content rendered above the controls.
 * @param {React.ReactNode} ControlPanelProps.botChildren - Content rendered below the controls.
 * @param {string} ControlPanelProps.className - Additional CSS class names for the control panel.
* @returns The rendered control panel component.
*/
export const ControlPanel = ({
controls,
topChildren,
botChildren,
className,
}: ControlPanelProps) => {
return (
<Card className={cn("m-4 mt-24 w-14 dark:bg-slate-900", className)}>
<CardContent className="p-0">
<div className="flex flex-col items-center gap-3 rounded-xl py-3">
{topChildren}
<Separator className="dark:bg-slate-700" />
{controls.map((control, index) => (
<Tooltip key={index} delayDuration={500}>
<TooltipTrigger asChild>
<div>
<Button
variant="ghost"
size="icon"
onClick={() => control.onClick()}
data-id={`control-button-${index}`}
data-testid={`blocks-control-${control.label.toLowerCase()}-button`}
disabled={control.disabled || false}
className="dark:bg-slate-900 dark:text-slate-100 dark:hover:bg-slate-800"
>
{control.icon}
<span className="sr-only">{control.label}</span>
</Button>
</div>
</TooltipTrigger>
<TooltipContent
side="right"
className="dark:bg-slate-800 dark:text-slate-100"
>
{control.label}
</TooltipContent>
</Tooltip>
))}
<Separator className="dark:bg-slate-700" />
{botChildren}
</div>
</CardContent>
</Card>
);
};
export default ControlPanel;

View File

@@ -0,0 +1,240 @@
import React, {
useCallback,
useContext,
useEffect,
useState,
useRef,
} from "react";
import {
BaseEdge,
EdgeLabelRenderer,
EdgeProps,
useReactFlow,
XYPosition,
Edge,
Node,
} from "@xyflow/react";
import "./customedge.css";
import { X } from "lucide-react";
import { BuilderContext } from "../Flow/Flow";
import { NodeExecutionResult } from "@/lib/autogpt-server-api";
import { useCustomEdge } from "./useCustomEdge";
export type CustomEdgeData = {
edgeColor: string;
sourcePos?: XYPosition;
isStatic?: boolean;
beadUp: number;
beadDown: number;
beadData?: Map<string, NodeExecutionResult["status"]>;
};
type Bead = {
t: number;
targetT: number;
startTime: number;
};
export type CustomEdge = Edge<CustomEdgeData, "custom">;
export function CustomEdge({
id,
data,
selected,
sourceX,
sourceY,
targetX,
targetY,
markerEnd,
}: EdgeProps<CustomEdge>) {
const [beads, setBeads] = useState<{
beads: Bead[];
created: number;
destroyed: number;
}>({ beads: [], created: 0, destroyed: 0 });
const beadsRef = useRef(beads);
const { svgPath, length, getPointForT, getTForDistance } = useCustomEdge(
sourceX - 5,
sourceY - 5,
targetX + 3,
targetY - 5,
);
const { deleteElements } = useReactFlow<Node, CustomEdge>();
const builderContext = useContext(BuilderContext);
const { visualizeBeads } = builderContext ?? {
visualizeBeads: "no",
};
// Check if this edge is broken (during resolution mode)
const isBroken =
builderContext?.resolutionMode?.active &&
builderContext?.resolutionMode?.brokenEdgeIds?.includes(id);
const onEdgeRemoveClick = () => {
deleteElements({ edges: [{ id }] });
};
const animationDuration = 500; // Duration in milliseconds for bead to travel the curve
const beadDiameter = 12;
const deltaTime = 16;
const setTargetPositions = useCallback(
(beads: Bead[]) => {
const distanceBetween = Math.min(
(length - beadDiameter) / (beads.length + 1),
beadDiameter,
);
return beads.map((bead, index) => {
const distanceFromEnd = beadDiameter * 1.35;
const targetPosition = distanceBetween * index + distanceFromEnd;
const t = getTForDistance(-targetPosition);
return {
...bead,
t: visualizeBeads === "animate" ? bead.t : t,
targetT: t,
} as Bead;
});
},
[getTForDistance, length, visualizeBeads],
);
beadsRef.current = beads;
useEffect(() => {
const beadUp: number = data?.beadUp ?? 0;
const beadDown: number = data?.beadDown ?? 0;
if (
beadUp === 0 &&
beadDown === 0 &&
(beads.created > 0 || beads.destroyed > 0)
) {
setBeads({ beads: [], created: 0, destroyed: 0 });
return;
}
// Add beads
if (beadUp > beads.created) {
setBeads(({ beads, created, destroyed }) => {
const newBeads = [];
for (let i = 0; i < beadUp - created; i++) {
newBeads.push({ t: 0, targetT: 0, startTime: Date.now() });
}
const b = setTargetPositions([...beads, ...newBeads]);
return { beads: b, created: beadUp, destroyed };
});
}
// Animate and remove beads
const interval = setInterval(
({ current: beads }) => {
// If there are no beads visible or moving, stop re-rendering
if (
(beadUp === beads.created && beads.created === beads.destroyed) ||
beads.beads.every((bead) => bead.t >= bead.targetT)
) {
clearInterval(interval);
return;
}
setBeads(({ beads, created, destroyed }) => {
let destroyedCount = 0;
const newBeads = beads
.map((bead) => {
const progressIncrement = deltaTime / animationDuration;
const t = Math.min(
bead.t + bead.targetT * progressIncrement,
bead.targetT,
);
return { ...bead, t };
})
.filter((bead, index) => {
const removeCount = beadDown - destroyed;
if (bead.t >= bead.targetT && index < removeCount) {
destroyedCount++;
return false;
}
return true;
});
return {
beads: setTargetPositions(newBeads),
created,
destroyed: destroyed + destroyedCount,
};
});
},
deltaTime,
beadsRef,
);
return () => clearInterval(interval);
}, [data?.beadUp, data?.beadDown, setTargetPositions, visualizeBeads]);
const middle = getPointForT(0.5);
// Determine edge color - red for broken edges
const baseColor = data?.edgeColor ?? "#555555";
const edgeColor = isBroken ? "#ef4444" : baseColor;
// Add opacity to hex color (99 = 60% opacity, 80 = 50% opacity)
const strokeColor = isBroken
? `${edgeColor}99`
: selected
? edgeColor
: `${edgeColor}80`;
return (
<>
<BaseEdge
path={svgPath}
markerEnd={markerEnd}
style={{
stroke: strokeColor,
strokeWidth: data?.isStatic ? 2.5 : 2,
strokeDasharray: data?.isStatic ? "5 3" : undefined,
}}
className="data-sentry-unmask transition-all duration-200"
/>
<path
d={svgPath}
fill="none"
strokeOpacity={0}
strokeWidth={20}
className="data-sentry-unmask react-flow__edge-interaction"
/>
<EdgeLabelRenderer>
<div
style={{
position: "absolute",
transform: `translate(-50%, -50%) translate(${middle.x}px,${middle.y}px)`,
pointerEvents: "all",
}}
className="edge-label-renderer"
>
<button
className="edge-label-button opacity-0 transition-opacity duration-200 hover:opacity-100"
onClick={onEdgeRemoveClick}
>
<X className="size-4" />
</button>
</div>
</EdgeLabelRenderer>
{beads.beads.map((bead, index) => {
const pos = getPointForT(bead.t);
return (
<circle
key={index}
cx={pos.x}
cy={pos.y}
r={beadDiameter / 2} // Bead radius
fill={data?.edgeColor ?? "#555555"}
/>
);
})}
</>
);
}

View File

@@ -0,0 +1,48 @@
.edge-label-renderer {
position: absolute;
pointer-events: all;
}
.edge-label-button {
width: 20px;
height: 20px;
background: #eee;
border: 1px solid #fff;
cursor: pointer;
border-radius: 50%;
display: flex;
justify-content: center;
align-items: center;
padding: 0;
color: #555;
opacity: 0;
transition:
opacity 0.2s ease-in-out,
background-color 0.2s ease-in-out;
}
.edge-label-button.visible {
opacity: 1;
}
.edge-label-button:hover {
box-shadow: 0 0 6px 2px rgba(0, 0, 0, 0.08);
background: #f0f0f0;
}
.edge-label-button svg {
width: 14px;
height: 14px;
}
.react-flow__edge-interaction {
cursor: pointer;
}
.react-flow__edges > svg:has(> g.selected) {
z-index: 10 !important;
}
.react-flow__edgelabel-renderer {
z-index: 11 !important;
}

View File

@@ -0,0 +1,157 @@
import { useCallback, useMemo } from "react";
type XYPosition = {
x: number;
y: number;
};
export type BezierPath = {
sourcePosition: XYPosition;
control1: XYPosition;
control2: XYPosition;
targetPosition: XYPosition;
};
export function useCustomEdge(
sourceX: number,
sourceY: number,
targetX: number,
targetY: number,
) {
const path: BezierPath = useMemo(() => {
const xDifference = Math.abs(sourceX - targetX);
const yDifference = Math.abs(sourceY - targetY);
const xControlDistance =
sourceX < targetX ? 64 : Math.max(xDifference / 2, 64);
const yControlDistance = yDifference < 128 && sourceX > targetX ? -64 : 0;
return {
sourcePosition: { x: sourceX, y: sourceY },
control1: {
x: sourceX + xControlDistance,
y: sourceY + yControlDistance,
},
control2: {
x: targetX - xControlDistance,
y: targetY + yControlDistance,
},
targetPosition: { x: targetX, y: targetY },
};
}, [sourceX, sourceY, targetX, targetY]);
const svgPath = useMemo(
() =>
`M ${path.sourcePosition.x} ${path.sourcePosition.y} ` +
`C ${path.control1.x} ${path.control1.y} ${path.control2.x} ${path.control2.y} ` +
`${path.targetPosition.x}, ${path.targetPosition.y}`,
[path],
);
const getPointForT = useCallback(
(t: number) => {
// Bezier formula: (1-t)^3 * p0 + 3*(1-t)^2*t*p1 + 3*(1-t)*t^2*p2 + t^3*p3
const x =
Math.pow(1 - t, 3) * path.sourcePosition.x +
3 * Math.pow(1 - t, 2) * t * path.control1.x +
3 * (1 - t) * Math.pow(t, 2) * path.control2.x +
Math.pow(t, 3) * path.targetPosition.x;
const y =
Math.pow(1 - t, 3) * path.sourcePosition.y +
3 * Math.pow(1 - t, 2) * t * path.control1.y +
3 * (1 - t) * Math.pow(t, 2) * path.control2.y +
Math.pow(t, 3) * path.targetPosition.y;
return { x, y };
},
[path],
);
const getArcLength = useCallback(
(t: number, samples: number = 100) => {
let length = 0;
let prevPoint = getPointForT(0);
for (let i = 1; i <= samples; i++) {
const currT = (i / samples) * t;
const currPoint = getPointForT(currT);
length += Math.sqrt(
Math.pow(currPoint.x - prevPoint.x, 2) +
Math.pow(currPoint.y - prevPoint.y, 2),
);
prevPoint = currPoint;
}
return length;
},
[getPointForT],
);
const length = useMemo(() => {
return getArcLength(1);
}, [getArcLength]);
const getBezierDerivative = useCallback(
(t: number) => {
const mt = 1 - t;
const x =
3 *
(mt * mt * (path.control1.x - path.sourcePosition.x) +
2 * mt * t * (path.control2.x - path.control1.x) +
t * t * (path.targetPosition.x - path.control2.x));
const y =
3 *
(mt * mt * (path.control1.y - path.sourcePosition.y) +
2 * mt * t * (path.control2.y - path.control1.y) +
t * t * (path.targetPosition.y - path.control2.y));
return { x, y };
},
[path],
);
const getTForDistance = useCallback(
(distance: number, epsilon: number = 0.0001) => {
if (distance < 0) {
distance = length + distance; // If distance is negative, calculate from the end of the curve
}
let t = distance / getArcLength(1);
let prevT = 0;
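      // Newton–Raphson refinement: solve arcLength(t) = distance, using the
      // curve speed |B'(t)| as the derivative of arc length with respect to t.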
while (Math.abs(t - prevT) > epsilon) {
prevT = t;
const length = getArcLength(t);
const derivative = Math.sqrt(
Math.pow(getBezierDerivative(t).x, 2) +
Math.pow(getBezierDerivative(t).y, 2),
);
t -= (length - distance) / derivative;
t = Math.max(0, Math.min(1, t)); // Clamp t between 0 and 1
}
return t;
},
[getArcLength, getBezierDerivative, length],
);
const getPointAtDistance = useCallback(
(distance: number) => {
if (distance < 0) {
distance = length + distance; // If distance is negative, calculate from the end of the curve
}
const t = getTForDistance(distance);
return getPointForT(t);
},
[getTForDistance, getPointForT, length],
);
return {
path,
svgPath,
length,
getPointForT,
getTForDistance,
getPointAtDistance,
};
}
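
For orientation, here is a minimal sketch of how the arc-length helpers returned above could be consumed — for example, to place points (such as animation beads) at an even pixel spacing along the edge. The hook call matches the signature defined above; the wrapper hook name, the import path, and the spacing constant are illustrative assumptions, not part of this diff.

// Illustrative sketch only — not part of this PR.
import { useMemo } from "react";
import { useCustomEdge } from "./useCustomEdge"; // assumed path

function useEvenlySpacedPoints(
  sourceX: number,
  sourceY: number,
  targetX: number,
  targetY: number,
  spacingPx: number = 12, // assumed spacing between points, in pixels
) {
  const { length, getPointAtDistance } = useCustomEdge(
    sourceX,
    sourceY,
    targetX,
    targetY,
  );
  // Walk the curve in equal arc-length steps; each distance is converted back
  // to an (x, y) point via getTForDistance + getPointForT internally.
  return useMemo(() => {
    const points: { x: number; y: number }[] = [];
    for (let d = spacingPx; d < length; d += spacingPx) {
      points.push(getPointAtDistance(d));
    }
    return points;
  }, [length, getPointAtDistance, spacingPx]);
}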

View File

@@ -0,0 +1,244 @@
import React from "react";
import {
Dialog,
DialogContent,
DialogDescription,
DialogFooter,
DialogHeader,
DialogTitle,
} from "@/components/__legacy__/ui/dialog";
import { Button } from "@/components/__legacy__/ui/button";
import { AlertTriangle, XCircle, PlusCircle } from "lucide-react";
import { IncompatibilityInfo } from "../../../hooks/useSubAgentUpdate/types";
import { beautifyString } from "@/lib/utils";
import { Alert, AlertDescription } from "@/components/molecules/Alert/Alert";
interface IncompatibilityDialogProps {
isOpen: boolean;
onClose: () => void;
onConfirm: () => void;
currentVersion: number;
latestVersion: number;
agentName: string;
incompatibilities: IncompatibilityInfo;
}
export const IncompatibilityDialog: React.FC<IncompatibilityDialogProps> = ({
isOpen,
onClose,
onConfirm,
currentVersion,
latestVersion,
agentName,
incompatibilities,
}) => {
const hasMissingInputs = incompatibilities.missingInputs.length > 0;
const hasMissingOutputs = incompatibilities.missingOutputs.length > 0;
const hasNewInputs = incompatibilities.newInputs.length > 0;
const hasNewOutputs = incompatibilities.newOutputs.length > 0;
const hasNewRequired = incompatibilities.newRequiredInputs.length > 0;
const hasTypeMismatches = incompatibilities.inputTypeMismatches.length > 0;
const hasInputChanges = hasMissingInputs || hasNewInputs;
const hasOutputChanges = hasMissingOutputs || hasNewOutputs;
return (
<Dialog open={isOpen} onOpenChange={(open) => !open && onClose()}>
<DialogContent className="max-w-lg">
<DialogHeader>
<DialogTitle className="flex items-center gap-2">
<AlertTriangle className="h-5 w-5 text-amber-500" />
Incompatible Update
</DialogTitle>
<DialogDescription>
Updating <strong>{beautifyString(agentName)}</strong> from v
{currentVersion} to v{latestVersion} will break some connections.
</DialogDescription>
</DialogHeader>
<div className="space-y-4 py-2">
{/* Input changes - two column layout */}
{hasInputChanges && (
<TwoColumnSection
title="Input Changes"
leftIcon={<XCircle className="h-4 w-4 text-red-500" />}
leftTitle="Removed"
leftItems={incompatibilities.missingInputs}
rightIcon={<PlusCircle className="h-4 w-4 text-green-500" />}
rightTitle="Added"
rightItems={incompatibilities.newInputs}
/>
)}
{/* Output changes - two column layout */}
{hasOutputChanges && (
<TwoColumnSection
title="Output Changes"
leftIcon={<XCircle className="h-4 w-4 text-red-500" />}
leftTitle="Removed"
leftItems={incompatibilities.missingOutputs}
rightIcon={<PlusCircle className="h-4 w-4 text-green-500" />}
rightTitle="Added"
rightItems={incompatibilities.newOutputs}
/>
)}
{hasTypeMismatches && (
<SingleColumnSection
icon={<XCircle className="h-4 w-4 text-red-500" />}
title="Type Changed"
description="These connected inputs have a different type:"
items={incompatibilities.inputTypeMismatches.map(
                    (m) => `${m.name} (${m.oldType} → ${m.newType})`,
)}
/>
)}
{hasNewRequired && (
<SingleColumnSection
icon={<PlusCircle className="h-4 w-4 text-amber-500" />}
title="New Required Inputs"
description="These inputs are now required:"
items={incompatibilities.newRequiredInputs}
/>
)}
</div>
<Alert variant="warning">
<AlertDescription>
If you proceed, you&apos;ll need to remove the broken connections
before you can save or run your agent.
</AlertDescription>
</Alert>
<DialogFooter className="gap-2 sm:gap-0">
<Button variant="outline" onClick={onClose}>
Cancel
</Button>
<Button
variant="destructive"
onClick={onConfirm}
className="bg-amber-600 hover:bg-amber-700"
>
Update Anyway
</Button>
</DialogFooter>
</DialogContent>
</Dialog>
);
};
interface TwoColumnSectionProps {
title: string;
leftIcon: React.ReactNode;
leftTitle: string;
leftItems: string[];
rightIcon: React.ReactNode;
rightTitle: string;
rightItems: string[];
}
const TwoColumnSection: React.FC<TwoColumnSectionProps> = ({
title,
leftIcon,
leftTitle,
leftItems,
rightIcon,
rightTitle,
rightItems,
}) => (
<div className="rounded-md border border-gray-200 p-3 dark:border-gray-700">
<span className="font-medium">{title}</span>
<div className="mt-2 grid grid-cols-2 items-start gap-4">
{/* Left column - Breaking changes */}
<div className="min-w-0">
<div className="flex items-center gap-1.5 text-sm text-gray-500 dark:text-gray-400">
{leftIcon}
<span>{leftTitle}</span>
</div>
<ul className="mt-1.5 space-y-1">
{leftItems.length > 0 ? (
leftItems.map((item) => (
<li
key={item}
className="text-sm text-gray-700 dark:text-gray-300"
>
<code className="rounded bg-red-50 px-1 py-0.5 font-mono text-xs text-red-700 dark:bg-red-900/30 dark:text-red-300">
{item}
</code>
</li>
))
) : (
<li className="text-sm italic text-gray-400 dark:text-gray-500">
None
</li>
)}
</ul>
</div>
{/* Right column - Possible solutions */}
<div className="min-w-0">
<div className="flex items-center gap-1.5 text-sm text-gray-500 dark:text-gray-400">
{rightIcon}
<span>{rightTitle}</span>
</div>
<ul className="mt-1.5 space-y-1">
{rightItems.length > 0 ? (
rightItems.map((item) => (
<li
key={item}
className="text-sm text-gray-700 dark:text-gray-300"
>
<code className="rounded bg-green-50 px-1 py-0.5 font-mono text-xs text-green-700 dark:bg-green-900/30 dark:text-green-300">
{item}
</code>
</li>
))
) : (
<li className="text-sm italic text-gray-400 dark:text-gray-500">
None
</li>
)}
</ul>
</div>
</div>
</div>
);
interface SingleColumnSectionProps {
icon: React.ReactNode;
title: string;
description: string;
items: string[];
}
const SingleColumnSection: React.FC<SingleColumnSectionProps> = ({
icon,
title,
description,
items,
}) => (
<div className="rounded-md border border-gray-200 p-3 dark:border-gray-700">
<div className="flex items-center gap-2">
{icon}
<span className="font-medium">{title}</span>
</div>
<p className="mt-1 text-sm text-gray-500 dark:text-gray-400">
{description}
</p>
<ul className="mt-2 space-y-1">
{items.map((item) => (
<li
key={item}
className="ml-4 list-disc text-sm text-gray-700 dark:text-gray-300"
>
<code className="rounded bg-gray-100 px-1 py-0.5 font-mono text-xs dark:bg-gray-800">
{item}
</code>
</li>
))}
</ul>
</div>
);
export default IncompatibilityDialog;

View File

@@ -0,0 +1,130 @@
import React from "react";
import { Button } from "@/components/__legacy__/ui/button";
import { ArrowUp, AlertTriangle, Info } from "lucide-react";
import {
Tooltip,
TooltipContent,
TooltipTrigger,
} from "@/components/atoms/Tooltip/BaseTooltip";
import { IncompatibilityInfo } from "../../../hooks/useSubAgentUpdate/types";
import { cn } from "@/lib/utils";
interface SubAgentUpdateBarProps {
currentVersion: number;
latestVersion: number;
isCompatible: boolean;
incompatibilities: IncompatibilityInfo | null;
onUpdate: () => void;
isInResolutionMode?: boolean;
}
export const SubAgentUpdateBar: React.FC<SubAgentUpdateBarProps> = ({
currentVersion,
latestVersion,
isCompatible,
incompatibilities,
onUpdate,
isInResolutionMode = false,
}) => {
if (isInResolutionMode) {
return <ResolutionModeBar incompatibilities={incompatibilities} />;
}
return (
<div className="flex items-center justify-between gap-2 rounded-t-lg bg-blue-50 px-3 py-2 dark:bg-blue-900/30">
<div className="flex items-center gap-2">
<ArrowUp className="h-4 w-4 text-blue-600 dark:text-blue-400" />
<span className="text-sm text-blue-700 dark:text-blue-300">
          Update available (v{currentVersion} → v{latestVersion})
</span>
{!isCompatible && (
<Tooltip>
<TooltipTrigger asChild>
<AlertTriangle className="h-4 w-4 text-amber-500" />
</TooltipTrigger>
<TooltipContent className="max-w-xs">
<p className="font-medium">Incompatible changes detected</p>
<p className="text-xs text-gray-400">
Click Update to see details
</p>
</TooltipContent>
</Tooltip>
)}
</div>
<Button
size="sm"
variant={isCompatible ? "default" : "outline"}
onClick={onUpdate}
className={cn(
"h-7 text-xs",
!isCompatible && "border-amber-500 text-amber-600 hover:bg-amber-50",
)}
>
Update
</Button>
</div>
);
};
interface ResolutionModeBarProps {
incompatibilities: IncompatibilityInfo | null;
}
const ResolutionModeBar: React.FC<ResolutionModeBarProps> = ({
incompatibilities,
}) => {
const formatIncompatibilities = () => {
if (!incompatibilities) return "No incompatibilities";
const items: string[] = [];
if (incompatibilities.missingInputs.length > 0) {
items.push(
`Missing inputs: ${incompatibilities.missingInputs.join(", ")}`,
);
}
if (incompatibilities.missingOutputs.length > 0) {
items.push(
`Missing outputs: ${incompatibilities.missingOutputs.join(", ")}`,
);
}
if (incompatibilities.newRequiredInputs.length > 0) {
items.push(
`New required inputs: ${incompatibilities.newRequiredInputs.join(", ")}`,
);
}
if (incompatibilities.inputTypeMismatches.length > 0) {
const mismatches = incompatibilities.inputTypeMismatches
        .map((m) => `${m.name} (${m.oldType} → ${m.newType})`)
.join(", ");
items.push(`Type changed: ${mismatches}`);
}
return items.join("\n");
};
return (
<div className="flex items-center justify-between gap-2 rounded-t-lg bg-amber-50 px-3 py-2 dark:bg-amber-900/30">
<div className="flex items-center gap-2">
<AlertTriangle className="h-4 w-4 text-amber-600 dark:text-amber-400" />
<span className="text-sm text-amber-700 dark:text-amber-300">
Remove incompatible connections
</span>
<Tooltip>
<TooltipTrigger asChild>
<Info className="h-4 w-4 cursor-help text-amber-500" />
</TooltipTrigger>
<TooltipContent className="max-w-sm whitespace-pre-line">
<p className="font-medium">Incompatible changes:</p>
<p className="mt-1 text-xs">{formatIncompatibilities()}</p>
<p className="mt-2 text-xs text-gray-400">
Delete the red connections to continue
</p>
</TooltipContent>
</Tooltip>
</div>
</div>
);
};
export default SubAgentUpdateBar;
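
For reference while reading the two components above, this is the shape of IncompatibilityInfo implied by the fields they access. It is inferred from usage only; the authoritative definition lives in hooks/useSubAgentUpdate/types and may contain additional fields.

// Inferred from usage in IncompatibilityDialog and SubAgentUpdateBar — not the
// authoritative definition from hooks/useSubAgentUpdate/types.
type IncompatibilityInfoSketch = {
  missingInputs: string[];
  missingOutputs: string[];
  newInputs: string[];
  newOutputs: string[];
  newRequiredInputs: string[];
  inputTypeMismatches: { name: string; oldType: string; newType: string }[];
};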

View File

@@ -0,0 +1,131 @@
.custom-node {
color: #000000;
box-sizing: border-box;
transition: border-color 0.3s ease-in-out;
}
.custom-node .custom-switch {
padding: 0.5rem 1.25rem;
display: flex;
align-items: center;
justify-content: space-between;
}
.error-message {
color: #d9534f;
font-size: 13px;
padding-left: 0.5rem;
}
/* Existing styles */
.handle-container {
display: flex;
position: relative;
margin-bottom: 0px;
padding: 5px;
min-height: 44px;
height: 100%;
}
.react-flow__handle {
background: transparent;
width: auto;
height: auto;
border: 0;
position: relative;
transform: none;
}
.border-error {
border: 1px solid #d9534f;
}
.select-input {
width: 100%;
padding: 5px;
border-radius: 4px;
border: 1px solid #000;
background: #fff;
color: #000;
}
.radio-label {
display: block;
margin: 5px 0;
color: #000;
}
.number-input {
width: 100%;
padding: 5px;
border-radius: 4px;
background: #fff;
color: #000;
}
.array-item-container {
display: flex;
align-items: center;
margin-bottom: 5px;
}
.array-item-input {
flex-grow: 1;
padding: 5px;
border-radius: 4px;
border: 1px solid #000;
background: #fff;
color: #000;
}
.array-item-remove {
background: #d9534f;
border: none;
color: white;
cursor: pointer;
margin-left: 5px;
border-radius: 4px;
padding: 5px 10px;
}
.array-item-add {
background: #5bc0de;
border: none;
color: white;
cursor: pointer;
border-radius: 4px;
padding: 5px 10px;
margin-top: 5px;
}
.error-message {
color: #d9534f;
font-size: 13px;
margin-top: 5px;
margin-left: 5px;
}
/* Styles for node states */
.completed {
border-color: #27ae60; /* Green border for completed nodes */
}
.running {
border-color: #f39c12; /* Orange border for running nodes */
}
.failed {
border-color: #c0392b; /* Red border for failed nodes */
}
.incomplete {
  border-color: #9f14ab; /* Purple border for incomplete nodes */
}
.queued {
border-color: #25e6e6; /* Cyan border for queued nodes */
}
.custom-switch {
padding-left: 2px;
}

View File

@@ -0,0 +1,166 @@
import { beautifyString } from "@/lib/utils";
import { Clipboard, Maximize2 } from "lucide-react";
import React, { useMemo, useState } from "react";
import { Button } from "../../../../../components/__legacy__/ui/button";
import { ContentRenderer } from "../../../../../components/__legacy__/ui/render";
import {
Table,
TableBody,
TableCell,
TableHead,
TableHeader,
TableRow,
} from "../../../../../components/__legacy__/ui/table";
import type { OutputMetadata } from "@/components/contextual/OutputRenderers";
import {
globalRegistry,
OutputItem,
} from "@/components/contextual/OutputRenderers";
import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag";
import { useToast } from "../../../../../components/molecules/Toast/use-toast";
import ExpandableOutputDialog from "./ExpandableOutputDialog";
type DataTableProps = {
title?: string;
truncateLongData?: boolean;
data: { [key: string]: Array<any> };
};
export default function DataTable({
title,
truncateLongData,
data,
}: DataTableProps) {
const { toast } = useToast();
const enableEnhancedOutputHandling = useGetFlag(
Flag.ENABLE_ENHANCED_OUTPUT_HANDLING,
);
const [expandedDialog, setExpandedDialog] = useState<{
isOpen: boolean;
execId: string;
pinName: string;
data: any[];
} | null>(null);
// Prepare renderers for each item when enhanced mode is enabled
const getItemRenderer = useMemo(() => {
if (!enableEnhancedOutputHandling) return null;
return (item: unknown) => {
const metadata: OutputMetadata = {};
return globalRegistry.getRenderer(item, metadata);
};
}, [enableEnhancedOutputHandling]);
const copyData = (pin: string, data: string) => {
navigator.clipboard.writeText(data).then(() => {
toast({
title: `"${pin}" data copied to clipboard!`,
duration: 2000,
});
});
};
const openExpandedView = (pinName: string, pinData: any[]) => {
setExpandedDialog({
isOpen: true,
execId: title || "Unknown Execution",
pinName,
data: pinData,
});
};
const closeExpandedView = () => {
setExpandedDialog(null);
};
return (
<>
{title && <strong className="mt-2 flex justify-center">{title}</strong>}
<Table className="cursor-default select-text">
<TableHeader>
<TableRow>
<TableHead>Pin</TableHead>
<TableHead>Data</TableHead>
</TableRow>
</TableHeader>
<TableBody>
{Object.entries(data).map(([key, value]) => (
<TableRow className="group" key={key}>
<TableCell className="cursor-text">
{beautifyString(key)}
</TableCell>
<TableCell className="cursor-text">
<div className="flex min-h-9 items-center whitespace-pre-wrap">
<div className="absolute right-1 top-auto m-1 hidden gap-1 group-hover:flex">
<Button
variant="outline"
size="icon"
onClick={() => openExpandedView(key, value)}
title="Expand Full View"
>
<Maximize2 size={18} />
</Button>
<Button
variant="outline"
size="icon"
onClick={() =>
copyData(
beautifyString(key),
value
.map((i) =>
typeof i === "object"
? JSON.stringify(i, null, 2)
: String(i),
)
.join(", "),
)
}
title="Copy Data"
>
<Clipboard size={18} />
</Button>
</div>
{value.map((item, index) => {
const renderer = getItemRenderer?.(item);
if (enableEnhancedOutputHandling && renderer) {
const metadata: OutputMetadata = {};
return (
<React.Fragment key={index}>
<OutputItem
value={item}
metadata={metadata}
renderer={renderer}
/>
{index < value.length - 1 && ", "}
</React.Fragment>
);
}
return (
<React.Fragment key={index}>
<ContentRenderer
value={item}
truncateLongData={truncateLongData}
/>
{index < value.length - 1 && ", "}
</React.Fragment>
);
})}
</div>
</TableCell>
</TableRow>
))}
</TableBody>
</Table>
{expandedDialog && (
<ExpandableOutputDialog
isOpen={expandedDialog.isOpen}
onClose={closeExpandedView}
execId={expandedDialog.execId}
pinName={expandedDialog.pinName}
data={expandedDialog.data}
/>
)}
</>
);
}

View File

@@ -0,0 +1,269 @@
import type { OutputMetadata } from "@/components/contextual/OutputRenderers";
import {
globalRegistry,
OutputActions,
OutputItem,
} from "@/components/contextual/OutputRenderers";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import { beautifyString } from "@/lib/utils";
import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag";
import { Clipboard, Maximize2 } from "lucide-react";
import React, { FC, useMemo, useState } from "react";
import { Button } from "../../../../../components/__legacy__/ui/button";
import { ContentRenderer } from "../../../../../components/__legacy__/ui/render";
import { ScrollArea } from "../../../../../components/__legacy__/ui/scroll-area";
import { Separator } from "../../../../../components/__legacy__/ui/separator";
import { Switch } from "../../../../../components/atoms/Switch/Switch";
import { useToast } from "../../../../../components/molecules/Toast/use-toast";
interface ExpandableOutputDialogProps {
isOpen: boolean;
onClose: () => void;
execId: string;
pinName: string;
data: any[];
}
const ExpandableOutputDialog: FC<ExpandableOutputDialogProps> = ({
isOpen,
onClose,
execId,
pinName,
data,
}) => {
const { toast } = useToast();
const enableEnhancedOutputHandling = useGetFlag(
Flag.ENABLE_ENHANCED_OUTPUT_HANDLING,
);
const [useEnhancedRenderer, setUseEnhancedRenderer] = useState(false);
// Prepare items for the enhanced renderer system
const outputItems = useMemo(() => {
if (!data || !useEnhancedRenderer) return [];
const items: Array<{
key: string;
label: string;
value: unknown;
metadata?: OutputMetadata;
renderer: any;
}> = [];
data.forEach((value, index) => {
const metadata: OutputMetadata = {};
// Extract metadata from the value if it's an object
if (
typeof value === "object" &&
value !== null &&
!React.isValidElement(value)
) {
const objValue = value as any;
if (objValue.type) metadata.type = objValue.type;
if (objValue.mimeType) metadata.mimeType = objValue.mimeType;
if (objValue.filename) metadata.filename = objValue.filename;
if (objValue.language) metadata.language = objValue.language;
}
const renderer = globalRegistry.getRenderer(value, metadata);
if (renderer) {
items.push({
key: `item-${index}`,
label: index === 0 ? beautifyString(pinName) : "",
value,
metadata,
renderer,
});
} else {
// Fallback to text renderer
const textRenderer = globalRegistry
.getAllRenderers()
.find((r) => r.name === "TextRenderer");
if (textRenderer) {
items.push({
key: `item-${index}`,
label: index === 0 ? beautifyString(pinName) : "",
value:
typeof value === "string"
? value
: JSON.stringify(value, null, 2),
metadata,
renderer: textRenderer,
});
}
}
});
return items;
}, [data, useEnhancedRenderer, pinName]);
const copyData = () => {
const formattedData = data
.map((item) =>
typeof item === "object" ? JSON.stringify(item, null, 2) : String(item),
)
.join("\n\n");
navigator.clipboard.writeText(formattedData).then(() => {
toast({
title: `"${beautifyString(pinName)}" data copied to clipboard!`,
duration: 2000,
});
});
};
return (
<Dialog
title={
<div className="flex items-center justify-between pr-8">
<div className="flex items-center gap-2">
<Maximize2 size={20} />
Full Output Preview
</div>
{enableEnhancedOutputHandling && (
<div className="flex items-center gap-3">
<label
htmlFor="enhanced-rendering-toggle"
className="cursor-pointer select-none text-sm font-normal text-gray-600"
>
Enhanced Rendering
</label>
<Switch
id="enhanced-rendering-toggle"
checked={useEnhancedRenderer}
onCheckedChange={setUseEnhancedRenderer}
/>
</div>
)}
</div>
}
controlled={{
isOpen,
set: (open) => {
if (!open) onClose();
},
}}
onClose={onClose}
styling={{
maxWidth: "56rem",
width: "90vw",
height: "90vh",
}}
>
<Dialog.Content>
<div className="flex h-full flex-col">
<div className="pb-4">
<p className="text-sm text-zinc-600">
Execution ID: <span className="font-mono text-xs">{execId}</span>
<br />
Pin:{" "}
<span className="font-semibold">{beautifyString(pinName)}</span>
</p>
</div>
<div className="flex flex-1 flex-col overflow-hidden">
{useEnhancedRenderer && outputItems.length > 0 && (
<div className="border-b px-4 py-2">
<OutputActions
items={outputItems.map((item) => ({
value: item.value,
metadata: item.metadata,
renderer: item.renderer,
}))}
/>
</div>
)}
<ScrollArea className="h-full">
<div className="p-4">
{data.length > 0 ? (
useEnhancedRenderer ? (
<div className="space-y-4">
{outputItems.map((item) => (
<OutputItem
key={item.key}
value={item.value}
metadata={item.metadata}
renderer={item.renderer}
label={item.label}
/>
))}
</div>
) : (
<div className="space-y-4">
{data.map((item, index) => (
<div
key={index}
className="rounded-lg border bg-gray-50 p-4"
>
<div className="mb-2 flex items-center justify-between">
<span className="text-sm font-medium text-gray-600">
Item {index + 1} of {data.length}
</span>
<Button
variant="outline"
size="sm"
onClick={() => {
const itemData =
typeof item === "object"
? JSON.stringify(item, null, 2)
: String(item);
navigator.clipboard
.writeText(itemData)
.then(() => {
toast({
title: `Item ${index + 1} copied to clipboard!`,
duration: 2000,
});
});
}}
className="flex items-center gap-1"
>
<Clipboard size={14} />
Copy Item
</Button>
</div>
<Separator className="mb-3" />
<div className="whitespace-pre-wrap break-words font-mono text-sm">
<ContentRenderer
value={item}
truncateLongData={false}
/>
</div>
</div>
))}
</div>
)
) : (
<div className="py-8 text-center text-gray-500">
No data available
</div>
)}
</div>
</ScrollArea>
</div>
<Dialog.Footer className="flex justify-between">
<div className="text-sm text-gray-600">
{data.length} item{data.length !== 1 ? "s" : ""} total
</div>
<div className="flex gap-2">
{!useEnhancedRenderer && (
<Button
variant="outline"
onClick={copyData}
className="flex items-center gap-1"
>
<Clipboard size={16} />
Copy All
</Button>
)}
<Button onClick={onClose}>Close</Button>
</div>
</Dialog.Footer>
</div>
</Dialog.Content>
</Dialog>
);
};
export default ExpandableOutputDialog;

View File

@@ -0,0 +1,103 @@
/* flow.css or index.css */
body {
font-family:
-apple-system, BlinkMacSystemFont, "Segoe UI", "Roboto", "Oxygen", "Ubuntu",
"Cantarell", "Fira Sans", "Droid Sans", "Helvetica Neue", sans-serif;
}
code {
font-family:
source-code-pro, Menlo, Monaco, Consolas, "Courier New", monospace;
}
.modal {
position: absolute;
top: 50%;
left: 50%;
right: auto;
bottom: auto;
margin-right: -50%;
transform: translate(-50%, -50%);
background: #ffffff;
padding: 20px;
border: 1px solid #ccc;
border-radius: 4px;
color: #000000;
}
.overlay {
position: fixed;
top: 0;
left: 0;
right: 0;
bottom: 0;
background-color: rgba(0, 0, 0, 0.75);
}
.modal h2 {
margin-top: 0;
}
.modal button {
margin-right: 10px;
}
.modal form {
display: flex;
flex-direction: column;
}
.modal form div {
margin-bottom: 15px;
}
.sidebar {
position: fixed;
top: 0;
left: -600px;
width: 350px;
height: calc(100vh - 68px); /* Full height minus top offset */
background-color: #ffffff;
color: #000000;
padding: 20px;
transition: left 0.3s ease;
z-index: 1000;
overflow-y: auto;
margin-top: 68px; /* Margin to push content below the top fixed area */
}
.sidebar.open {
left: 0;
}
.sidebar h3 {
margin: 0 0 10px;
}
.sidebar input {
margin: 0 0 10px;
}
.sidebarNodeRowStyle {
display: flex;
justify-content: space-between;
align-items: center;
background-color: #e2e2e2;
padding: 10px;
margin-bottom: 10px;
border-radius: 10px;
cursor: grab;
}
.sidebarNodeRowStyle.dragging {
opacity: 0.5;
}
.flow-container {
position: absolute;
top: 0;
left: 0;
width: 100vw;
height: 100vh;
}

View File

@@ -0,0 +1,82 @@
import React from "react";
import {
Popover,
PopoverContent,
PopoverTrigger,
} from "@/components/__legacy__/ui/popover";
import { Button } from "@/components/atoms/Button/Button";
import { MagnifyingGlassIcon } from "@radix-ui/react-icons";
import { CustomNode } from "@/app/(platform)/build/components/legacy-builder/CustomNode/CustomNode";
import { GraphSearchContent } from "../NewControlPanel/NewSearchGraph/GraphMenuContent/GraphContent";
import {
Tooltip,
TooltipContent,
TooltipTrigger,
} from "@/components/atoms/Tooltip/BaseTooltip";
import { useGraphMenu } from "../NewControlPanel/NewSearchGraph/GraphMenu/useGraphMenu";
interface GraphSearchControlProps {
nodes: CustomNode[];
onNodeSelect: (nodeId: string) => void;
onNodeHover?: (nodeId: string | null) => void;
}
export function GraphSearchControl({
nodes,
onNodeSelect,
onNodeHover,
}: GraphSearchControlProps) {
// Use the same hook as GraphSearchMenu for consistency
const {
open,
searchQuery,
setSearchQuery,
filteredNodes,
handleNodeSelect,
handleOpenChange,
} = useGraphMenu({
nodes,
blockMenuSelected: "", // We don't need to track this in the old control panel
setBlockMenuSelected: () => {}, // Not needed in this context
onNodeSelect,
});
return (
<Popover open={open} onOpenChange={handleOpenChange}>
<Tooltip delayDuration={500}>
<TooltipTrigger asChild>
<PopoverTrigger asChild>
<Button
variant="ghost"
size="icon"
data-id="graph-search-control-trigger"
data-testid="graph-search-control-button"
name="Search"
className="dark:hover:bg-slate-800"
>
<MagnifyingGlassIcon className="h-5 w-5" />
</Button>
</PopoverTrigger>
</TooltipTrigger>
<TooltipContent side="right">Search Graph</TooltipContent>
</Tooltip>
<PopoverContent
side="right"
sideOffset={22}
align="start"
alignOffset={-50} // Offset upward to align with control panel top
className="absolute -top-3 w-[17rem] rounded-xl border-none p-0 shadow-none md:w-[30rem]"
data-id="graph-search-popover-content"
>
<GraphSearchContent
searchQuery={searchQuery}
onSearchChange={setSearchQuery}
filteredNodes={filteredNodes}
onNodeSelect={handleNodeSelect}
onNodeHover={onNodeHover}
/>
</PopoverContent>
</Popover>
);
}

View File

@@ -0,0 +1,107 @@
import React, { FC, useEffect, useState } from "react";
import { Button } from "../../../../../components/__legacy__/ui/button";
import { Textarea } from "../../../../../components/__legacy__/ui/textarea";
import { Maximize2, Minimize2, Clipboard } from "lucide-react";
import { createPortal } from "react-dom";
import { toast } from "../../../../../components/molecules/Toast/use-toast";
interface ModalProps {
isOpen: boolean;
onClose: () => void;
onSave: (value: string) => void;
title?: string;
defaultValue: string;
}
const InputModalComponent: FC<ModalProps> = ({
isOpen,
onClose,
onSave,
title,
defaultValue,
}) => {
const [tempValue, setTempValue] = useState(defaultValue);
const [isMaximized, setIsMaximized] = useState(false);
useEffect(() => {
if (isOpen) {
setTempValue(defaultValue);
setIsMaximized(false);
}
}, [isOpen, defaultValue]);
const handleSave = () => {
onSave(tempValue);
onClose();
};
const toggleSize = () => {
setIsMaximized(!isMaximized);
};
const copyValue = () => {
navigator.clipboard.writeText(tempValue).then(() => {
toast({
title: "Input value copied to clipboard!",
duration: 2000,
});
});
};
if (!isOpen) {
return null;
}
const modalContent = (
<div
id="modal-content"
className={`fixed rounded-lg border-[1.5px] bg-white p-5 ${
isMaximized ? "inset-[128px] flex flex-col" : `w-[90%] max-w-[800px]`
}`}
>
<h2 className="mb-4 text-center text-lg font-semibold">
{title || "Enter input text"}
</h2>
<div className="nowheel relative flex-grow">
<Textarea
className="h-full min-h-[200px] w-full resize-none"
value={tempValue}
onChange={(e) => setTempValue(e.target.value)}
/>
<div className="absolute bottom-2 right-2 flex space-x-2">
<Button onClick={copyValue} size="icon" variant="outline">
<Clipboard size={18} />
</Button>
<Button onClick={toggleSize} size="icon" variant="outline">
{isMaximized ? <Minimize2 size={18} /> : <Maximize2 size={18} />}
</Button>
</div>
</div>
<div className="mt-4 flex justify-end space-x-2">
<Button onClick={onClose} variant="outline">
Cancel
</Button>
<Button onClick={handleSave}>Save</Button>
</div>
</div>
);
return (
<>
{isMaximized ? (
createPortal(
<div className="fixed inset-0 flex items-center justify-center bg-white bg-opacity-60">
{modalContent}
</div>,
document.body,
)
) : (
<div className="nodrag fixed inset-0 flex items-center justify-center bg-white bg-opacity-60">
{modalContent}
</div>
)}
</>
);
};
export default InputModalComponent;

View File

@@ -0,0 +1,163 @@
import { BlockIOSubSchema } from "@/lib/autogpt-server-api/types";
import {
cn,
beautifyString,
getTypeBgColor,
getTypeTextColor,
getEffectiveType,
} from "@/lib/utils";
import { FC, memo, useCallback } from "react";
import { Handle, Position } from "@xyflow/react";
import { InformationTooltip } from "@/components/molecules/InformationTooltip/InformationTooltip";
type HandleProps = {
keyName: string;
schema: BlockIOSubSchema;
isConnected: boolean;
isRequired?: boolean;
side: "left" | "right";
title?: string;
className?: string;
isBroken?: boolean;
};
// Move the constant out of the component to avoid re-creation on every render.
const TYPE_NAME: Record<string, string> = {
string: "text",
number: "number",
integer: "integer",
boolean: "true/false",
object: "object",
array: "list",
null: "null",
};
// Extract and memoize the Dot component so that it doesn't re-render unnecessarily.
const Dot: FC<{ isConnected: boolean; type?: string; isBroken?: boolean }> =
memo(({ isConnected, type, isBroken }) => {
const color = isBroken
? "border-red-500 bg-red-100 dark:bg-red-900/30"
: isConnected
? getTypeBgColor(type || "any")
: "border-gray-300 dark:border-gray-600";
return (
<div
className={cn(
"m-1 h-4 w-4 rounded-full border-2 bg-white transition-colors duration-100 group-hover:bg-gray-300 dark:bg-slate-800 dark:group-hover:bg-gray-700",
color,
isBroken && "opacity-50",
)}
/>
);
});
Dot.displayName = "Dot";
const NodeHandle: FC<HandleProps> = ({
keyName,
schema,
isConnected,
isRequired,
side,
title,
className,
isBroken = false,
}) => {
// Extract effective type from schema (handles anyOf/oneOf/allOf wrappers)
const effectiveType = getEffectiveType(schema);
const typeClass = `text-sm ${getTypeTextColor(effectiveType || "any")} ${
side === "left" ? "text-left" : "text-right"
}`;
const label = (
<div className={cn("flex flex-grow flex-row", isBroken && "opacity-50")}>
<span
className={cn(
"data-sentry-unmask text-m green flex items-end pr-2 text-gray-900 dark:text-gray-100",
className,
isBroken && "text-red-500 line-through",
)}
>
{title || schema.title || beautifyString(keyName.toLowerCase())}
{isRequired ? "*" : ""}
</span>
<span
className={cn(
`${typeClass} data-sentry-unmask flex items-end`,
isBroken && "text-red-400",
)}
>
({TYPE_NAME[effectiveType as keyof typeof TYPE_NAME] || "any"})
</span>
</div>
);
// Use a native HTML onContextMenu handler instead of wrapping a large node with a Radix ContextMenu trigger.
const handleContextMenu = useCallback(
(e: React.MouseEvent<HTMLDivElement>) => {
e.preventDefault();
// Optionally, you can trigger a custom, lightweight context menu here.
},
[],
);
if (side === "left") {
return (
<div
key={keyName}
className={cn("handle-container", isBroken && "pointer-events-none")}
onContextMenu={handleContextMenu}
>
<Handle
type="target"
data-testid={`input-handle-${keyName}`}
position={Position.Left}
id={keyName}
className={cn("group -ml-[38px]", isBroken && "cursor-not-allowed")}
isConnectable={!isBroken}
>
<div className="pointer-events-none flex items-center">
<Dot
isConnected={isConnected}
type={effectiveType}
isBroken={isBroken}
/>
{label}
</div>
</Handle>
<InformationTooltip description={schema.description} />
</div>
);
} else {
return (
<div
key={keyName}
className={cn(
"handle-container justify-end",
isBroken && "pointer-events-none",
)}
onContextMenu={handleContextMenu}
>
<Handle
type="source"
data-testid={`output-handle-${keyName}`}
position={Position.Right}
id={keyName}
className={cn("group -mr-[38px]", isBroken && "cursor-not-allowed")}
isConnectable={!isBroken}
>
<div className="pointer-events-none flex items-center">
{label}
<Dot
isConnected={isConnected}
type={effectiveType}
isBroken={isBroken}
/>
</div>
</Handle>
</div>
);
}
};
export default memo(NodeHandle);

View File

@@ -0,0 +1,158 @@
import React, { useContext, useMemo, useState } from "react";
import { Button } from "@/components/__legacy__/ui/button";
import { Maximize2 } from "lucide-react";
import * as Separator from "@radix-ui/react-separator";
import { ContentRenderer } from "@/components/__legacy__/ui/render";
import type { OutputMetadata } from "@/components/contextual/OutputRenderers";
import {
globalRegistry,
OutputItem,
} from "@/components/contextual/OutputRenderers";
import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag";
import { beautifyString } from "@/lib/utils";
import { BuilderContext } from "./Flow/Flow";
import ExpandableOutputDialog from "./ExpandableOutputDialog";
type NodeOutputsProps = {
title?: string;
truncateLongData?: boolean;
data: { [key: string]: Array<any> };
};
export default function NodeOutputs({
title,
truncateLongData,
data,
}: NodeOutputsProps) {
const builderContext = useContext(BuilderContext);
const enableEnhancedOutputHandling = useGetFlag(
Flag.ENABLE_ENHANCED_OUTPUT_HANDLING,
);
const [expandedDialog, setExpandedDialog] = useState<{
isOpen: boolean;
execId: string;
pinName: string;
data: any[];
} | null>(null);
if (!builderContext) {
throw new Error(
"BuilderContext consumer must be inside FlowEditor component",
);
}
const { getNodeTitle } = builderContext;
// Prepare renderers for each item when enhanced mode is enabled
const getItemRenderer = useMemo(() => {
if (!enableEnhancedOutputHandling) return null;
return (item: unknown) => {
const metadata: OutputMetadata = {};
return globalRegistry.getRenderer(item, metadata);
};
}, [enableEnhancedOutputHandling]);
const getBeautifiedPinName = (pin: string) => {
if (!pin.startsWith("tools_^_")) {
return beautifyString(pin);
}
// Special handling for tool pins: replace node ID with node title
const toolNodeID = pin.slice(8).split("_~_")[0]; // tools_^_{node_id}_~_{field}
const toolNodeTitle = getNodeTitle(toolNodeID);
return toolNodeTitle
? beautifyString(pin.replace(toolNodeID, toolNodeTitle))
: beautifyString(pin);
};
const openExpandedView = (pinName: string, pinData: any[]) => {
setExpandedDialog({
isOpen: true,
execId: title || "Node Output",
pinName,
data: pinData,
});
};
const closeExpandedView = () => {
setExpandedDialog(null);
};
return (
<div className="m-4 space-y-4">
      {title && <strong className="mt-2 flex">{title}</strong>}
{Object.entries(data).map(([pin, dataArray]) => (
<div key={pin} className="group">
<div className="flex items-center justify-between">
<div className="flex items-center">
<strong className="mr-2">Pin:</strong>
<span>{getBeautifiedPinName(pin)}</span>
</div>
{(truncateLongData || dataArray.length > 10) && (
<Button
variant="outline"
size="sm"
onClick={() => openExpandedView(pin, dataArray)}
className="hidden items-center gap-1 group-hover:flex"
title="Expand Full View"
>
<Maximize2 size={14} />
Expand
</Button>
)}
</div>
<div className="mt-2">
<strong className="mr-2">Data:</strong>
<div className="mt-1">
{dataArray.slice(0, 10).map((item, index) => {
const renderer = getItemRenderer?.(item);
if (enableEnhancedOutputHandling && renderer) {
const metadata: OutputMetadata = {};
return (
<React.Fragment key={index}>
<OutputItem
value={item}
metadata={metadata}
renderer={renderer}
/>
{index < Math.min(dataArray.length, 10) - 1 && ", "}
</React.Fragment>
);
}
return (
<React.Fragment key={index}>
<ContentRenderer
value={item}
truncateLongData={truncateLongData}
/>
{index < Math.min(dataArray.length, 10) - 1 && ", "}
</React.Fragment>
);
})}
{dataArray.length > 10 && (
<span style={{ color: "#888" }}>
<br />
                  <b>…</b>
<br />
<span>and {dataArray.length - 10} more</span>
</span>
)}
</div>
<Separator.Root className="my-4 h-[1px] bg-gray-300" />
</div>
</div>
))}
{expandedDialog && (
<ExpandableOutputDialog
isOpen={expandedDialog.isOpen}
onClose={closeExpandedView}
execId={expandedDialog.execId}
pinName={expandedDialog.pinName}
data={expandedDialog.data}
/>
)}
</div>
);
}

View File

@@ -0,0 +1,205 @@
import { FC, useCallback, useEffect, useState } from "react";
import NodeHandle from "@/app/(platform)/build/components/legacy-builder/NodeHandle";
import type {
BlockIOTableSubSchema,
TableCellValue,
TableRow,
} from "@/lib/autogpt-server-api/types";
import type { ConnectedEdge } from "./CustomNode/CustomNode";
import { cn } from "@/lib/utils";
import { PlusIcon, XIcon } from "@phosphor-icons/react";
import { Button } from "@/components/atoms/Button/Button";
import { Input } from "@/components/atoms/Input/Input";
interface NodeTableInputProps {
/** Unique identifier for the node in the builder graph */
nodeId: string;
/** Key identifier for this specific input field within the node */
selfKey: string;
/** Schema definition for the table structure */
schema: BlockIOTableSubSchema;
/** Column headers for the table */
headers: string[];
/** Initial row data for the table */
rows?: TableRow[];
/** Validation errors mapped by field key */
errors: { [key: string]: string | undefined };
/** Graph connections between nodes in the builder */
connections: ConnectedEdge[];
/** Callback when table data changes */
handleInputChange: (key: string, value: TableRow[]) => void;
/** Callback when input field is clicked (for builder selection) */
handleInputClick: (key: string) => void;
/** Additional CSS classes */
className?: string;
/** Display name for the input field */
displayName?: string;
}
/**
* Table input component for the workflow builder interface.
*
* This component is specifically designed for use in the agent builder where users
* design workflows with connected nodes. It includes graph connection capabilities
* via NodeHandle and is tightly integrated with the builder's state management.
*
* @warning Do NOT use this component in runtime/execution contexts (like RunAgentInputs).
* For runtime table inputs, use a simpler implementation without builder-specific features.
*
* @example
* ```tsx
* <NodeTableInput
* nodeId="node-123"
* selfKey="table_data"
* schema={tableSchema}
* headers={["Name", "Value"]}
* rows={existingData}
* connections={graphConnections}
* handleInputChange={handleChange}
* handleInputClick={handleClick}
* errors={{}}
* />
* ```
*
* @see Used exclusively in: `/app/(platform)/build/components/legacy-builder/NodeInputs.tsx`
*/
export const NodeTableInput: FC<NodeTableInputProps> = ({
nodeId,
selfKey,
schema,
headers,
rows = [],
errors,
connections,
handleInputChange,
handleInputClick: _handleInputClick,
className,
displayName,
}) => {
const [tableData, setTableData] = useState<TableRow[]>(rows);
// Sync with parent state when rows change
useEffect(() => {
setTableData(rows);
}, [rows]);
const isConnected = (key: string) =>
connections.some((c) => c.targetHandle === key && c.target === nodeId);
const updateTableData = useCallback(
(newData: TableRow[]) => {
setTableData(newData);
handleInputChange(selfKey, newData);
},
[selfKey, handleInputChange],
);
const updateCell = (
rowIndex: number,
header: string,
value: TableCellValue,
) => {
const newData = [...tableData];
if (!newData[rowIndex]) {
newData[rowIndex] = {};
}
newData[rowIndex][header] = value;
updateTableData(newData);
};
const addRow = () => {
if (!headers || headers.length === 0) {
return;
}
const newRow: TableRow = {};
headers.forEach((header) => {
newRow[header] = "";
});
updateTableData([...tableData, newRow]);
};
const removeRow = (index: number) => {
const newData = tableData.filter((_, i) => i !== index);
updateTableData(newData);
};
return (
<div className={cn("w-full space-y-2", className)}>
<NodeHandle
title={displayName || selfKey}
keyName={selfKey}
schema={schema}
isConnected={isConnected(selfKey)}
isRequired={false}
side="left"
/>
{!isConnected(selfKey) && (
<div className="nodrag overflow-x-auto">
<table className="w-full border-collapse">
<thead>
<tr>
{headers.map((header, index) => (
<th
key={index}
className="border border-gray-300 bg-gray-100 px-2 py-1 text-left text-sm font-medium dark:border-gray-600 dark:bg-gray-800"
>
{header}
</th>
))}
<th className="w-10"></th>
</tr>
</thead>
<tbody>
{tableData.map((row, rowIndex) => (
<tr key={rowIndex}>
{headers.map((header, colIndex) => (
<td
key={colIndex}
className="border border-gray-300 p-1 dark:border-gray-600"
>
<Input
id={`${selfKey}-${rowIndex}-${header}`}
label={header}
type="text"
value={String(row[header] || "")}
onChange={(e) =>
updateCell(rowIndex, header, e.target.value)
}
className="h-8 w-full"
placeholder={`Enter ${header}`}
/>
</td>
))}
<td className="p-1">
<Button
variant="ghost"
size="small"
onClick={() => removeRow(rowIndex)}
className="h-8 w-8 p-0"
>
<XIcon />
</Button>
</td>
</tr>
))}
</tbody>
</table>
<Button
className="mt-2 bg-gray-200 font-normal text-black hover:text-white dark:bg-gray-700 dark:text-white dark:hover:bg-gray-600"
onClick={addRow}
size="small"
>
<PlusIcon className="mr-2" /> Add Row
</Button>
</div>
)}
{errors[selfKey] && (
<span className="text-sm text-red-500">{errors[selfKey]}</span>
)}
</div>
);
};

View File

@@ -0,0 +1,311 @@
"use client";
import React, { useEffect, useState, useRef } from "react";
import ReactMarkdown from "react-markdown";
import type { GraphID } from "@/lib/autogpt-server-api/types";
import { askOtto } from "@/app/(platform)/build/actions";
import { cn } from "@/lib/utils";
import { environment } from "@/services/environment";
interface Message {
type: "user" | "assistant";
content: string;
}
export default function OttoChatWidget({
graphID,
className,
}: {
graphID?: GraphID;
className?: string;
}): React.ReactNode {
const [isOpen, setIsOpen] = useState(false);
const [messages, setMessages] = useState<Message[]>([]);
const [inputValue, setInputValue] = useState("");
const [isProcessing, setIsProcessing] = useState(false);
const [includeGraphData, setIncludeGraphData] = useState(false);
const messagesEndRef = useRef<HTMLDivElement>(null);
useEffect(() => {
// Add welcome message when component mounts
if (messages.length === 0) {
setMessages([
{
type: "assistant",
content: "Hello, I am Otto! Ask me anything about AutoGPT!",
},
]);
}
}, [messages.length]);
useEffect(() => {
// Scroll to bottom whenever messages change
messagesEndRef.current?.scrollIntoView({ behavior: "smooth" });
}, [messages]);
const handleSubmit = async (e: React.FormEvent) => {
e.preventDefault();
if (!inputValue.trim() || isProcessing) return;
const userMessage = inputValue.trim();
setInputValue("");
setIsProcessing(true);
// Add user message to chat
setMessages((prev) => [...prev, { type: "user", content: userMessage }]);
// Add temporary processing message
setMessages((prev) => [
...prev,
{ type: "assistant", content: "Processing your question..." },
]);
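    // Pair each prior user message with the assistant reply that follows it,
    // skipping the temporary "Processing your question..." placeholder, so the
    // request carries previous Q/A turns as conversation context.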
const conversationHistory = messages.reduce<
{ query: string; response: string }[]
>((acc, msg, i, arr) => {
if (
msg.type === "user" &&
i + 1 < arr.length &&
arr[i + 1].type === "assistant" &&
arr[i + 1].content !== "Processing your question..."
) {
acc.push({
query: msg.content,
response: arr[i + 1].content,
});
}
return acc;
}, []);
try {
const data = await askOtto(
userMessage,
conversationHistory,
includeGraphData,
graphID,
);
// Check if the response contains an error
if ("error" in data && data.error === true) {
// Handle different error types
let errorMessage =
"Sorry, there was an error processing your message. Please try again.";
if (data.answer === "Authentication required") {
errorMessage = "Please sign in to use the chat feature.";
} else if (data.answer === "Failed to connect to Otto service") {
errorMessage =
"Otto service is currently unavailable. Please try again later.";
} else if (data.answer.includes("timed out")) {
errorMessage = "Request timed out. Please try again later.";
}
// Remove processing message and add error message
setMessages((prev) => [
...prev.slice(0, -1),
{ type: "assistant", content: errorMessage },
]);
} else {
// Remove processing message and add actual response
setMessages((prev) => [
...prev.slice(0, -1),
{ type: "assistant", content: data.answer },
]);
}
} catch (error) {
console.error("Unexpected error in chat widget:", error);
setMessages((prev) => [
...prev.slice(0, -1),
{
type: "assistant",
content:
"An unexpected error occurred. Please refresh the page and try again.",
},
]);
} finally {
setIsProcessing(false);
setIncludeGraphData(false);
}
};
// Don't render the chat widget if we're not on the build page or in local mode
if (environment.isLocal()) {
return null;
}
if (!isOpen) {
return (
<div className={className}>
<button
onClick={() => setIsOpen(true)}
className="inline-flex h-14 w-14 items-center justify-center whitespace-nowrap rounded-2xl bg-[rgba(65,65,64,1)] text-neutral-50 shadow transition-colors hover:bg-neutral-900/90 focus-visible:outline-none focus-visible:ring-1 focus-visible:ring-neutral-950 disabled:pointer-events-none disabled:opacity-50 dark:bg-neutral-50 dark:text-neutral-900 dark:hover:bg-neutral-50/90 dark:focus-visible:ring-neutral-300"
aria-label="Open chat widget"
>
<svg
viewBox="0 0 24 24"
className="h-6 w-6"
stroke="currentColor"
strokeWidth="2"
fill="none"
strokeLinecap="round"
strokeLinejoin="round"
>
<path d="M21 15a2 2 0 0 1-2 2H7l-4 4V5a2 2 0 0 1 2-2h14a2 2 0 0 1 2 2z" />
</svg>
</button>
</div>
);
}
return (
<div
className={cn(
"flex h-[600px] w-[600px] flex-col rounded-lg border bg-background shadow-xl",
className,
"z-40",
)}
>
{/* Header */}
<div className="flex items-center justify-between border-b p-4">
<h2 className="font-semibold">Otto Assistant</h2>
<button
onClick={() => setIsOpen(false)}
className="text-muted-foreground transition-colors hover:text-foreground"
aria-label="Close chat"
>
<svg
viewBox="0 0 24 24"
className="h-5 w-5"
stroke="currentColor"
strokeWidth="2"
fill="none"
strokeLinecap="round"
strokeLinejoin="round"
>
<line x1="18" y1="6" x2="6" y2="18" />
<line x1="6" y1="6" x2="18" y2="18" />
</svg>
</button>
</div>
{/* Messages */}
<div className="flex-1 space-y-4 overflow-y-auto p-4">
{messages.map((message, index) => (
<div
key={index}
className={`flex ${message.type === "user" ? "justify-end" : "justify-start"}`}
>
<div
className={`max-w-[80%] rounded-lg p-3 ${
message.type === "user"
? "ml-4 bg-black text-white"
: "mr-4 bg-[#8b5cf6] text-white"
}`}
>
{message.type === "user" ? (
message.content
) : (
<ReactMarkdown
className="prose prose-sm dark:prose-invert max-w-none"
components={{
p: ({ children }) => (
<p className="mb-2 last:mb-0">{children}</p>
),
code(props) {
const { children, className, node: _, ...rest } = props;
const match = /language-(\w+)/.exec(className || "");
return match ? (
<pre className="overflow-x-auto rounded-md bg-muted-foreground/20 p-3">
<code className="font-mono text-sm" {...rest}>
{children}
</code>
</pre>
) : (
<code
className="rounded-md bg-muted-foreground/20 px-1 py-0.5 font-mono text-sm"
{...rest}
>
{children}
</code>
);
},
ul: ({ children }) => (
<ul className="mb-2 list-disc pl-4 last:mb-0">
{children}
</ul>
),
ol: ({ children }) => (
<ol className="mb-2 list-decimal pl-4 last:mb-0">
{children}
</ol>
),
li: ({ children }) => (
<li className="mb-1 last:mb-0">{children}</li>
),
}}
>
{message.content}
</ReactMarkdown>
)}
</div>
</div>
))}
<div ref={messagesEndRef} />
</div>
{/* Input */}
<form onSubmit={handleSubmit} className="border-t p-4">
<div className="flex flex-col gap-2">
<div className="flex gap-2">
<input
type="text"
value={inputValue}
onChange={(e) => setInputValue(e.target.value)}
placeholder="Type your message..."
className="flex-1 rounded-md border bg-background px-3 py-2 focus:outline-none focus:ring-2 focus:ring-primary"
disabled={isProcessing}
/>
<button
type="submit"
disabled={isProcessing}
className="rounded-md bg-primary px-4 py-2 text-primary-foreground transition-colors hover:bg-primary/90 disabled:opacity-50"
>
Send
</button>
</div>
{graphID && (
<button
type="button"
onClick={() => {
setIncludeGraphData((prev) => !prev);
}}
className={`flex items-center gap-2 rounded border px-2 py-1.5 text-sm transition-all duration-200 ${
includeGraphData
? "border-primary/30 bg-primary/10 text-primary hover:shadow-[0_0_10px_3px_rgba(139,92,246,0.3)]"
: "border-transparent bg-muted text-muted-foreground hover:bg-muted/80 hover:shadow-[0_0_10px_3px_rgba(139,92,246,0.15)]"
}`}
>
<svg
viewBox="0 0 24 24"
className="h-4 w-4"
stroke="currentColor"
strokeWidth="2"
fill="none"
strokeLinecap="round"
strokeLinejoin="round"
>
<rect x="3" y="3" width="18" height="18" rx="2" ry="2" />
<circle cx="8.5" cy="8.5" r="1.5" />
<polyline points="21 15 16 10 5 21" />
</svg>
{includeGraphData
? "Graph data will be included"
: "Include graph data"}
</button>
)}
</div>
</form>
</div>
);
}

View File

@@ -0,0 +1,50 @@
import React, { FC } from "react";
import { Button } from "../../../../../components/__legacy__/ui/button";
import { NodeExecutionResult } from "@/lib/autogpt-server-api/types";
import DataTable from "./DataTable";
import { Separator } from "@/components/__legacy__/ui/separator";
interface OutputModalProps {
isOpen: boolean;
onClose: () => void;
executionResults: {
execId: string;
data: NodeExecutionResult["output_data"];
}[];
}
const OutputModalComponent: FC<OutputModalProps> = ({
isOpen,
onClose,
executionResults,
}) => {
if (!isOpen) {
return null;
}
return (
<div className="nodrag nowheel fixed inset-0 flex items-center justify-center bg-white bg-opacity-60">
<div className="w-[500px] max-w-[90%] rounded-lg border-[1.5px] bg-white p-5">
<strong>Output Data History</strong>
<div className="my-2 max-h-[384px] flex-grow overflow-y-auto rounded-md p-2">
{executionResults.map((data, i) => (
            <React.Fragment key={i}>
              <DataTable
                title={data.execId}
                data={data.data}
                truncateLongData={true}
              />
              <Separator />
            </React.Fragment>
))}
</div>
<div className="mt-2.5 flex justify-end gap-2.5">
<Button onClick={onClose}>Close</Button>
</div>
</div>
</div>
);
};
export default OutputModalComponent;

View File

@@ -0,0 +1,96 @@
import { useCallback } from "react";
import { AgentRunDraftView } from "@/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/agent-run-draft-view";
import { Dialog } from "@/components/molecules/Dialog/Dialog";
import type {
CredentialsMetaInput,
Graph,
} from "@/lib/autogpt-server-api/types";
interface RunInputDialogProps {
isOpen: boolean;
doClose: () => void;
graph: Graph;
doRun?: (
inputs: Record<string, any>,
credentialsInputs: Record<string, CredentialsMetaInput>,
) => Promise<void> | void;
doCreateSchedule?: (
cronExpression: string,
scheduleName: string,
inputs: Record<string, any>,
credentialsInputs: Record<string, CredentialsMetaInput>,
) => Promise<void> | void;
}
export function RunnerInputDialog({
isOpen,
doClose,
graph,
doRun,
doCreateSchedule,
}: RunInputDialogProps) {
const handleRun = useCallback(
doRun
? async (
inputs: Record<string, any>,
credentials_inputs: Record<string, CredentialsMetaInput>,
) => {
await doRun(inputs, credentials_inputs);
doClose();
}
: async () => {},
[doRun, doClose],
);
const handleSchedule = useCallback(
doCreateSchedule
? async (
cronExpression: string,
scheduleName: string,
inputs: Record<string, any>,
credentialsInputs: Record<string, CredentialsMetaInput>,
) => {
await doCreateSchedule(
cronExpression,
scheduleName,
inputs,
credentialsInputs,
);
doClose();
}
: async () => {},
[doCreateSchedule, doClose],
);
return (
<Dialog
title="Run your agent"
controlled={{
isOpen,
set: (open) => {
if (!open) doClose();
},
}}
onClose={doClose}
styling={{
maxWidth: "56rem",
width: "90vw",
}}
>
<Dialog.Content>
<div className="flex flex-col p-10">
<p className="mt-2 text-sm text-zinc-600">{graph.name}</p>
<AgentRunDraftView
className="p-0"
graph={graph}
doRun={doRun ? handleRun : undefined}
onRun={doRun ? undefined : doClose}
doCreateSchedule={doCreateSchedule ? handleSchedule : undefined}
onCreateSchedule={doCreateSchedule ? undefined : doClose}
/>
</div>
</Dialog.Content>
</Dialog>
);
}

View File

@@ -0,0 +1,156 @@
import React from "react";
import {
Sheet,
SheetContent,
SheetHeader,
SheetTitle,
SheetDescription,
} from "@/components/__legacy__/ui/sheet";
import { ScrollArea } from "@/components/__legacy__/ui/scroll-area";
import { Label } from "@/components/__legacy__/ui/label";
import { Textarea } from "@/components/__legacy__/ui/textarea";
import { Button } from "@/components/__legacy__/ui/button";
import { Clipboard } from "lucide-react";
import { useToast } from "@/components/molecules/Toast/use-toast";
export type OutputNodeInfo = {
metadata: {
name: string;
description: string;
};
result?: any;
};
interface OutputModalProps {
isOpen: boolean;
doClose: () => void;
outputs: OutputNodeInfo[];
graphExecutionError?: string | null;
}
const formatOutput = (output: any): string => {
if (typeof output === "object") {
try {
if (
Array.isArray(output) &&
output.every((item) => typeof item === "string")
) {
return output.join("\n").replace(/\\n/g, "\n");
}
return JSON.stringify(output, null, 2);
} catch (error) {
return `Error formatting output: ${(error as Error).message}`;
}
}
if (typeof output === "string") {
return output.replace(/\\n/g, "\n");
}
return String(output);
};
export function RunnerOutputUI({
isOpen,
doClose,
outputs,
graphExecutionError,
}: OutputModalProps) {
const { toast } = useToast();
const copyOutput = (name: string, output: any) => {
const formattedOutput = formatOutput(output);
navigator.clipboard.writeText(formattedOutput).then(() => {
toast({
title: `"${name}" output copied to clipboard!`,
duration: 2000,
});
});
};
const adjustTextareaHeight = (textarea: HTMLTextAreaElement) => {
textarea.style.height = "auto";
textarea.style.height = `${textarea.scrollHeight}px`;
};
return (
<Sheet open={isOpen} onOpenChange={doClose}>
<SheetContent
side="right"
className="flex h-full w-full flex-col overflow-hidden sm:max-w-[600px]"
>
<SheetHeader className="px-2 py-2">
<SheetTitle className="text-xl">Run Outputs</SheetTitle>
<SheetDescription className="mt-1 text-sm">
View the outputs from your agent run.
</SheetDescription>
</SheetHeader>
<div className="flex-grow overflow-y-auto px-2 py-2">
<ScrollArea className="h-full overflow-auto pr-4">
<div className="space-y-4">
{graphExecutionError && (
<div className="rounded-md border border-red-200 bg-red-50 p-3 dark:border-red-800 dark:bg-red-900/20">
<p className="text-sm text-red-800 dark:text-red-200">
<strong>Error:</strong> {graphExecutionError}
</p>
</div>
)}
{outputs && outputs.length > 0 ? (
outputs.map((output, i) => (
<div key={i} className="space-y-1">
<Label className="text-base font-semibold">
{output.metadata.name || "Unnamed Output"}
</Label>
{output.metadata.description && (
<Label className="block text-sm text-gray-600">
{output.metadata.description}
</Label>
)}
<div className="group relative rounded-md bg-gray-100 p-2">
<Button
className="absolute right-1 top-1 z-10 m-1 hidden p-2 group-hover:block"
variant="outline"
size="icon"
onClick={() =>
copyOutput(
output.metadata.name || "Unnamed Output",
output.result,
)
}
title="Copy Output"
>
<Clipboard size={18} />
</Button>
<Textarea
readOnly
value={formatOutput(output.result ?? "No output yet")}
className="w-full resize-none whitespace-pre-wrap break-words border-none bg-transparent text-sm"
style={{
height: "auto",
minHeight: "2.5rem",
maxHeight: "400px",
}}
ref={(el) => {
if (el) {
adjustTextareaHeight(el);
if (el.scrollHeight > 400) {
el.style.height = "400px";
}
}
}}
/>
</div>
</div>
))
) : (
<p>No output blocks available.</p>
)}
</div>
</ScrollArea>
</div>
</SheetContent>
</Sheet>
);
}
export default RunnerOutputUI;
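For reference — not part of the diff — a minimal sketch of how the formatOutput helper above treats the input shapes it distinguishes, assuming it were in scope (in the file it is module-private); the sample values are hypothetical:

// Hypothetical inputs illustrating formatOutput's branches (values are examples only):
formatOutput("line one\\nline two");       // -> "line one\nline two" — literal "\n" sequences are expanded
formatOutput(["alpha", "beta"]);           // -> "alpha\nbeta" — string arrays are joined one item per line
formatOutput({ score: 0.9, label: "ok" }); // -> pretty-printed JSON via JSON.stringify(..., null, 2)
formatOutput(42);                          // -> "42" — anything else is coerced with String()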

View File

@@ -0,0 +1,117 @@
import React, {
useState,
forwardRef,
useImperativeHandle,
useMemo,
} from "react";
import { Node } from "@xyflow/react";
import { CustomNodeData } from "@/app/(platform)/build/components/legacy-builder/CustomNode/CustomNode";
import {
BlockUIType,
CredentialsMetaInput,
Graph,
} from "@/lib/autogpt-server-api/types";
import RunnerOutputUI, { OutputNodeInfo } from "./RunnerOutputUI";
import { RunnerInputDialog } from "./RunnerInputUI";
interface RunnerUIWrapperProps {
graph: Graph;
nodes: Node<CustomNodeData>[];
graphExecutionError?: string | null;
saveAndRun: (
inputs: Record<string, any>,
credentialsInputs: Record<string, CredentialsMetaInput>,
) => void;
createRunSchedule: (
cronExpression: string,
scheduleName: string,
inputs: Record<string, any>,
credentialsInputs: Record<string, CredentialsMetaInput>,
) => Promise<void>;
}
export interface RunnerUIWrapperRef {
openRunInputDialog: () => void;
openRunnerOutput: () => void;
runOrOpenInput: () => void;
}
const RunnerUIWrapper = forwardRef<RunnerUIWrapperRef, RunnerUIWrapperProps>(
(
{ graph, nodes, graphExecutionError, saveAndRun, createRunSchedule },
ref,
) => {
const [isRunInputDialogOpen, setIsRunInputDialogOpen] = useState(false);
const [isRunnerOutputOpen, setIsRunnerOutputOpen] = useState(false);
const graphInputs = graph.input_schema.properties;
const graphOutputs = useMemo((): OutputNodeInfo[] => {
const outputNodes = nodes.filter(
(node) => node.data.uiType === BlockUIType.OUTPUT,
);
return outputNodes.map(
(node) =>
({
metadata: {
name: node.data.hardcodedValues.name || "Output",
description:
node.data.hardcodedValues.description ||
"Output from the agent",
},
result:
(node.data.executionResults as any)
?.map((result: any) => result?.data?.output)
.join("\n--\n") || "No output yet",
}) satisfies OutputNodeInfo,
);
}, [nodes]);
const openRunInputDialog = () => setIsRunInputDialogOpen(true);
const openRunnerOutput = () => setIsRunnerOutputOpen(true);
const runOrOpenInput = () => {
if (
Object.keys(graphInputs).length > 0 ||
Object.keys(graph.credentials_input_schema.properties).length > 0
) {
openRunInputDialog();
} else {
saveAndRun({}, {});
}
};
useImperativeHandle(
ref,
() =>
({
openRunInputDialog,
openRunnerOutput,
runOrOpenInput,
}) satisfies RunnerUIWrapperRef,
);
return (
<>
<RunnerInputDialog
isOpen={isRunInputDialogOpen}
doClose={() => setIsRunInputDialogOpen(false)}
graph={graph}
doRun={saveAndRun}
doCreateSchedule={createRunSchedule}
/>
<RunnerOutputUI
isOpen={isRunnerOutputOpen}
doClose={() => setIsRunnerOutputOpen(false)}
outputs={graphOutputs}
graphExecutionError={graphExecutionError}
/>
</>
);
},
);
RunnerUIWrapper.displayName = "RunnerUIWrapper";
export default RunnerUIWrapper;
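A usage sketch — not part of the diff — of the imperative handle exposed above. The parent component and its button wiring are assumptions standing in for the legacy Flow editor's real toolbar:

import React, { useRef } from "react";
import RunnerUIWrapper, { RunnerUIWrapperRef } from "./RunnerUIWrapper"; // module path assumed

// Hypothetical parent: the Run button either runs immediately or opens the input dialog.
function EditorRunControlsSketch(
  props: React.ComponentProps<typeof RunnerUIWrapper>,
) {
  const runnerUIRef = useRef<RunnerUIWrapperRef>(null);
  return (
    <>
      {/* Runs right away when the graph has no inputs or credentials, otherwise opens the input dialog */}
      <button onClick={() => runnerUIRef.current?.runOrOpenInput()}>Run</button>
      {/* Opens the side sheet listing the latest output-block results */}
      <button onClick={() => runnerUIRef.current?.openRunnerOutput()}>Outputs</button>
      <RunnerUIWrapper ref={runnerUIRef} {...props} />
    </>
  );
}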

View File

@@ -0,0 +1,217 @@
import React, { useEffect, useState } from "react";
import {
Popover,
PopoverContent,
PopoverTrigger,
} from "@/components/__legacy__/ui/popover";
import { Card, CardContent, CardFooter } from "@/components/__legacy__/ui/card";
import { Input } from "@/components/__legacy__/ui/input";
import { Button } from "@/components/__legacy__/ui/button";
import { GraphMeta } from "@/lib/autogpt-server-api";
import { Label } from "@/components/__legacy__/ui/label";
import { IconSave } from "@/components/__legacy__/ui/icons";
import {
Tooltip,
TooltipContent,
TooltipTrigger,
} from "@/components/atoms/Tooltip/BaseTooltip";
import { useToast } from "@/components/molecules/Toast/use-toast";
import { useQueryClient } from "@tanstack/react-query";
import { getGetV2ListMySubmissionsQueryKey } from "@/app/api/__generated__/endpoints/store/store";
import { CronExpressionDialog } from "@/app/(platform)/library/agents/[id]/components/OldAgentLibraryView/components/cron-scheduler-dialog";
import { humanizeCronExpression } from "@/lib/cron-expression-utils";
import { CalendarClockIcon } from "lucide-react";
interface SaveControlProps {
agentMeta: GraphMeta | null;
agentName: string;
agentDescription: string;
agentRecommendedScheduleCron: string;
canSave: boolean;
onSave: () => Promise<void>;
onNameChange: (name: string) => void;
onDescriptionChange: (description: string) => void;
onRecommendedScheduleCronChange: (cron: string) => void;
pinSavePopover: boolean;
}
/**
 * A SaveControl component to be used within the ControlPanel. It allows the user to save the agent.
 * @param {Object} SaveControlProps - The properties of the SaveControl component.
 * @param {GraphMeta | null} SaveControlProps.agentMeta - The agent's metadata, or null if creating a new agent.
 * @param {string} SaveControlProps.agentName - The agent's name.
 * @param {string} SaveControlProps.agentDescription - The agent's description.
 * @param {string} SaveControlProps.agentRecommendedScheduleCron - The agent's recommended run schedule as a cron expression.
 * @param {boolean} SaveControlProps.canSave - Whether the button to save the agent should be enabled.
 * @param {() => Promise<void>} SaveControlProps.onSave - Function to save the agent.
 * @param {(name: string) => void} SaveControlProps.onNameChange - Function to handle name changes.
 * @param {(description: string) => void} SaveControlProps.onDescriptionChange - Function to handle description changes.
 * @param {(cron: string) => void} SaveControlProps.onRecommendedScheduleCronChange - Function to handle changes to the recommended schedule.
 * @param {boolean} SaveControlProps.pinSavePopover - Whether to keep the save popover pinned open (used by the tutorial).
 * @returns The SaveControl component.
 */
export const SaveControl = ({
agentMeta,
canSave,
onSave,
agentName,
onNameChange,
agentDescription,
onDescriptionChange,
agentRecommendedScheduleCron,
onRecommendedScheduleCronChange,
pinSavePopover,
}: SaveControlProps) => {
/**
* Note for improvement:
* At the moment we are leveraging onDescriptionChange and onNameChange to handle the changes in the description and name of the agent.
* We should migrate this to be handled with form controls and a form library.
*/
const { toast } = useToast();
const queryClient = useQueryClient();
const [cronScheduleDialogOpen, setCronScheduleDialogOpen] = useState(false);
const handleScheduleChange = (cronExpression: string) => {
onRecommendedScheduleCronChange(cronExpression);
};
useEffect(() => {
const handleKeyDown = async (event: KeyboardEvent) => {
if ((event.ctrlKey || event.metaKey) && event.key === "s") {
event.preventDefault(); // Stop the browser default action
await onSave(); // Call your save function
queryClient.invalidateQueries({
queryKey: getGetV2ListMySubmissionsQueryKey(),
});
toast({
duration: 2000,
title: "All changes saved successfully!",
});
}
};
window.addEventListener("keydown", handleKeyDown);
return () => {
window.removeEventListener("keydown", handleKeyDown);
};
}, [onSave, toast]);
return (
<Popover open={pinSavePopover ? true : undefined}>
<Tooltip delayDuration={500}>
<TooltipTrigger asChild>
<PopoverTrigger asChild>
<Button
variant="ghost"
size="icon"
data-id="save-control-popover-trigger"
data-testid="blocks-control-save-button"
name="Save"
>
<IconSave className="dark:text-gray-300" />
</Button>
</PopoverTrigger>
</TooltipTrigger>
<TooltipContent side="right">Save</TooltipContent>
</Tooltip>
<PopoverContent
side="right"
sideOffset={15}
align="start"
data-id="save-control-popover-content"
className="w-96 max-w-[400px]"
>
<Card className="border-none shadow-none dark:bg-slate-900">
<CardContent className="p-4">
<div className="space-y-3">
<div>
<Label htmlFor="name" className="dark:text-gray-300">
Name
</Label>
<Input
id="name"
placeholder="Enter your agent name"
value={agentName}
onChange={(e) => onNameChange(e.target.value)}
data-id="save-control-name-input"
data-testid="save-control-name-input"
maxLength={100}
className="mt-1"
/>
</div>
<div>
<Label htmlFor="description" className="dark:text-gray-300">
Description
</Label>
<Input
id="description"
placeholder="Your agent description"
value={agentDescription}
onChange={(e) => onDescriptionChange(e.target.value)}
data-id="save-control-description-input"
data-testid="save-control-description-input"
maxLength={500}
className="mt-1"
/>
</div>
<div>
<Label className="dark:text-gray-300">
Recommended Schedule
</Label>
<Button
variant="outline"
onClick={() => setCronScheduleDialogOpen(true)}
className="mt-1 w-full min-w-0 justify-start text-sm"
data-id="save-control-recommended-schedule-button"
data-testid="save-control-recommended-schedule-button"
>
<CalendarClockIcon className="mr-2 h-4 w-4 flex-shrink-0" />
<span className="min-w-0 flex-1 truncate">
{agentRecommendedScheduleCron
? humanizeCronExpression(agentRecommendedScheduleCron)
: "Set schedule"}
</span>
</Button>
</div>
{agentMeta?.version && (
<div>
<Label htmlFor="version" className="dark:text-gray-300">
Version
</Label>
<Input
id="version"
placeholder="Version"
value={agentMeta?.version || "-"}
disabled
data-testid="save-control-version-output"
className="mt-1"
/>
</div>
)}
</div>
</CardContent>
<CardFooter className="flex flex-col items-stretch gap-2">
<Button
className="w-full dark:bg-slate-700 dark:text-slate-100 dark:hover:bg-slate-800"
onClick={onSave}
data-id="save-control-save-agent"
data-testid="save-control-save-agent-button"
disabled={!canSave}
>
Save Agent
</Button>
</CardFooter>
</Card>
</PopoverContent>
<CronExpressionDialog
open={cronScheduleDialogOpen}
setOpen={setCronScheduleDialogOpen}
onSubmit={handleScheduleChange}
defaultCronExpression={agentRecommendedScheduleCron}
title="Recommended Schedule"
/>
</Popover>
);
};

View File

@@ -0,0 +1,95 @@
import { CustomNodeData } from "./CustomNode/CustomNode";
import { CustomEdgeData } from "./CustomEdge/CustomEdge";
import { Edge } from "@xyflow/react";
type ActionType =
| "ADD_NODE"
| "DELETE_NODE"
| "ADD_EDGE"
| "DELETE_EDGE"
| "UPDATE_NODE"
| "MOVE_NODE"
| "UPDATE_INPUT"
| "UPDATE_NODE_POSITION";
type AddNodePayload = { node: CustomNodeData };
type DeleteNodePayload = { nodeId: string };
type AddEdgePayload = { edge: Edge<CustomEdgeData> };
type DeleteEdgePayload = { edgeId: string };
type UpdateNodePayload = { nodeId: string; newData: Partial<CustomNodeData> };
type MoveNodePayload = { nodeId: string; position: { x: number; y: number } };
type UpdateInputPayload = {
nodeId: string;
oldValues: { [key: string]: any };
newValues: { [key: string]: any };
};
type UpdateNodePositionPayload = {
nodeId: string;
oldPosition: { x: number; y: number };
newPosition: { x: number; y: number };
};
type ActionPayload =
| AddNodePayload
| DeleteNodePayload
| AddEdgePayload
| DeleteEdgePayload
| UpdateNodePayload
| MoveNodePayload
| UpdateInputPayload
| UpdateNodePositionPayload;
type Action = {
type: ActionType;
payload: ActionPayload;
undo: () => void;
redo: () => void;
};
class History {
private past: Action[] = [];
private future: Action[] = [];
push(action: Action) {
this.past.push(action);
this.future = [];
}
undo() {
const action = this.past.pop();
if (action) {
action.undo();
this.future.push(action);
}
}
redo() {
const action = this.future.pop();
if (action) {
action.redo();
this.past.push(action);
}
}
canUndo(): boolean {
return this.past.length > 0;
}
canRedo(): boolean {
return this.future.length > 0;
}
clear() {
this.past = [];
this.future = [];
}
getHistoryState() {
return {
past: [...this.past],
future: [...this.future],
};
}
}
export const history = new History();
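A usage sketch — not part of the diff — of the shared history singleton defined above. Each recorded action carries its own undo/redo closures; the node helpers and values here are hypothetical stand-ins for the editor's real state setters:

import { history } from "./history"; // module path assumed

// Record an action together with closures that know how to revert and reapply it.
history.push({
  type: "ADD_NODE",
  payload: { node: newNodeData }, // newNodeData: CustomNodeData produced by the editor (assumed)
  undo: () => removeNodeFromCanvas(newNodeId), // hypothetical helper that removes the node again
  redo: () => addNodeToCanvas(newNodeData), // hypothetical helper that re-adds the same node
});

// Later, e.g. from Ctrl+Z / Ctrl+Shift+Z keyboard handlers:
if (history.canUndo()) history.undo();
if (history.canRedo()) history.redo();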

View File

@@ -0,0 +1,569 @@
import Shepherd from "shepherd.js";
import "shepherd.js/dist/css/shepherd.css";
import { Key, storage } from "@/services/storage/local-storage";
import { analytics } from "@/services/analytics";
export const startTutorial = (
emptyNodeList: (forceEmpty: boolean) => boolean,
setPinBlocksPopover: (value: boolean) => void,
setPinSavePopover: (value: boolean) => void,
) => {
const tour = new Shepherd.Tour({
useModalOverlay: true,
defaultStepOptions: {
cancelIcon: { enabled: true },
scrollTo: { behavior: "smooth", block: "center" },
},
});
// CSS classes for disabling and highlighting blocks
const disableClass = "disable-blocks";
const highlightClass = "highlight-block";
let isConnecting = false;
// Helper function to disable all blocks except the target block
const disableOtherBlocks = (targetBlockSelector: string) => {
document.querySelectorAll('[data-id^="block-card-"]').forEach((block) => {
block.classList.toggle(disableClass, !block.matches(targetBlockSelector));
block.classList.toggle(
highlightClass,
block.matches(targetBlockSelector),
);
});
};
// Helper function to enable all blocks
const enableAllBlocks = () => {
document.querySelectorAll('[data-id^="block-card-"]').forEach((block) => {
block.classList.remove(disableClass, highlightClass);
});
};
// Inject CSS for disabling and highlighting blocks
const injectStyles = () => {
const style = document.createElement("style");
style.textContent = `
.${disableClass} {
pointer-events: none;
opacity: 0.5;
}
.${highlightClass} {
background-color: #ffeb3b;
border: 2px solid #fbc02d;
transition: background-color 0.3s, border-color 0.3s;
}
`;
document.head.appendChild(style);
};
// Helper function to check if an element is present in the DOM
const waitForElement = (selector: string): Promise<void> => {
return new Promise((resolve) => {
const checkElement = () => {
if (document.querySelector(selector)) {
resolve();
} else {
setTimeout(checkElement, 10);
}
};
checkElement();
});
};
// Function to detect the correct connection and advance the tour
const detectConnection = () => {
const checkForConnection = () => {
const correctConnection = document.querySelector(
'[data-testid^="rf__edge-"]',
);
if (correctConnection) {
tour.show("press-run-again");
} else {
setTimeout(checkForConnection, 100);
}
};
checkForConnection();
};
// Define state management functions to handle connection state
function startConnecting() {
isConnecting = true;
}
function stopConnecting() {
isConnecting = false;
}
// Reset connection state when revisiting the step
function resetConnectionState() {
stopConnecting();
}
// Event handlers for mouse down and up to manage connection state
function handleMouseDown() {
startConnecting();
setTimeout(() => {
if (isConnecting) {
tour.next();
}
}, 100);
}
// Event handler for mouse up to check if the connection was successful
function handleMouseUp(event: { target: any }) {
const target = event.target;
const validConnectionPoint = document.querySelector(
'[data-testid^="rf__node-"]:nth-child(2) [data-id$="-a-target"]',
);
if (validConnectionPoint && !validConnectionPoint.contains(target)) {
setTimeout(() => {
if (!document.querySelector('[data-testid^="rf__edge-"]')) {
stopConnecting();
tour.show("connect-blocks-output");
}
}, 200);
} else {
stopConnecting();
}
}
// Define the fitViewToScreen function
const fitViewToScreen = () => {
const fitViewButton = document.querySelector(
".react-flow__controls-fitview",
) as HTMLButtonElement;
if (fitViewButton) {
fitViewButton.click();
}
};
injectStyles();
const warningText = emptyNodeList(false)
? ""
: "<br/><br/><b>Caution: Clicking next will start a tutorial and will clear the current flow.</b>";
tour.addStep({
id: "starting-step",
title: "Welcome to the Tutorial",
text: `This is the AutoGPT builder! ${warningText}`,
buttons: [
{
text: "Skip Tutorial",
action: () => {
tour.cancel(); // Ends the tour
storage.set(Key.SHEPHERD_TOUR, "skipped"); // Set the tutorial as skipped in local storage
},
classes: "shepherd-button-secondary", // Optionally add a class for styling the skip button differently
},
{
text: "Next",
action: () => {
emptyNodeList(true);
tour.next();
},
},
],
});
tour.addStep({
id: "open-block-step",
title: "Open Blocks Menu",
text: "Please click the block button to open the blocks menu.",
attachTo: {
element: '[data-id="blocks-control-popover-trigger"]',
on: "right",
},
advanceOn: {
selector: '[data-id="blocks-control-popover-trigger"]',
event: "click",
},
buttons: [],
});
tour.addStep({
id: "scroll-block-menu",
title: "Scroll Down or Search",
text: 'Scroll down or search in the blocks menu for the "Calculator Block" and press the block to add it.',
attachTo: {
element: '[data-id="blocks-control-popover-content"]',
on: "right",
},
buttons: [],
beforeShowPromise: () =>
waitForElement('[data-id="blocks-control-popover-content"]').then(() => {
disableOtherBlocks(
'[data-id="block-card-b1ab9b19-67a6-406d-abf5-2dba76d00c79"]',
);
}),
advanceOn: {
selector: '[data-id="block-card-b1ab9b19-67a6-406d-abf5-2dba76d00c79"]',
event: "click",
},
when: {
show: () => setPinBlocksPopover(true),
hide: enableAllBlocks,
},
});
tour.addStep({
id: "focus-new-block",
title: "New Block",
text: "This is the Calculator Block! Let's go over how it works.",
attachTo: { element: `[data-id="custom-node-1"]`, on: "left" },
beforeShowPromise: () => waitForElement('[data-id="custom-node-1"]'),
buttons: [
{
text: "Next",
action: tour.next,
},
],
when: {
show: () => {
setPinBlocksPopover(false);
setTimeout(() => {
fitViewToScreen();
}, 100);
},
},
});
tour.addStep({
id: "input-to-block",
title: "Input to the Block",
text: "This is the input pin for the block. You can input the output of other blocks here; this block takes numbers as input.",
attachTo: { element: '[data-nodeid="1"]', on: "left" },
buttons: [
{
text: "Back",
action: tour.back,
},
{
text: "Next",
action: tour.next,
},
],
});
tour.addStep({
id: "output-from-block",
title: "Output from the Block",
text: "This is the output pin for the block. You can connect this to another block to pass the output along.",
attachTo: { element: '[data-handlepos="right"]', on: "right" },
buttons: [
{
text: "Back",
action: tour.back,
},
{
text: "Next",
action: tour.next,
},
],
});
tour.addStep({
id: "select-operation-and-input",
title: "Select Operation and Input Numbers",
text: "Select any mathematical operation you'd like to perform, and enter numbers in both input fields.",
attachTo: { element: '[data-id="input-handles"]', on: "right" },
buttons: [
{
text: "Back",
action: tour.back,
},
{
text: "Next",
action: tour.next,
},
],
});
tour.addStep({
id: "press-initial-save-button",
title: "Press Save",
text: "First we need to save the flow before we can run it!",
attachTo: {
element: '[data-id="save-control-popover-trigger"]',
on: "left",
},
advanceOn: {
selector: '[data-id="save-control-popover-trigger"]',
event: "click",
},
buttons: [
{
text: "Back",
action: tour.back,
},
],
when: {
hide: () => setPinSavePopover(true),
},
});
tour.addStep({
id: "save-agent-details",
title: "Save the Agent",
text: "Enter a name for your agent, add an optional description, and then click 'Save agent' to save your flow.",
attachTo: {
element: '[data-id="save-control-popover-content"]',
on: "top",
},
buttons: [],
beforeShowPromise: () =>
waitForElement('[data-id="save-control-popover-content"]'),
advanceOn: {
selector: '[data-id="save-control-save-agent"]',
event: "click",
},
when: {
hide: () => setPinSavePopover(false),
},
});
tour.addStep({
id: "press-run",
title: "Press Run",
text: "Start your first flow by pressing the Run button!",
attachTo: {
element: '[data-tutorial-id="primary-action-run-agent"]',
on: "top",
},
advanceOn: {
selector: '[data-tutorial-id="primary-action-run-agent"]',
event: "click",
},
buttons: [],
beforeShowPromise: () =>
waitForElement('[data-tutorial-id="primary-action-run-agent"]'),
when: {
hide: () => {
setTimeout(() => {
fitViewToScreen();
}, 500);
},
},
});
tour.addStep({
id: "wait-for-processing",
title: "Processing",
text: "Let's wait for the block to finish being processed...",
attachTo: {
element: '[data-id^="badge-"][data-id$="-QUEUED"]',
on: "bottom",
},
buttons: [],
beforeShowPromise: () =>
waitForElement('[data-id^="badge-"][data-id$="-QUEUED"]').then(
fitViewToScreen,
),
when: {
show: () => {
waitForElement('[data-id^="badge-"][data-id$="-COMPLETED"]').then(
() => {
tour.next();
},
);
},
},
});
tour.addStep({
id: "check-output",
title: "Check the Output",
text: "Check here to see the output of the block after running the flow.",
attachTo: { element: '[data-id="latest-output"]', on: "top" },
beforeShowPromise: () =>
new Promise((resolve) => {
setTimeout(() => {
waitForElement('[data-id="latest-output"]').then(resolve);
}, 100);
}),
buttons: [
{
text: "Next",
action: tour.next,
},
],
when: {
show: () => {
fitViewToScreen();
},
},
});
tour.addStep({
id: "copy-paste-block",
title: "Copy and Paste the Block",
text: "Lets duplicate this block. Click and hold the block with your mouse, then press Ctrl+C (Cmd+C on Mac) to copy and Ctrl+V (Cmd+V on Mac) to paste.",
attachTo: { element: '[data-testid^="rf__node-"]', on: "top" },
buttons: [
{
text: "Back",
action: tour.back,
},
],
when: {
show: () => {
fitViewToScreen();
waitForElement('[data-testid^="rf__node-"]:nth-child(2)').then(() => {
tour.next();
});
},
},
});
tour.addStep({
id: "focus-second-block",
title: "Focus on the New Block",
text: "This is your copied Calculator Block. Now, lets move it to the side of the first block.",
attachTo: { element: '[data-testid^="rf__node-"]:nth-child(2)', on: "top" },
beforeShowPromise: () =>
waitForElement('[data-testid^="rf__node-"]:nth-child(2)'),
buttons: [
{
text: "Next",
action: tour.next,
},
],
});
tour.addStep({
id: "connect-blocks-output",
title: "Connect the Blocks: Output",
text: "Now, let's connect the output of the first Calculator Block to the input of the second Calculator Block. Drag from the output pin of the first block to the input pin (A) of the second block.",
attachTo: {
element:
'[data-testid^="rf__node-"]:first-child [data-id$="-result-source"]',
on: "bottom",
},
buttons: [
{
text: "Back",
action: tour.back,
},
],
beforeShowPromise: () => {
return waitForElement(
'[data-testid^="rf__node-"]:first-child [data-id$="-result-source"]',
);
},
when: {
show: () => {
fitViewToScreen();
resetConnectionState(); // Reset state when revisiting this step
tour.modal.show();
const outputPin = document.querySelector(
'[data-testid^="rf__node-"]:first-child [data-id$="-result-source"]',
);
if (outputPin) {
outputPin.addEventListener("mousedown", handleMouseDown);
}
},
hide: () => {
const outputPin = document.querySelector(
'[data-testid^="rf__node-"]:first-child [data-id$="-result-source"]',
);
if (outputPin) {
outputPin.removeEventListener("mousedown", handleMouseDown);
}
},
},
});
tour.addStep({
id: "connect-blocks-input",
title: "Connect the Blocks: Input",
text: "Now, connect the output to the input pin of the second block (A).",
attachTo: {
element: '[data-testid^="rf__node-"]:nth-child(2) [data-id$="-a-target"]',
on: "top",
},
buttons: [],
beforeShowPromise: () => {
return waitForElement(
'[data-testid^="rf__node-"]:nth-child(2) [data-id$="-a-target"]',
).then(() => {
detectConnection();
});
},
when: {
show: () => {
tour.modal.show();
document.addEventListener("mouseup", handleMouseUp, true);
},
hide: () => {
tour.modal.hide();
document.removeEventListener("mouseup", handleMouseUp, true);
},
},
});
tour.addStep({
id: "press-run-again",
title: "Press Run Again",
text: "Now, press the Run button again to execute the flow with the new Calculator Block added!",
attachTo: {
element: '[data-tutorial-id="primary-action-run-agent"]',
on: "top",
},
advanceOn: {
selector: '[data-tutorial-id="primary-action-run-agent"]',
event: "click",
},
buttons: [],
beforeShowPromise: () =>
waitForElement('[data-tutorial-id="primary-action-run-agent"]'),
when: {
hide: () => {
setTimeout(() => {
fitViewToScreen();
}, 500);
},
},
});
tour.addStep({
id: "congratulations",
title: "Congratulations!",
text: "You have successfully created your first flow. Watch for the outputs in the blocks!",
beforeShowPromise: () => waitForElement('[data-id="latest-output"]'),
when: {
show: () => tour.modal.hide(),
},
buttons: [
{
text: "Finish",
action: tour.complete,
},
],
});
// Unpin blocks and save menu when the tour is completed or canceled
tour.on("complete", () => {
setPinBlocksPopover(false);
setPinSavePopover(false);
storage.set(Key.SHEPHERD_TOUR, "completed"); // Optionally mark the tutorial as completed
});
for (const step of tour.steps) {
step.on("show", () => {
"use client";
console.debug("sendTutorialStep");
analytics.sendGAEvent("event", "tutorial_step_shown", { value: step.id });
});
}
tour.on("cancel", () => {
setPinBlocksPopover(false);
setPinSavePopover(false);
storage.set(Key.SHEPHERD_TOUR, "canceled"); // Optionally mark the tutorial as canceled
});
tour.start();
};
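A sketch — not part of the diff — of how startTutorial is presumably gated by the caller: the SHEPHERD_TOUR storage key matches the one written on complete/cancel above, while the three callbacks are the editor-side wiring implied by the function signature:

import { Key, storage } from "@/services/storage/local-storage";
import { startTutorial } from "./tutorial"; // module path assumed

// Launch the tour only if the user has not completed, skipped, or canceled it before.
if (!storage.get(Key.SHEPHERD_TOUR)) {
  startTutorial(emptyNodeList, setPinBlocksPopover, setPinSavePopover); // callbacks provided by the Flow editor (assumed)
}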

View File

@@ -0,0 +1,142 @@
import { useCallback } from "react";
import { Node, Edge, useReactFlow } from "@xyflow/react";
import { Key, storage } from "@/services/storage/local-storage";
import { ConnectedEdge } from "./CustomNode/CustomNode";
interface CopyableData {
nodes: Node[];
edges: Edge[];
}
export function useCopyPaste(getNextNodeId: () => string) {
const { setNodes, addEdges, getNodes, getEdges, getViewport } =
useReactFlow();
const handleCopyPaste = useCallback(
(event: KeyboardEvent) => {
if (event.ctrlKey || event.metaKey) {
if (event.key === "c" || event.key === "C") {
const selectedNodes = getNodes().filter((node) => node.selected);
const selectedNodeIds = new Set(selectedNodes.map((node) => node.id));
// Only copy edges where both source and target nodes are selected
const selectedEdges = getEdges().filter(
(edge) =>
edge.selected &&
selectedNodeIds.has(edge.source) &&
selectedNodeIds.has(edge.target),
);
const copiedData: CopyableData = {
nodes: selectedNodes.map((node) => ({
...node,
data: {
...node.data,
connections: node.data.connections || [], // Preserve connections
},
})),
edges: selectedEdges,
};
storage.set(Key.COPIED_FLOW_DATA, JSON.stringify(copiedData));
}
if (event.key === "v" || event.key === "V") {
const copiedDataString = storage.get(Key.COPIED_FLOW_DATA);
if (copiedDataString) {
const copiedData = JSON.parse(copiedDataString) as CopyableData;
const oldToNewIdMap: Record<string, string> = {};
// Get fresh viewport values at paste time to ensure correct positioning
const { x, y, zoom } = getViewport();
const viewportCenter = {
x: (window.innerWidth / 2 - x) / zoom,
y: (window.innerHeight / 2 - y) / zoom,
};
let minX = Infinity,
minY = Infinity,
maxX = -Infinity,
maxY = -Infinity;
copiedData.nodes.forEach((node: Node) => {
minX = Math.min(minX, node.position.x);
minY = Math.min(minY, node.position.y);
maxX = Math.max(maxX, node.position.x);
maxY = Math.max(maxY, node.position.y);
});
const offsetX = viewportCenter.x - (minX + maxX) / 2;
const offsetY = viewportCenter.y - (minY + maxY) / 2;
const pastedNodes = copiedData.nodes.map((node: Node) => {
const newNodeId = getNextNodeId();
oldToNewIdMap[node.id] = newNodeId;
return {
...node,
id: newNodeId, // Generate unique ID for the pasted node
selected: true, // Select the pasted nodes so they're visible
position: {
x: node.position.x + offsetX,
y: node.position.y + offsetY,
},
data: {
...node.data,
backend_id: undefined, // Clear backend_id so the new node.id is used when saving
connections: node.data.connections || [], // Preserve connections
status: undefined,
executionResults: undefined,
},
};
});
const pastedEdges = copiedData.edges.map((edge) => {
const newSourceId = oldToNewIdMap[edge.source] ?? edge.source;
const newTargetId = oldToNewIdMap[edge.target] ?? edge.target;
return {
...edge,
id: `${newSourceId}_${edge.sourceHandle}_${newTargetId}_${edge.targetHandle}_${Date.now()}`,
source: newSourceId,
target: newTargetId,
};
});
setNodes((existingNodes) => [
...existingNodes.map((node) => ({ ...node, selected: false })),
...pastedNodes,
]);
addEdges(pastedEdges);
setNodes((nodes) => {
return nodes.map((node) => {
const nodeConnections = getEdges()
.filter(
(edge: Edge) =>
edge.source === node.id || edge.target === node.id,
)
.map(
(edge: Edge): ConnectedEdge => ({
id: edge.id,
source: edge.source,
target: edge.target,
sourceHandle: edge.sourceHandle!,
targetHandle: edge.targetHandle!,
}),
);
return {
...node,
data: {
...node.data,
connections: nodeConnections,
},
};
});
});
}
}
}
},
[setNodes, addEdges, getNodes, getEdges, getNextNodeId, getViewport],
);
return handleCopyPaste;
}
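A usage sketch — not part of the diff — showing how the returned handler is expected to be registered: the hook only builds the keydown callback, so the caller wires it to the window. The wrapper hook and getNextNodeId counter here are stand-ins for the editor's real wiring:

import { useEffect } from "react";
import { useCopyPaste } from "./useCopyPaste"; // module path assumed

// Hypothetical wrapper hook used by the flow editor component.
function useEditorCopyPaste(getNextNodeId: () => string) {
  const handleCopyPaste = useCopyPaste(getNextNodeId);

  useEffect(() => {
    // Listen globally so Ctrl/Cmd+C and Ctrl/Cmd+V work anywhere on the canvas.
    window.addEventListener("keydown", handleCopyPaste);
    return () => window.removeEventListener("keydown", handleCopyPaste);
  }, [handleCopyPaste]);
}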

View File

@@ -1,13 +1,64 @@
"use client";
import { ReactFlowProvider } from "@xyflow/react";
import { Flow } from "./components/FlowEditor/Flow/Flow";
export default function BuilderPage() {
import FlowEditor from "@/app/(platform)/build/components/legacy-builder/Flow/Flow";
import { useOnboarding } from "@/providers/onboarding/onboarding-provider";
// import LoadingBox from "@/components/__legacy__/ui/loading";
import { GraphID } from "@/lib/autogpt-server-api/types";
import { ReactFlowProvider } from "@xyflow/react";
import { useSearchParams } from "next/navigation";
import { useEffect } from "react";
import { BuilderViewTabs } from "./components/BuilderViewTabs/BuilderViewTabs";
import { Flow } from "./components/FlowEditor/Flow/Flow";
import { useBuilderView } from "./useBuilderView";
function BuilderContent() {
const query = useSearchParams();
const { completeStep } = useOnboarding();
useEffect(() => {
completeStep("BUILDER_OPEN");
}, [completeStep]);
const _graphVersion = query.get("flowVersion");
const graphVersion = _graphVersion ? parseInt(_graphVersion) : undefined;
return (
<div className="relative h-full w-full">
<ReactFlowProvider>
<Flow />
</ReactFlowProvider>
</div>
<FlowEditor
className="flex h-full w-full"
flowID={(query.get("flowID") as GraphID | null) ?? undefined}
flowVersion={graphVersion}
/>
);
}
export default function BuilderPage() {
const {
isSwitchEnabled,
selectedView,
setSelectedView,
isNewFlowEditorEnabled,
} = useBuilderView();
// Switch is temporary, we will remove it once our new flow editor is ready
if (isSwitchEnabled) {
return (
<div className="relative h-full w-full">
<BuilderViewTabs value={selectedView} onChange={setSelectedView} />
{selectedView === "new" ? (
<ReactFlowProvider>
<Flow />
</ReactFlowProvider>
) : (
<BuilderContent />
)}
</div>
);
}
return isNewFlowEditorEnabled ? (
<ReactFlowProvider>
<Flow />
</ReactFlowProvider>
) : (
<BuilderContent />
);
}

View File

@@ -0,0 +1,44 @@
import { Flag, useGetFlag } from "@/services/feature-flags/use-get-flag";
import { usePathname, useRouter, useSearchParams } from "next/navigation";
import { useEffect, useMemo } from "react";
import { BuilderView } from "./components/BuilderViewTabs/BuilderViewTabs";
export function useBuilderView() {
const isNewFlowEditorEnabled = useGetFlag(Flag.NEW_FLOW_EDITOR);
const isBuilderViewSwitchEnabled = useGetFlag(Flag.BUILDER_VIEW_SWITCH);
const router = useRouter();
const pathname = usePathname();
const searchParams = useSearchParams();
const currentView = searchParams.get("view");
const defaultView = "old";
const selectedView = useMemo<BuilderView>(() => {
if (currentView === "new" || currentView === "old") return currentView;
return defaultView;
}, [currentView, defaultView]);
useEffect(() => {
if (isBuilderViewSwitchEnabled === true) {
if (currentView !== "new" && currentView !== "old") {
const params = new URLSearchParams(searchParams);
params.set("view", defaultView);
router.replace(`${pathname}?${params.toString()}`);
}
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [isBuilderViewSwitchEnabled, defaultView, pathname, router, searchParams]);
const setSelectedView = (value: BuilderView) => {
const params = new URLSearchParams(searchParams);
params.set("view", value);
router.push(`${pathname}?${params.toString()}`);
};
return {
isSwitchEnabled: isBuilderViewSwitchEnabled === true,
selectedView,
setSelectedView,
isNewFlowEditorEnabled: Boolean(isNewFlowEditorEnabled),
} as const;
}

View File

@@ -1,80 +0,0 @@
"use client";
import { SidebarProvider } from "@/components/ui/sidebar";
import { ChatContainer } from "./components/ChatContainer/ChatContainer";
import { ChatSidebar } from "./components/ChatSidebar/ChatSidebar";
import { MobileDrawer } from "./components/MobileDrawer/MobileDrawer";
import { MobileHeader } from "./components/MobileHeader/MobileHeader";
import { ScaleLoader } from "./components/ScaleLoader/ScaleLoader";
import { useCopilotPage } from "./useCopilotPage";
export function CopilotPage() {
const {
sessionId,
messages,
status,
error,
stop,
createSession,
onSend,
isLoadingSession,
isCreatingSession,
isUserLoading,
isLoggedIn,
// Mobile drawer
isMobile,
isDrawerOpen,
sessions,
isLoadingSessions,
handleOpenDrawer,
handleCloseDrawer,
handleDrawerOpenChange,
handleSelectSession,
handleNewChat,
} = useCopilotPage();
if (isUserLoading || !isLoggedIn) {
return (
<div className="fixed inset-0 z-50 flex items-center justify-center bg-[#f8f8f9]">
<ScaleLoader className="text-neutral-400" />
</div>
);
}
return (
<SidebarProvider
defaultOpen={true}
className="h-[calc(100vh-72px)] min-h-0"
>
{!isMobile && <ChatSidebar />}
<div className="relative flex h-full w-full flex-col overflow-hidden bg-[#f8f8f9] px-0">
{isMobile && <MobileHeader onOpenDrawer={handleOpenDrawer} />}
<div className="flex-1 overflow-hidden">
<ChatContainer
messages={messages}
status={status}
error={error}
sessionId={sessionId}
isLoadingSession={isLoadingSession}
isCreatingSession={isCreatingSession}
onCreateSession={createSession}
onSend={onSend}
onStop={stop}
/>
</div>
</div>
{isMobile && (
<MobileDrawer
isOpen={isDrawerOpen}
sessions={sessions}
currentSessionId={sessionId}
isLoading={isLoadingSessions}
onSelectSession={handleSelectSession}
onNewChat={handleNewChat}
onClose={handleCloseDrawer}
onOpenChange={handleDrawerOpenChange}
/>
)}
</SidebarProvider>
);
}

View File

@@ -1,74 +0,0 @@
"use client";
import { ChatInput } from "@/app/(platform)/copilot/components/ChatInput/ChatInput";
import { UIDataTypes, UIMessage, UITools } from "ai";
import { LayoutGroup, motion } from "framer-motion";
import { ChatMessagesContainer } from "../ChatMessagesContainer/ChatMessagesContainer";
import { CopilotChatActionsProvider } from "../CopilotChatActionsProvider/CopilotChatActionsProvider";
import { EmptySession } from "../EmptySession/EmptySession";
export interface ChatContainerProps {
messages: UIMessage<unknown, UIDataTypes, UITools>[];
status: string;
error: Error | undefined;
sessionId: string | null;
isLoadingSession: boolean;
isCreatingSession: boolean;
onCreateSession: () => void | Promise<string>;
onSend: (message: string) => void | Promise<void>;
onStop: () => void;
}
export const ChatContainer = ({
messages,
status,
error,
sessionId,
isLoadingSession,
isCreatingSession,
onCreateSession,
onSend,
onStop,
}: ChatContainerProps) => {
const inputLayoutId = "copilot-2-chat-input";
return (
<CopilotChatActionsProvider onSend={onSend}>
<LayoutGroup id="copilot-2-chat-layout">
<div className="flex h-full min-h-0 w-full flex-col bg-[#f8f8f9] px-2 lg:px-0">
{sessionId ? (
<div className="mx-auto flex h-full min-h-0 w-full max-w-3xl flex-col">
<ChatMessagesContainer
messages={messages}
status={status}
error={error}
isLoading={isLoadingSession}
/>
<motion.div
initial={{ opacity: 0 }}
animate={{ opacity: 1 }}
transition={{ duration: 0.3 }}
className="relative px-3 pb-2 pt-2"
>
<div className="pointer-events-none absolute left-0 right-0 top-[-18px] z-10 h-6 bg-gradient-to-b from-transparent to-[#f8f8f9]" />
<ChatInput
inputId="chat-input-session"
onSend={onSend}
disabled={status === "streaming"}
isStreaming={status === "streaming"}
onStop={onStop}
placeholder="What else can I help with?"
/>
</motion.div>
</div>
) : (
<EmptySession
inputLayoutId={inputLayoutId}
isCreatingSession={isCreatingSession}
onCreateSession={onCreateSession}
onSend={onSend}
/>
)}
</div>
</LayoutGroup>
</CopilotChatActionsProvider>
);
};

View File

@@ -1,294 +0,0 @@
import { getGetWorkspaceDownloadFileByIdUrl } from "@/app/api/__generated__/endpoints/workspace/workspace";
import {
Conversation,
ConversationContent,
ConversationScrollButton,
} from "@/components/ai-elements/conversation";
import {
Message,
MessageContent,
MessageResponse,
} from "@/components/ai-elements/message";
import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner";
import { toast } from "@/components/molecules/Toast/use-toast";
import { ToolUIPart, UIDataTypes, UIMessage, UITools } from "ai";
import { useEffect, useRef, useState } from "react";
import { CreateAgentTool } from "../../tools/CreateAgent/CreateAgent";
import { EditAgentTool } from "../../tools/EditAgent/EditAgent";
import { FindAgentsTool } from "../../tools/FindAgents/FindAgents";
import { FindBlocksTool } from "../../tools/FindBlocks/FindBlocks";
import { RunAgentTool } from "../../tools/RunAgent/RunAgent";
import { RunBlockTool } from "../../tools/RunBlock/RunBlock";
import { SearchDocsTool } from "../../tools/SearchDocs/SearchDocs";
import { ViewAgentOutputTool } from "../../tools/ViewAgentOutput/ViewAgentOutput";
// ---------------------------------------------------------------------------
// Workspace media support
// ---------------------------------------------------------------------------
/**
* Resolve workspace:// URLs in markdown text to proxy download URLs.
* Detects MIME type from the hash fragment (e.g. workspace://id#video/mp4)
* and prefixes the alt text with "video:" so the custom img component can
* render a <video> element instead.
*/
function resolveWorkspaceUrls(text: string): string {
return text.replace(
/!\[([^\]]*)\]\(workspace:\/\/([^)#\s]+)(?:#([^)\s]*))?\)/g,
(_match, alt: string, fileId: string, mimeHint?: string) => {
const apiPath = getGetWorkspaceDownloadFileByIdUrl(fileId);
const url = `/api/proxy${apiPath}`;
if (mimeHint?.startsWith("video/")) {
return `![video:${alt || "Video"}](${url})`;
}
return `![${alt || "Image"}](${url})`;
},
);
}
/**
* Custom img component for Streamdown that renders <video> elements
* for workspace video files (detected via "video:" alt-text prefix).
* Falls back to <video> when an <img> fails to load for workspace files.
*/
function WorkspaceMediaImage(props: React.JSX.IntrinsicElements["img"]) {
const { src, alt, ...rest } = props;
const [imgFailed, setImgFailed] = useState(false);
const isWorkspace = src?.includes("/workspace/files/") ?? false;
if (!src) return null;
if (alt?.startsWith("video:") || (imgFailed && isWorkspace)) {
return (
<span className="my-2 inline-block">
<video
controls
className="h-auto max-w-full rounded-md border border-zinc-200"
preload="metadata"
>
<source src={src} />
Your browser does not support the video tag.
</video>
</span>
);
}
return (
// eslint-disable-next-line @next/next/no-img-element
<img
src={src}
alt={alt || "Image"}
className="h-auto max-w-full rounded-md border border-zinc-200"
loading="lazy"
onError={() => {
if (isWorkspace) setImgFailed(true);
}}
{...rest}
/>
);
}
/** Stable components override for Streamdown (avoids re-creating on every render). */
const STREAMDOWN_COMPONENTS = { img: WorkspaceMediaImage };
const THINKING_PHRASES = [
"Thinking...",
"Considering this...",
"Working through this...",
"Analyzing your request...",
"Reasoning...",
"Looking into it...",
"Processing your request...",
"Mulling this over...",
"Piecing it together...",
"On it...",
];
function getRandomPhrase() {
return THINKING_PHRASES[Math.floor(Math.random() * THINKING_PHRASES.length)];
}
interface ChatMessagesContainerProps {
messages: UIMessage<unknown, UIDataTypes, UITools>[];
status: string;
error: Error | undefined;
isLoading: boolean;
}
export const ChatMessagesContainer = ({
messages,
status,
error,
isLoading,
}: ChatMessagesContainerProps) => {
const [thinkingPhrase, setThinkingPhrase] = useState(getRandomPhrase);
const lastToastTimeRef = useRef(0);
useEffect(() => {
if (status === "submitted") {
setThinkingPhrase(getRandomPhrase());
}
}, [status]);
// Show a toast when a new error occurs, debounced to avoid spam
useEffect(() => {
if (!error) return;
const now = Date.now();
if (now - lastToastTimeRef.current < 3_000) return;
lastToastTimeRef.current = now;
toast({
variant: "destructive",
title: "Something went wrong",
description:
"The assistant encountered an error. Please try sending your message again.",
});
}, [error]);
const lastMessage = messages[messages.length - 1];
const lastAssistantHasVisibleContent =
lastMessage?.role === "assistant" &&
lastMessage.parts.some(
(p) =>
(p.type === "text" && p.text.trim().length > 0) ||
p.type.startsWith("tool-"),
);
const showThinking =
status === "submitted" ||
(status === "streaming" && !lastAssistantHasVisibleContent);
return (
<Conversation className="min-h-0 flex-1">
<ConversationContent className="flex min-h-screen flex-1 flex-col gap-6 px-3 py-6">
{isLoading && messages.length === 0 && (
<div className="flex min-h-full flex-1 items-center justify-center">
<LoadingSpinner className="text-neutral-600" />
</div>
)}
{messages.map((message, messageIndex) => {
const isLastAssistant =
messageIndex === messages.length - 1 &&
message.role === "assistant";
const messageHasVisibleContent = message.parts.some(
(p) =>
(p.type === "text" && p.text.trim().length > 0) ||
p.type.startsWith("tool-"),
);
return (
<Message from={message.role} key={message.id}>
<MessageContent
className={
"text-[1rem] leading-relaxed " +
"group-[.is-user]:rounded-xl group-[.is-user]:bg-purple-100 group-[.is-user]:px-3 group-[.is-user]:py-2.5 group-[.is-user]:text-slate-900 group-[.is-user]:[border-bottom-right-radius:0] " +
"group-[.is-assistant]:bg-transparent group-[.is-assistant]:text-slate-900"
}
>
{message.parts.map((part, i) => {
switch (part.type) {
case "text":
return (
<MessageResponse
key={`${message.id}-${i}`}
components={STREAMDOWN_COMPONENTS}
>
{resolveWorkspaceUrls(part.text)}
</MessageResponse>
);
case "tool-find_block":
return (
<FindBlocksTool
key={`${message.id}-${i}`}
part={part as ToolUIPart}
/>
);
case "tool-find_agent":
case "tool-find_library_agent":
return (
<FindAgentsTool
key={`${message.id}-${i}`}
part={part as ToolUIPart}
/>
);
case "tool-search_docs":
case "tool-get_doc_page":
return (
<SearchDocsTool
key={`${message.id}-${i}`}
part={part as ToolUIPart}
/>
);
case "tool-run_block":
return (
<RunBlockTool
key={`${message.id}-${i}`}
part={part as ToolUIPart}
/>
);
case "tool-run_agent":
case "tool-schedule_agent":
return (
<RunAgentTool
key={`${message.id}-${i}`}
part={part as ToolUIPart}
/>
);
case "tool-create_agent":
return (
<CreateAgentTool
key={`${message.id}-${i}`}
part={part as ToolUIPart}
/>
);
case "tool-edit_agent":
return (
<EditAgentTool
key={`${message.id}-${i}`}
part={part as ToolUIPart}
/>
);
case "tool-view_agent_output":
return (
<ViewAgentOutputTool
key={`${message.id}-${i}`}
part={part as ToolUIPart}
/>
);
default:
return null;
}
})}
{isLastAssistant &&
!messageHasVisibleContent &&
showThinking && (
<span className="inline-block animate-shimmer bg-gradient-to-r from-neutral-400 via-neutral-600 to-neutral-400 bg-[length:200%_100%] bg-clip-text text-transparent">
{thinkingPhrase}
</span>
)}
</MessageContent>
</Message>
);
})}
{showThinking && lastMessage?.role !== "assistant" && (
<Message from="assistant">
<MessageContent className="text-[1rem] leading-relaxed">
<span className="inline-block animate-shimmer bg-gradient-to-r from-neutral-400 via-neutral-600 to-neutral-400 bg-[length:200%_100%] bg-clip-text text-transparent">
{thinkingPhrase}
</span>
</MessageContent>
</Message>
)}
{error && (
<div className="rounded-lg bg-red-50 p-4 text-sm text-red-700">
<p className="font-medium">Something went wrong</p>
<p className="mt-1 text-red-600">
The assistant encountered an error. Please try sending your
message again.
</p>
</div>
)}
</ConversationContent>
<ConversationScrollButton />
</Conversation>
);
};

View File

@@ -1,188 +0,0 @@
"use client";
import { useGetV2ListSessions } from "@/app/api/__generated__/endpoints/chat/chat";
import { Button } from "@/components/atoms/Button/Button";
import { LoadingSpinner } from "@/components/atoms/LoadingSpinner/LoadingSpinner";
import { Text } from "@/components/atoms/Text/Text";
import {
Sidebar,
SidebarContent,
SidebarFooter,
SidebarHeader,
SidebarTrigger,
useSidebar,
} from "@/components/ui/sidebar";
import { cn } from "@/lib/utils";
import { PlusCircleIcon, PlusIcon } from "@phosphor-icons/react";
import { motion } from "framer-motion";
import { parseAsString, useQueryState } from "nuqs";
export function ChatSidebar() {
const { state } = useSidebar();
const isCollapsed = state === "collapsed";
const [sessionId, setSessionId] = useQueryState("sessionId", parseAsString);
const { data: sessionsResponse, isLoading: isLoadingSessions } =
useGetV2ListSessions({ limit: 50 });
const sessions =
sessionsResponse?.status === 200 ? sessionsResponse.data.sessions : [];
function handleNewChat() {
setSessionId(null);
}
function handleSelectSession(id: string) {
setSessionId(id);
}
function formatDate(dateString: string) {
const date = new Date(dateString);
const now = new Date();
const diffMs = now.getTime() - date.getTime();
const diffDays = Math.floor(diffMs / (1000 * 60 * 60 * 24));
if (diffDays === 0) return "Today";
if (diffDays === 1) return "Yesterday";
if (diffDays < 7) return `${diffDays} days ago`;
const day = date.getDate();
const ordinal =
day % 10 === 1 && day !== 11
? "st"
: day % 10 === 2 && day !== 12
? "nd"
: day % 10 === 3 && day !== 13
? "rd"
: "th";
const month = date.toLocaleDateString("en-US", { month: "short" });
const year = date.getFullYear();
return `${day}${ordinal} ${month} ${year}`;
}
return (
<Sidebar
variant="inset"
collapsible="icon"
className="!top-[50px] !h-[calc(100vh-50px)] border-r border-zinc-100 px-0"
>
{isCollapsed && (
<SidebarHeader
className={cn(
"flex",
isCollapsed
? "flex-row items-center justify-between gap-y-4 md:flex-col md:items-start md:justify-start"
: "flex-row items-center justify-between",
)}
>
<motion.div
key={isCollapsed ? "header-collapsed" : "header-expanded"}
className="flex flex-col items-center gap-3 pt-4"
initial={{ opacity: 0, filter: "blur(3px)" }}
animate={{ opacity: 1, filter: "blur(0px)" }}
transition={{ type: "spring", bounce: 0.2 }}
>
<div className="flex flex-col items-center gap-2">
<SidebarTrigger />
<Button
variant="ghost"
onClick={handleNewChat}
style={{ minWidth: "auto", width: "auto" }}
>
<PlusCircleIcon className="!size-5" />
<span className="sr-only">New Chat</span>
</Button>
</div>
</motion.div>
</SidebarHeader>
)}
<SidebarContent className="gap-4 overflow-y-auto px-4 py-4 [-ms-overflow-style:none] [scrollbar-width:none] [&::-webkit-scrollbar]:hidden">
{!isCollapsed && (
<motion.div
initial={{ opacity: 0 }}
animate={{ opacity: 1 }}
transition={{ duration: 0.2, delay: 0.1 }}
className="flex items-center justify-between px-3"
>
<Text variant="h3" size="body-medium">
Your chats
</Text>
<div className="relative left-6">
<SidebarTrigger />
</div>
</motion.div>
)}
{!isCollapsed && (
<motion.div
initial={{ opacity: 0 }}
animate={{ opacity: 1 }}
transition={{ duration: 0.2, delay: 0.15 }}
className="mt-4 flex flex-col gap-1"
>
{isLoadingSessions ? (
<div className="flex min-h-[30rem] items-center justify-center py-4">
<LoadingSpinner size="small" className="text-neutral-600" />
</div>
) : sessions.length === 0 ? (
<p className="py-4 text-center text-sm text-neutral-500">
No conversations yet
</p>
) : (
sessions.map((session) => (
<button
key={session.id}
onClick={() => handleSelectSession(session.id)}
className={cn(
"w-full rounded-lg px-3 py-2.5 text-left transition-colors",
session.id === sessionId
? "bg-zinc-100"
: "hover:bg-zinc-50",
)}
>
<div className="flex min-w-0 max-w-full flex-col overflow-hidden">
<div className="min-w-0 max-w-full">
<Text
variant="body"
className={cn(
"truncate font-normal",
session.id === sessionId
? "text-zinc-600"
: "text-zinc-800",
)}
>
{session.title || `Untitled chat`}
</Text>
</div>
<Text variant="small" className="text-neutral-400">
{formatDate(session.updated_at)}
</Text>
</div>
</button>
))
)}
</motion.div>
)}
</SidebarContent>
{!isCollapsed && sessionId && (
<SidebarFooter className="shrink-0 bg-zinc-50 p-3 pb-1 shadow-[0_-4px_6px_-1px_rgba(0,0,0,0.05)]">
<motion.div
initial={{ opacity: 0 }}
animate={{ opacity: 1 }}
transition={{ duration: 0.2, delay: 0.2 }}
>
<Button
variant="primary"
size="small"
onClick={handleNewChat}
className="w-full"
leftIcon={<PlusIcon className="h-4 w-4" weight="bold" />}
>
New Chat
</Button>
</motion.div>
</SidebarFooter>
)}
</Sidebar>
);
}

View File

@@ -1,16 +0,0 @@
"use client";
import { CopilotChatActionsContext } from "./useCopilotChatActions";
interface Props {
onSend: (message: string) => void | Promise<void>;
children: React.ReactNode;
}
export function CopilotChatActionsProvider({ onSend, children }: Props) {
return (
<CopilotChatActionsContext.Provider value={{ onSend }}>
{children}
</CopilotChatActionsContext.Provider>
);
}

View File

@@ -1,23 +0,0 @@
"use client";
import { createContext, useContext } from "react";
interface CopilotChatActions {
onSend: (message: string) => void | Promise<void>;
}
const CopilotChatActionsContext = createContext<CopilotChatActions | null>(
null,
);
export function useCopilotChatActions(): CopilotChatActions {
const ctx = useContext(CopilotChatActionsContext);
if (!ctx) {
throw new Error(
"useCopilotChatActions must be used within CopilotChatActionsProvider",
);
}
return ctx;
}
export { CopilotChatActionsContext };

View File

@@ -0,0 +1,99 @@
"use client";
import { ChatLoader } from "@/components/contextual/Chat/components/ChatLoader/ChatLoader";
import { Text } from "@/components/atoms/Text/Text";
import { NAVBAR_HEIGHT_PX } from "@/lib/constants";
import type { ReactNode } from "react";
import { DesktopSidebar } from "./components/DesktopSidebar/DesktopSidebar";
import { MobileDrawer } from "./components/MobileDrawer/MobileDrawer";
import { MobileHeader } from "./components/MobileHeader/MobileHeader";
import { useCopilotShell } from "./useCopilotShell";
interface Props {
children: ReactNode;
}
export function CopilotShell({ children }: Props) {
const {
isMobile,
isDrawerOpen,
isLoading,
isCreatingSession,
isLoggedIn,
hasActiveSession,
sessions,
currentSessionId,
handleOpenDrawer,
handleCloseDrawer,
handleDrawerOpenChange,
handleNewChatClick,
handleSessionClick,
hasNextPage,
isFetchingNextPage,
fetchNextPage,
} = useCopilotShell();
if (!isLoggedIn) {
return (
<div className="flex h-full items-center justify-center">
<ChatLoader />
</div>
);
}
return (
<div
className="flex overflow-hidden bg-[#EFEFF0]"
style={{ height: `calc(100vh - ${NAVBAR_HEIGHT_PX}px)` }}
>
{!isMobile && (
<DesktopSidebar
sessions={sessions}
currentSessionId={currentSessionId}
isLoading={isLoading}
hasNextPage={hasNextPage}
isFetchingNextPage={isFetchingNextPage}
onSelectSession={handleSessionClick}
onFetchNextPage={fetchNextPage}
onNewChat={handleNewChatClick}
hasActiveSession={Boolean(hasActiveSession)}
/>
)}
<div className="relative flex min-h-0 flex-1 flex-col">
{isMobile && <MobileHeader onOpenDrawer={handleOpenDrawer} />}
<div className="flex min-h-0 flex-1 flex-col">
{isCreatingSession ? (
<div className="flex h-full flex-1 flex-col items-center justify-center bg-[#f8f8f9]">
<div className="flex flex-col items-center gap-4">
<ChatLoader />
<Text variant="body" className="text-zinc-500">
Creating your chat...
</Text>
</div>
</div>
) : (
children
)}
</div>
</div>
{isMobile && (
<MobileDrawer
isOpen={isDrawerOpen}
sessions={sessions}
currentSessionId={currentSessionId}
isLoading={isLoading}
hasNextPage={hasNextPage}
isFetchingNextPage={isFetchingNextPage}
onSelectSession={handleSessionClick}
onFetchNextPage={fetchNextPage}
onNewChat={handleNewChatClick}
onClose={handleCloseDrawer}
onOpenChange={handleDrawerOpenChange}
hasActiveSession={Boolean(hasActiveSession)}
/>
)}
</div>
);
}
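A sketch — not part of the diff — of how CopilotShell is presumably mounted, wrapping the copilot page content (for example from a Next.js layout); the file location and import path are assumptions:

// e.g. app/(platform)/copilot/layout.tsx — location assumed
import type { ReactNode } from "react";
import { CopilotShell } from "./components/CopilotShell/CopilotShell"; // path assumed

export default function CopilotLayout({ children }: { children: ReactNode }) {
  return <CopilotShell>{children}</CopilotShell>;
}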

View File

@@ -0,0 +1,70 @@
import type { SessionSummaryResponse } from "@/app/api/__generated__/models/sessionSummaryResponse";
import { Button } from "@/components/atoms/Button/Button";
import { Text } from "@/components/atoms/Text/Text";
import { scrollbarStyles } from "@/components/styles/scrollbars";
import { cn } from "@/lib/utils";
import { Plus } from "@phosphor-icons/react";
import { SessionsList } from "../SessionsList/SessionsList";
interface Props {
sessions: SessionSummaryResponse[];
currentSessionId: string | null;
isLoading: boolean;
hasNextPage: boolean;
isFetchingNextPage: boolean;
onSelectSession: (sessionId: string) => void;
onFetchNextPage: () => void;
onNewChat: () => void;
hasActiveSession: boolean;
}
export function DesktopSidebar({
sessions,
currentSessionId,
isLoading,
hasNextPage,
isFetchingNextPage,
onSelectSession,
onFetchNextPage,
onNewChat,
hasActiveSession,
}: Props) {
return (
<aside className="flex h-full w-80 flex-col border-r border-zinc-100 bg-zinc-50">
<div className="shrink-0 px-6 py-4">
<Text variant="h3" size="body-medium">
Your chats
</Text>
</div>
<div
className={cn(
"flex min-h-0 flex-1 flex-col overflow-y-auto px-3 py-3",
scrollbarStyles,
)}
>
<SessionsList
sessions={sessions}
currentSessionId={currentSessionId}
isLoading={isLoading}
hasNextPage={hasNextPage}
isFetchingNextPage={isFetchingNextPage}
onSelectSession={onSelectSession}
onFetchNextPage={onFetchNextPage}
/>
</div>
{hasActiveSession && (
<div className="shrink-0 bg-zinc-50 p-3 shadow-[0_-4px_6px_-1px_rgba(0,0,0,0.05)]">
<Button
variant="primary"
size="small"
onClick={onNewChat}
className="w-full"
leftIcon={<Plus width="1rem" height="1rem" />}
>
New Chat
</Button>
</div>
)}
</aside>
);
}

View File

@@ -0,0 +1,91 @@
import type { SessionSummaryResponse } from "@/app/api/__generated__/models/sessionSummaryResponse";
import { Button } from "@/components/atoms/Button/Button";
import { scrollbarStyles } from "@/components/styles/scrollbars";
import { cn } from "@/lib/utils";
import { PlusIcon, X } from "@phosphor-icons/react";
import { Drawer } from "vaul";
import { SessionsList } from "../SessionsList/SessionsList";
interface Props {
isOpen: boolean;
sessions: SessionSummaryResponse[];
currentSessionId: string | null;
isLoading: boolean;
hasNextPage: boolean;
isFetchingNextPage: boolean;
onSelectSession: (sessionId: string) => void;
onFetchNextPage: () => void;
onNewChat: () => void;
onClose: () => void;
onOpenChange: (open: boolean) => void;
hasActiveSession: boolean;
}
export function MobileDrawer({
isOpen,
sessions,
currentSessionId,
isLoading,
hasNextPage,
isFetchingNextPage,
onSelectSession,
onFetchNextPage,
onNewChat,
onClose,
onOpenChange,
hasActiveSession,
}: Props) {
return (
<Drawer.Root open={isOpen} onOpenChange={onOpenChange} direction="left">
<Drawer.Portal>
<Drawer.Overlay className="fixed inset-0 z-[60] bg-black/10 backdrop-blur-sm" />
<Drawer.Content className="fixed left-0 top-0 z-[70] flex h-full w-80 flex-col border-r border-zinc-200 bg-zinc-50">
<div className="shrink-0 border-b border-zinc-200 p-4">
<div className="flex items-center justify-between">
<Drawer.Title className="text-lg font-semibold text-zinc-800">
Your chats
</Drawer.Title>
<Button
variant="icon"
size="icon"
aria-label="Close sessions"
onClick={onClose}
>
<X width="1.25rem" height="1.25rem" />
</Button>
</div>
</div>
<div
className={cn(
"flex min-h-0 flex-1 flex-col overflow-y-auto px-3 py-3",
scrollbarStyles,
)}
>
<SessionsList
sessions={sessions}
currentSessionId={currentSessionId}
isLoading={isLoading}
hasNextPage={hasNextPage}
isFetchingNextPage={isFetchingNextPage}
onSelectSession={onSelectSession}
onFetchNextPage={onFetchNextPage}
/>
</div>
{hasActiveSession && (
<div className="shrink-0 bg-white p-3 shadow-[0_-4px_6px_-1px_rgba(0,0,0,0.05)]">
<Button
variant="primary"
size="small"
onClick={onNewChat}
className="w-full"
leftIcon={<PlusIcon width="1rem" height="1rem" />}
>
New Chat
</Button>
</div>
)}
</Drawer.Content>
</Drawer.Portal>
</Drawer.Root>
);
}

View File

@@ -0,0 +1,24 @@
import { useState } from "react";
export function useMobileDrawer() {
const [isDrawerOpen, setIsDrawerOpen] = useState(false);
const handleOpenDrawer = () => {
setIsDrawerOpen(true);
};
const handleCloseDrawer = () => {
setIsDrawerOpen(false);
};
const handleDrawerOpenChange = (open: boolean) => {
setIsDrawerOpen(open);
};
return {
isDrawerOpen,
handleOpenDrawer,
handleCloseDrawer,
handleDrawerOpenChange,
};
}

View File

@@ -0,0 +1,80 @@
import type { SessionSummaryResponse } from "@/app/api/__generated__/models/sessionSummaryResponse";
import { Skeleton } from "@/components/__legacy__/ui/skeleton";
import { Text } from "@/components/atoms/Text/Text";
import { InfiniteList } from "@/components/molecules/InfiniteList/InfiniteList";
import { cn } from "@/lib/utils";
import { getSessionTitle } from "../../helpers";
interface Props {
sessions: SessionSummaryResponse[];
currentSessionId: string | null;
isLoading: boolean;
hasNextPage: boolean;
isFetchingNextPage: boolean;
onSelectSession: (sessionId: string) => void;
onFetchNextPage: () => void;
}
export function SessionsList({
sessions,
currentSessionId,
isLoading,
hasNextPage,
isFetchingNextPage,
onSelectSession,
onFetchNextPage,
}: Props) {
if (isLoading) {
return (
<div className="space-y-1">
{Array.from({ length: 5 }).map((_, i) => (
<div key={i} className="rounded-lg px-3 py-2.5">
<Skeleton className="h-5 w-full" />
</div>
))}
</div>
);
}
if (sessions.length === 0) {
return (
<div className="flex h-full items-center justify-center">
<Text variant="body" className="text-zinc-500">
You don&apos;t have previous chats
</Text>
</div>
);
}
return (
<InfiniteList
items={sessions}
hasMore={hasNextPage}
isFetchingMore={isFetchingNextPage}
onEndReached={onFetchNextPage}
className="space-y-1"
renderItem={(session) => {
const isActive = session.id === currentSessionId;
return (
<button
onClick={() => onSelectSession(session.id)}
className={cn(
"w-full rounded-lg px-3 py-2.5 text-left transition-colors",
isActive ? "bg-zinc-100" : "hover:bg-zinc-50",
)}
>
<Text
variant="body"
className={cn(
"font-normal",
isActive ? "text-zinc-600" : "text-zinc-800",
)}
>
{getSessionTitle(session)}
</Text>
</button>
);
}}
/>
);
}

Some files were not shown because too many files have changed in this diff.